From bca6e12f93fba5fbc0fd4b9c09366902a5702220 Mon Sep 17 00:00:00 2001 From: Ishika Roy Date: Wed, 10 Sep 2025 14:53:29 -0700 Subject: [PATCH 1/6] add regression changes --- regression/README.md | 92 +++++++ regression/benchmark_scripts/benchmark.py | 251 ++++++++++++++++++ .../benchmark_scripts/configs/README.md | 12 + .../configs/example_test_creation.py | 70 +++++ .../configs/test_name_config.json | 23 ++ .../configs/test_name_data.json | 117 ++++++++ .../benchmark_scripts/results/test_name.csv | 2 + regression/benchmark_scripts/transform.py | 115 ++++++++ regression/benchmark_scripts/utils.py | 165 ++++++++++++ .../benchmarks/clear_optimization_data.csv | 6 + .../full_solve_matrix_data_off_10.csv | 6 + .../full_solve_matrix_data_off_1000.csv | 6 + .../full_solve_matrix_data_on_10.csv | 6 + .../full_solve_matrix_data_on_1000.csv | 6 + .../full_solve_sync_matrix_data_off_10.csv | 6 + .../full_solve_sync_matrix_data_off_1000.csv | 6 + .../full_solve_sync_wpg_data_off_10.csv | 6 + .../full_solve_sync_wpg_data_off_1000.csv | 6 + .../benchmarks/full_solve_wpg_data_off_10.csv | 6 + .../full_solve_wpg_data_off_1000.csv | 6 + .../get_optimization_data_state_empty.csv | 6 + regression/benchmarks/health.csv | 6 + regression/benchmarks/homberger_C1_10_1.csv | 22 ++ regression/benchmarks/homberger_C1_10_4.csv | 22 ++ regression/benchmarks/homberger_C1_10_9.csv | 22 ++ regression/benchmarks/linlim_LC1_10_1.csv | 21 ++ regression/benchmarks/linlim_LC1_10_4.csv | 21 ++ regression/benchmarks/linlim_LC1_10_9.csv | 21 ++ regression/benchmarks/set_config.csv | 6 + regression/benchmarks/set_fleet_2.csv | 6 + regression/benchmarks/set_fleet_800.csv | 6 + regression/benchmarks/set_matrix_10.csv | 6 + regression/benchmarks/set_matrix_1000.csv | 6 + regression/benchmarks/set_tasks_10.csv | 6 + regression/benchmarks/set_tasks_1000.csv | 6 + .../benchmarks/set_waypoint_graph_10.csv | 6 + .../benchmarks/set_waypoint_graph_1000.csv | 6 + .../benchmarks/solve_delta_matrix_10.csv | 6 + 
.../benchmarks/solve_delta_matrix_1000.csv | 6 + regression/benchmarks/solve_delta_wpg_10.csv | 6 + .../benchmarks/solve_delta_wpg_1000.csv | 6 + regression/benchmarks/update_config.csv | 6 + regression/benchmarks/update_fleet_2.csv | 6 + regression/benchmarks/update_fleet_800.csv | 6 + regression/benchmarks/update_matrix_10.csv | 6 + regression/benchmarks/update_matrix_1000.csv | 6 + regression/benchmarks/update_tasks_10.csv | 6 + regression/benchmarks/update_tasks_1000.csv | 6 + .../benchmarks/update_waypoint_graph_10.csv | 6 + .../benchmarks/update_waypoint_graph_1000.csv | 6 + regression/config.sh | 67 +++++ regression/cronjob.sh | 203 ++++++++++++++ regression/functions.sh | 144 ++++++++++ regression/gsheet-report.py | 178 +++++++++++++ regression/lp_regression_test.sh | 42 +++ regression/mip_regression_test.sh | 42 +++ regression/report.sh | 67 +++++ regression/routing_regression_test.sh | 43 +++ regression/run_regression.sh | 27 ++ regression/save_benchmark_results.py | 41 +++ regression/save_benchmarks.sh | 37 +++ regression/send-slack-report.sh | 116 ++++++++ regression/setup-benchmark-dir.sh | 29 ++ regression/slack_msg.json | 68 +++++ regression/test-container.sh | 30 +++ regression/update_asv_database.py | 132 +++++++++ regression/write-cuopt-meta-data.sh | 38 +++ 67 files changed, 2490 insertions(+) create mode 100644 regression/README.md create mode 100644 regression/benchmark_scripts/benchmark.py create mode 100644 regression/benchmark_scripts/configs/README.md create mode 100644 regression/benchmark_scripts/configs/example_test_creation.py create mode 100644 regression/benchmark_scripts/configs/test_name_config.json create mode 100644 regression/benchmark_scripts/configs/test_name_data.json create mode 100644 regression/benchmark_scripts/results/test_name.csv create mode 100644 regression/benchmark_scripts/transform.py create mode 100644 regression/benchmark_scripts/utils.py create mode 100644 regression/benchmarks/clear_optimization_data.csv create 
mode 100644 regression/benchmarks/full_solve_matrix_data_off_10.csv create mode 100644 regression/benchmarks/full_solve_matrix_data_off_1000.csv create mode 100644 regression/benchmarks/full_solve_matrix_data_on_10.csv create mode 100644 regression/benchmarks/full_solve_matrix_data_on_1000.csv create mode 100644 regression/benchmarks/full_solve_sync_matrix_data_off_10.csv create mode 100644 regression/benchmarks/full_solve_sync_matrix_data_off_1000.csv create mode 100644 regression/benchmarks/full_solve_sync_wpg_data_off_10.csv create mode 100644 regression/benchmarks/full_solve_sync_wpg_data_off_1000.csv create mode 100644 regression/benchmarks/full_solve_wpg_data_off_10.csv create mode 100644 regression/benchmarks/full_solve_wpg_data_off_1000.csv create mode 100644 regression/benchmarks/get_optimization_data_state_empty.csv create mode 100644 regression/benchmarks/health.csv create mode 100644 regression/benchmarks/homberger_C1_10_1.csv create mode 100644 regression/benchmarks/homberger_C1_10_4.csv create mode 100644 regression/benchmarks/homberger_C1_10_9.csv create mode 100644 regression/benchmarks/linlim_LC1_10_1.csv create mode 100644 regression/benchmarks/linlim_LC1_10_4.csv create mode 100644 regression/benchmarks/linlim_LC1_10_9.csv create mode 100644 regression/benchmarks/set_config.csv create mode 100644 regression/benchmarks/set_fleet_2.csv create mode 100644 regression/benchmarks/set_fleet_800.csv create mode 100644 regression/benchmarks/set_matrix_10.csv create mode 100644 regression/benchmarks/set_matrix_1000.csv create mode 100644 regression/benchmarks/set_tasks_10.csv create mode 100644 regression/benchmarks/set_tasks_1000.csv create mode 100644 regression/benchmarks/set_waypoint_graph_10.csv create mode 100644 regression/benchmarks/set_waypoint_graph_1000.csv create mode 100644 regression/benchmarks/solve_delta_matrix_10.csv create mode 100644 regression/benchmarks/solve_delta_matrix_1000.csv create mode 100644 
regression/benchmarks/solve_delta_wpg_10.csv create mode 100644 regression/benchmarks/solve_delta_wpg_1000.csv create mode 100644 regression/benchmarks/update_config.csv create mode 100644 regression/benchmarks/update_fleet_2.csv create mode 100644 regression/benchmarks/update_fleet_800.csv create mode 100644 regression/benchmarks/update_matrix_10.csv create mode 100644 regression/benchmarks/update_matrix_1000.csv create mode 100644 regression/benchmarks/update_tasks_10.csv create mode 100644 regression/benchmarks/update_tasks_1000.csv create mode 100644 regression/benchmarks/update_waypoint_graph_10.csv create mode 100644 regression/benchmarks/update_waypoint_graph_1000.csv create mode 100644 regression/config.sh create mode 100755 regression/cronjob.sh create mode 100644 regression/functions.sh create mode 100755 regression/gsheet-report.py create mode 100644 regression/lp_regression_test.sh create mode 100644 regression/mip_regression_test.sh create mode 100755 regression/report.sh create mode 100644 regression/routing_regression_test.sh create mode 100644 regression/run_regression.sh create mode 100644 regression/save_benchmark_results.py create mode 100644 regression/save_benchmarks.sh create mode 100755 regression/send-slack-report.sh create mode 100755 regression/setup-benchmark-dir.sh create mode 100644 regression/slack_msg.json create mode 100644 regression/test-container.sh create mode 100644 regression/update_asv_database.py create mode 100755 regression/write-cuopt-meta-data.sh diff --git a/regression/README.md b/regression/README.md new file mode 100644 index 000000000..66660984d --- /dev/null +++ b/regression/README.md @@ -0,0 +1,92 @@ +# Cuopt Regression Testing + + + +## Getting started + +To make it easy for you to get started with GitLab, here's a list of recommended next steps. + +Already a pro? Just edit this README.md and make it your own. Want to make it easy? [Use the template at the bottom](#editing-this-readme)! 
+ +## Add your files + +- [ ] [Create](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#create-a-file) or [upload](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#upload-a-file) files +- [ ] [Add files using the command line](https://docs.gitlab.com/ee/gitlab-basics/add-file.html#add-a-file-using-the-command-line) or push an existing Git repository with the following command: + +``` +cd existing_repo +git remote add origin https://gitlab-master.nvidia.com/ramakrishnap/cuopt-regression-testing.git +git branch -M main +git push -uf origin main +``` + +## Integrate with your tools + +- [ ] [Set up project integrations](https://gitlab-master.nvidia.com/ramakrishnap/cuopt-regression-testing/-/settings/integrations) + +## Collaborate with your team + +- [ ] [Invite team members and collaborators](https://docs.gitlab.com/ee/user/project/members/) +- [ ] [Create a new merge request](https://docs.gitlab.com/ee/user/project/merge_requests/creating_merge_requests.html) +- [ ] [Automatically close issues from merge requests](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically) +- [ ] [Enable merge request approvals](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/) +- [ ] [Automatically merge when pipeline succeeds](https://docs.gitlab.com/ee/user/project/merge_requests/merge_when_pipeline_succeeds.html) + +## Test and Deploy + +Use the built-in continuous integration in GitLab. 
+ +- [ ] [Get started with GitLab CI/CD](https://docs.gitlab.com/ee/ci/quick_start/index.html) +- [ ] [Analyze your code for known vulnerabilities with Static Application Security Testing(SAST)](https://docs.gitlab.com/ee/user/application_security/sast/) +- [ ] [Deploy to Kubernetes, Amazon EC2, or Amazon ECS using Auto Deploy](https://docs.gitlab.com/ee/topics/autodevops/requirements.html) +- [ ] [Use pull-based deployments for improved Kubernetes management](https://docs.gitlab.com/ee/user/clusters/agent/) +- [ ] [Set up protected environments](https://docs.gitlab.com/ee/ci/environments/protected_environments.html) + +*** + +# Editing this README + +When you're ready to make this README your own, just edit this file and use the handy template below (or feel free to structure it however you want - this is just a starting point!). Thank you to [makeareadme.com](https://www.makeareadme.com/) for this template. + +## Suggestions for a good README +Every project is different, so consider which of these sections apply to yours. The sections used in the template are suggestions for most open source projects. Also keep in mind that while a README can be too long and detailed, too long is better than too short. If you think your README is too long, consider utilizing another form of documentation rather than cutting out information. + +## Name +Choose a self-explaining name for your project. + +## Description +Let people know what your project can do specifically. Provide context and add a link to any reference visitors might be unfamiliar with. A list of Features or a Background subsection can also be added here. If there are alternatives to your project, this is a good place to list differentiating factors. + +## Badges +On some READMEs, you may see small images that convey metadata, such as whether or not all the tests are passing for the project. You can use Shields to add some to your README. Many services also have instructions for adding a badge. 
+ +## Visuals +Depending on what you are making, it can be a good idea to include screenshots or even a video (you'll frequently see GIFs rather than actual videos). Tools like ttygif can help, but check out Asciinema for a more sophisticated method. + +## Installation +Within a particular ecosystem, there may be a common way of installing things, such as using Yarn, NuGet, or Homebrew. However, consider the possibility that whoever is reading your README is a novice and would like more guidance. Listing specific steps helps remove ambiguity and gets people to using your project as quickly as possible. If it only runs in a specific context like a particular programming language version or operating system or has dependencies that have to be installed manually, also add a Requirements subsection. + +## Usage +Use examples liberally, and show the expected output if you can. It's helpful to have inline the smallest example of usage that you can demonstrate, while providing links to more sophisticated examples if they are too long to reasonably include in the README. + +## Support +Tell people where they can go to for help. It can be any combination of an issue tracker, a chat room, an email address, etc. + +## Roadmap +If you have ideas for releases in the future, it is a good idea to list them in the README. + +## Contributing +State if you are open to contributions and what your requirements are for accepting them. + +For people who want to make changes to your project, it's helpful to have some documentation on how to get started. Perhaps there is a script that they should run or some environment variables that they need to set. Make these steps explicit. These instructions could also be useful to your future self. + +You can also document commands to lint the code or run tests. These steps help to ensure high code quality and reduce the likelihood that the changes inadvertently break something. 
Having instructions for running tests is especially helpful if it requires external setup, such as starting a Selenium server for testing in a browser. + +## Authors and acknowledgment +Show your appreciation to those who have contributed to the project. + +## License +For open source projects, say how it is licensed. + +## Project status +If you have run out of energy or time for your project, put a note at the top of the README saying that development has slowed down or stopped completely. Someone may choose to fork your project or volunteer to step in as a maintainer or owner, allowing your project to keep going. You can also make an explicit request for maintainers. diff --git a/regression/benchmark_scripts/benchmark.py b/regression/benchmark_scripts/benchmark.py new file mode 100644 index 000000000..56ef78fb2 --- /dev/null +++ b/regression/benchmark_scripts/benchmark.py @@ -0,0 +1,251 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
+ + +from utils import get_configuration, LPMetrics, RoutingMetrics +from cuopt import linear_programming +from cuopt import routing +from cuopt import utilities +import rmm +import time +import pandas as pd +import glob +import logging as log +from datetime import datetime +import os +import argparse + +def create_regression_markdown(data, regression_path, test_type_string): + regression_md_file = regression_path + "/" + test_type_string + "_regressions.md" + + md_data = "*No regressions*" + # This to reduce size of slack message + limit_no_of_regression_list = 5 + + if len(data) > 0: + status = "*!! Regressions found !!*" + end_msg = "\n*Continues ...*" if len(data) > limit_no_of_regression_list else "" + table = data[:limit_no_of_regression_list].to_string(index=False) + md_data = status + f'\n```\n{table}\n```' + end_msg + + with open(regression_md_file, "w") as fp: + fp.write(md_data) + +def record_regressions(test_name, data, req_metrics, regression_path, test_type_string): + + regression_file = regression_path + "/" + test_type_string + "_regressions.csv" + + regression_df = pd.DataFrame({"Test Name":[], "Metric Name":[], "Value":[], "Avg Value":[], "Regression(%)":[]}) + for name in req_metrics: + if name.startswith("bks_change_"): + pchange = data[name].iloc[-1].item() + metric_name = name.replace("bks_change_", "") + limit = req_metrics[metric_name]["bks"].get("threshold", 5) + prev_val_mean = pchange + latest_val = pchange + else: + limit = req_metrics[name].get("threshold", 5) + prev_val_mean = data[name][:-1][-30:].mean().item() if len(data) > 1 else data[name].iloc[-1].item() + latest_val = data[name].iloc[-1].item() + + if prev_val_mean == 0: + pchange = latest_val + else: + pchange = ((latest_val - prev_val_mean)/prev_val_mean) * 100 + + if abs(pchange) >= limit: + regression_df.loc[len(regression_df)] = [test_name, name, latest_val, prev_val_mean, pchange] + + regression_df.to_csv(regression_file) + create_regression_markdown(regression_df, 
regression_path, test_type_string) + +def get_bks_change( + metrics, required_metrics +): + bks_metrics = {} + for metric, value in required_metrics.items(): + if "bks" in value.keys(): + bks = value["bks"]["value"] + if bks == None: + continue + current = metrics[metric] + if bks == 0: + bks_metrics["bks_change_" + metric] = abs(current) * 100 + elif current == 0: + bks_metrics["bks_change_" + metric] = abs(bks) * 100 + else: + bks_metrics["bks_change_" + metric] = abs(((current - bks)/bks) * 100) + + return bks_metrics + +def record_result( + test_name, metrics, required_metrics, csv_path, test_type_string +): + + file_path = csv_path + "/" + test_name + ".csv" + + bks_metrics = get_bks_change(metrics, required_metrics) + # Add default metrics to data + required_metrics.update(bks_metrics) + metrics.update(bks_metrics) + + req_metrics = list(required_metrics.keys()) + ["date_time", "git_commit"] + + current_data = pd.DataFrame({key : [metrics[key]] for key in sorted(req_metrics)}) + if os.path.isfile(file_path): + previous_data = pd.read_csv(file_path, index_col=0) + updated_data = pd.concat([previous_data, current_data], ignore_index=True) + else: + updated_data = current_data + + record_regressions(test_name, updated_data, required_metrics, csv_path, test_type_string) + + updated_data.to_csv(file_path) + + +def run_benchmark( + test_name, + data_model, + solver_settings, + required_metrics, + csv_path, + git_commit, + test_status_file +): + mr = rmm.mr.get_current_device_resource() + + start_time = time.time() + if test_name.startswith("LP_") or test_name.startswith("MIP_"): + metrics = LPMetrics()._asdict() + solver_settings.set_parameter("infeasibility_detection", False) + solver_settings.set_parameter("time_limit", 180) + solution = linear_programming.Solve(data_model, solver_settings) + else: + metrics = RoutingMetrics()._asdict() + solution = routing.Solve(data_model, solver_settings) + end_time = time.time() + + metrics["gpu_memory_usage"] = 
int(mr.allocation_counts.peak_bytes/(1024*1024)) + metrics["date_time"] = datetime.now().strftime("%m_%d_%Y_%H_%M_%S") + metrics["git_commit"] = git_commit + + success_status = False + + if test_name.startswith("LP_") or test_name.startswith("MIP_"): + ## Optimal solution + if solution.get_termination_reason() == 1: + test_type_string = "lp" if test_name.startswith("LP_") else "mip" + success_status = True + metrics["solver_time"] = solution.get_solve_time() + metrics["primal_objective_value"] = solution.get_primal_objective() + if test_type_string == "lp": + lp_stats = solution.get_lp_stats() + metrics["nb_iterations"] = lp_stats["nb_iterations"] + else: + milp_stats = solution.get_milp_stats() + metrics["mip_gap"] = milp_stats["mip_gap"] + metrics["max_constraint_violation"] = milp_stats["max_constraint_violation"] + metrics["max_int_violation"] = milp_stats["max_int_violation"] + metrics["max_variable_bound_violation"] = milp_stats["max_variable_bound_violation"] + record_result(test_name, metrics, required_metrics, csv_path, test_type_string) + else: + if solution.get_status() == 0: + success_status = True + metrics["solver_time"] = end_time - start_time + metrics["total_objective_value"] = solution.get_total_objective() + metrics["vehicle_count"] = solution.get_vehicle_count() + + objectives = solution.get_objective_values() + if "prize" in required_metrics: + metrics["prize"] = objectives[routing.Objective.PRIZE] + if "cost" in required_metrics: + metrics["cost"] = objectives[routing.Objective.COST] + if "travel_time" in required_metrics: + metrics["travel_time"] = objectives[routing.Objective.TRAVEL_TIME] + + record_result(test_name, metrics, required_metrics, csv_path, "routing") + + return "SUCCESS" if success_status is True else "FAILED" + + +def reinitialize_rmm(): + + pool_size = 2**30 + rmm.reinitialize(pool_allocator=True, initial_pool_size=pool_size) + + base_mr = rmm.mr.get_current_device_resource() + stats_mr = 
rmm.mr.StatisticsResourceAdaptor(base_mr) + rmm.mr.set_current_device_resource(stats_mr) + + return base_mr, stats_mr + + +def run(config_file_path, csv_path, git_commit, log_path, test_status_file): + + config_files = glob.glob(config_file_path + "/*_config.json") + + for config in config_files: + + mr, stats_mr = reinitialize_rmm() + test_name = str(config) + status = "FAILED" + try: + + test_name, data_model, solver_settings, requested_metrics = get_configuration(config, config_file_path) + + log.basicConfig(level=log.INFO, filename=log_path+"/"+test_name+"_log.txt", filemode="a+", + format="%(asctime)-15s %(levelname)-8s %(message)s") + log.info(f"------------- Test Start : {test_name} -------------------") + + status = run_benchmark( + test_name, + data_model, + solver_settings, + requested_metrics, + csv_path, + git_commit, + test_status_file + ) + + except Exception as e: + log.error(str(e)) + + with open(test_status_file, "a") as f: + f.write("\n") + f.write(test_name +": " + status) + + # Delete instance of rmm + del mr + del stats_mr + + log.info(f"------------- Test End : {test_name} -------------------") + +if __name__ == "__main__": + + parser = argparse.ArgumentParser() + + parser.add_argument( + "-c", "--config-path", type=str, help="Path to all configuration file" + ) + parser.add_argument( + "-r", "--csv-path", type=str, help="Path to store result files, this would be for github where results gets stored" + ) + parser.add_argument( + "-g", "--git-commit", type=str, help="git commit sha to keep track of runs" + ) + parser.add_argument( + "-l", "--log-path", type=str, help="Path to log files" + ) + parser.add_argument( + "-s", "--test-status-file", type=str, help="All test status will be stored in this file" + ) + + args = parser.parse_args() + run(args.config_path, args.csv_path, args.git_commit, args.log_path, args.test_status_file) diff --git a/regression/benchmark_scripts/configs/README.md b/regression/benchmark_scripts/configs/README.md new file 
mode 100644 index 000000000..765ab5cde --- /dev/null +++ b/regression/benchmark_scripts/configs/README.md @@ -0,0 +1,12 @@ +# Creating configuration and data file + +- For each test, create a configuration file and a corresponding data file. +- Refer `test_name_confg.json` for the format of the configuration file. +- Supported metrics can be found in `cuopt/regression/benchmark_scripts/utils.py` +- File names should start with test names followed by `config` or data depending on type of it. +- Data file should be as per openapi spec of cuopt server +- These configuration and data files needs to be uploaded to `s3://cuopt-datasets/regression_datasets/` + + ``` + aws s3 cp /path/to/files s3://cuopt-datasets/regression_datasets/ + ``` diff --git a/regression/benchmark_scripts/configs/example_test_creation.py b/regression/benchmark_scripts/configs/example_test_creation.py new file mode 100644 index 000000000..9d07d3912 --- /dev/null +++ b/regression/benchmark_scripts/configs/example_test_creation.py @@ -0,0 +1,70 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. +# CONFIDENTIAL, provided under NDA. + +import cuopt +from cuopt import routing +from cuopt.routing import utils +import json + +""" +This is an example of creating a modified test from Homberger dataset. +In this test, the RC2_10_5 test is modified so that the vehicle count is reduced to 12 and the order prizes are set. +The prizes are high enough so that prize always becomes the primary objective. 
+One can easily use an existing json file and modify the data as well by loading the json as a dictionary +""" +test_name = "prize_collection_vrp" +# test_name = "LC1_10_9" + +# base_file_name = "/home/nfs/rgandham/git-repos/reopt/datasets/pdptw/LC1_10_9.pdptw" +base_file_name = "/home/nfs/rgandham/git-repos/reopt/datasets/cvrptw/RC2_10_5.TXT" + +# model_dict = utils.create_model_dictionary_from_file(base_file_name, is_pdp=True) +model_dict = utils.create_model_dictionary_from_file(base_file_name) + + +# Reduce the fleet size to 12 +num_vehicles = 12 +fleet_data = model_dict["fleet_data"] + +vehicle_locations = fleet_data["vehicle_locations"] +vehicle_tw = fleet_data["vehicle_time_windows"] +capacities = fleet_data["capacities"] + +new_locs = [vehicle_locations[i] for i in range(num_vehicles)] +new_tw = [vehicle_tw[i] for i in range(num_vehicles)] +new_cap = [[capacities[0][i] for i in range(num_vehicles)]] * 1 + +fleet_data["vehicle_locations"] = new_locs +fleet_data["vehicle_time_windows"] = new_tw +fleet_data["capacities"] = new_cap + +# Add prizes +task_data = model_dict["task_data"] + +n_tasks = len(task_data["demand"][0]) + +prizes = [10000.0] * n_tasks +task_data["prizes"] = prizes + + +# Set 10 min time limit +solver_config = {} +solver_config["time_limit"] = 600 + +model_dict["solver_config"] = solver_config + +test_config_file_name = test_name + "_config.json" +model_data_file_name = test_name + "_data.json" + +test_config = {} +test_config["test_name"] = test_name +test_config["file_name"] = model_data_file_name +test_config["metrics"] = ["vehicle_count", "total_cost", "prize", "memory_usage"] + +with open(test_config_file_name, 'w') as fp: + json.dump(test_config, fp) + fp.close() + +with open(model_data_file_name, 'w') as fp: + json.dump(model_dict, fp) + fp.close() \ No newline at end of file diff --git a/regression/benchmark_scripts/configs/test_name_config.json b/regression/benchmark_scripts/configs/test_name_config.json new file mode 100644 index 
000000000..d87b7bc84 --- /dev/null +++ b/regression/benchmark_scripts/configs/test_name_config.json @@ -0,0 +1,23 @@ +{ + "test_name": "test_name", + "file_name": "test_name_data.json", + "metrics": { + "total_objective_value": { + "threshold": 5, + "unit": "total_objective_value" + }, + "vehicle_count": { + "threshold": 5, + "unit": "vehicle_count" + }, + "solver_time": { + "threshold": 5, + "unit": "seconds" + }, + "gpu_memory_usage": { + "threshold": 20, + "unit": "MB" + } + }, + "details": "Add details about you test" +} diff --git a/regression/benchmark_scripts/configs/test_name_data.json b/regression/benchmark_scripts/configs/test_name_data.json new file mode 100644 index 000000000..e6918ad58 --- /dev/null +++ b/regression/benchmark_scripts/configs/test_name_data.json @@ -0,0 +1,117 @@ +{ + "cost_waypoint_graph_data": { + "waypoint_graph": null + }, + "travel_time_waypoint_graph_data": { + "waypoint_graph": null + }, + "cost_matrix_data": { + "data": { + "0": [ + [ + 0, + 1, + 1 + ], + [ + 1, + 0, + 1 + ], + [ + 1, + 1, + 0 + ] + ] + } + }, + "travel_time_matrix_data": { + "data": null + }, + "task_data": { + "task_locations": [ + 0, + 1, + 2 + ], + "demand": [ + [ + 0, + 1, + 1 + ], + [ + 0, + 3, + 1 + ] + ], + "task_time_windows": [ + [ + 0, + 10 + ], + [ + 0, + 4 + ], + [ + 2, + 4 + ] + ], + "service_times": [ + 0, + 1, + 1 + ] + }, + "fleet_data": { + "vehicle_locations": [ + [ + 0, + 0 + ], + [ + 0, + 0 + ] + ], + "capacities": [ + [ + 2, + 2 + ], + [ + 4, + 1 + ] + ], + "vehicle_time_windows": [ + [ + 0, + 10 + ], + [ + 0, + 10 + ] + ], + "skip_first_trips": [ + false, + false + ], + "drop_return_trips": [ + false, + false + ], + "vehicle_max_costs": [ + 20, + 20 + ] + }, + "solver_config": { + "time_limit": 10 + } +} diff --git a/regression/benchmark_scripts/results/test_name.csv b/regression/benchmark_scripts/results/test_name.csv new file mode 100644 index 000000000..85bf3d976 --- /dev/null +++ b/regression/benchmark_scripts/results/test_name.csv @@ 
-0,0 +1,2 @@ +,solver_time,total_objective_value,vehicle_count +0,10.004132270812988,3.0,1 diff --git a/regression/benchmark_scripts/transform.py b/regression/benchmark_scripts/transform.py new file mode 100644 index 000000000..29384e5e2 --- /dev/null +++ b/regression/benchmark_scripts/transform.py @@ -0,0 +1,115 @@ +#!/usr/bin/python + +# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +import argparse +from pathlib import Path +import json +import cuopt_mps_parser + +def _mps_parse(LP_problem_data, tolerances, time_limit, iteration_limit): + + if isinstance(LP_problem_data, cuopt_mps_parser.parser_wrapper.DataModel): + model = LP_problem_data + else: + model = cuopt_mps_parser.ParseMps(LP_problem_data) + + problem_data = cuopt_mps_parser.toDict(model, json=True) + #variable_names = problem_data.pop("variable_names") + + problem_data["solver_config"] = {} + if tolerances is not None: + problem_data["solver_config"]["tolerances"] = tolerances + if time_limit is not None: + problem_data["solver_config"]["time_limit"] = time_limit + if iteration_limit is not None: + problem_data["solver_config"]["iteration_limit"] = iteration_limit + return problem_data + + +def create_config_and_data(input_directory, file_name, output_directory, prefix, time_limit=None, tolerances=None, iteration_limit=None): + + file_path = input_directory/file_name + data = _mps_parse(file_path.as_posix(), tolerances, time_limit, iteration_limit) + + base_file_name = file_name.split(".")[0] + + 
config_file_name = prefix +"_" +base_file_name+"_config.json" + data_file_name = prefix +"_" +base_file_name+"_data.json" + + config_data = { + "test_name": prefix +"_" +base_file_name, + "file_name": data_file_name, + "metrics": { + "primal_objective_value": { + "threshold": 1, + "unit": "primal_objective_value", + }, + "solver_time": { + "threshold": 1, + "unit": "seconds" + } + }, + "details": base_file_name + " test" + } + + with open(output_directory/config_file_name, "w") as fp: + json.dump(config_data, fp, indent=4, sort_keys=True) + + with open(output_directory/data_file_name, "w") as fp: + json.dump(data, fp, indent=4, sort_keys=True) + + +if __name__ == "__main__": + + parser = argparse.ArgumentParser( + description="Solve a cuOpt problem using a managed service client." + ) + parser.add_argument( + "folder", + type=str, + help="Folder path" + ) + parser.add_argument( + "-o", + "--output", + type=str, + default="", + help="Output folder path" + ) + parser.add_argument( + "-tl", + "--time-limit", + default=None, + type=int, + help="LP timit in milliseconds" + ) + parser.add_argument( + "-p", + "--prefix", + type=str, + default="", + help="Prefix for config and data" + ) + + args = parser.parse_args() + input_directory = Path(args.folder) + output_directory = Path(args.output) + + # List all files with .mps extension + mps_files = [f.name for f in input_directory.glob('*.mps')] + list_of_files = [ + "50v-10", "lotsize", "swath1", "nursesched-medium-hint03", "academictimetablesmall", "dano3_3", + "neos-4338804-snowy", "istanbul-no-cutoff", "s100", "traininstance2" + ] + for mps_file in mps_files: + if mps_file.split(".")[0] in list_of_files: + create_config_and_data(input_directory, mps_file, output_directory, args.prefix, args.time_limit) diff --git a/regression/benchmark_scripts/utils.py b/regression/benchmark_scripts/utils.py new file mode 100644 index 000000000..eeb48b990 --- /dev/null +++ b/regression/benchmark_scripts/utils.py @@ -0,0 +1,165 @@ +# 
SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +#from cuopt_server.utils.utils import build_routing_datamodel_from_json, build_lp_datamodel_from_json +from cuopt import routing + +import os +import json +from typing import NamedTuple + + + +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # noqa +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import json
import os

from cuopt_server.utils.job_queue import SolverLPJob
from cuopt_server.utils.linear_programming.data_definition import LPData
from cuopt_server.utils.linear_programming.solver import (
    create_data_model as lp_create_data_model,
    create_solver as lp_create_solver,
)
from cuopt_server.utils.routing.data_definition import OptimizedRoutingData
from cuopt_server.utils.routing.solver import (
    create_data_model as routing_create_data_model,
    create_solver as routing_create_solver,
    prep_optimization_data as routing_prep_optimization_data,
)
from cuopt_server.utils.solver import populate_optimization_data


def build_routing_datamodel_from_json(data):
    """Build a routing ``(data_model, solver_settings)`` pair.

    Parameters
    ----------
    data : dict or str
        A dictionary following the open-api routing spec, or a path to a
        JSON file containing one.

    Returns
    -------
    tuple
        ``(data_model, solver_settings)`` ready for the routing solver.

    Raises
    ------
    ValueError
        If ``data`` is neither a dict nor a path to an existing file.
    """
    if isinstance(data, dict):
        pass
    elif os.path.isfile(data):
        with open(data, "r") as f:
            data = dict(OptimizedRoutingData.parse_obj(json.loads(f.read())))
    else:
        raise ValueError(
            f"Invalid type : {type(data)} has been provided as input, "
            "requires json input"
        )

    optimization_data = populate_optimization_data(**data)
    (
        optimization_data,
        cost_matrix,
        travel_time_matrix,
        _,
    ) = routing_prep_optimization_data(optimization_data)
    _, data_model = routing_create_data_model(
        optimization_data,
        cost_matrix=cost_matrix,
        travel_time_matrix=travel_time_matrix,
    )

    _, solver_settings = routing_create_solver(optimization_data)

    return data_model, solver_settings


def build_lp_datamodel_from_json(data):
    """Build an LP ``(data_model, solver_settings)`` pair.

    Parameters
    ----------
    data : dict or str
        A dictionary following the open-api LP spec, or a path to a JSON
        file containing one.

    Returns
    -------
    tuple
        ``(data_model, solver_settings)`` ready for the LP solver.

    Raises
    ------
    ValueError
        If ``data`` is neither a dict nor a path to an existing file.
    """
    if isinstance(data, dict):
        data = LPData.parse_obj(data)
    elif os.path.isfile(data):
        with open(data, "r") as f:
            data = json.loads(f.read())
        # Remove this once we support variable names.  Default to None so
        # files that lack the key do not raise KeyError.
        data.pop("variable_names", None)
        data = LPData.parse_obj(data)
    else:
        raise ValueError(
            f"Invalid type : {type(data)} has been provided as input, "
            "requires json input"
        )

    # The id and warnings are placeholders; only the data-transform and
    # accessor methods of SolverLPJob are exercised here.
    stub_id = 9999
    stub_warnings = []
    job = SolverLPJob(stub_id, data, None, stub_warnings)
    # transform data into digestible format
    job._transform(job.LP_data)
    data = job.get_data()

    _, data_model = lp_create_data_model(data)
    _, solver_settings = lp_create_solver(data, None)

    return data_model, solver_settings


class RoutingMetrics(NamedTuple):
    """Per-run routing benchmark metrics; -1 / "" mark unset fields."""

    total_objective_value: float = -1
    vehicle_count: int = -1
    cost: float = -1
    prize: float = -1
    travel_time: float = -1
    solver_time: float = -1
    gpu_memory_usage: float = -1
    git_commit: str = ""
    date_time: str = ""


class LPMetrics(NamedTuple):
    """Per-run LP benchmark metrics; -1 / "" mark unset fields."""

    primal_objective_value: float = -1
    solver_time: float = -1
    gpu_memory_usage: float = -1
    git_commit: str = ""
    date_time: str = ""


def get_configuration(config_file, data_file_path):
    """Load a benchmark config and build the matching solver inputs.

    Parameters
    ----------
    config_file : str
        Path to a ``*_config.json`` benchmark description.
    data_file_path : str
        Directory containing the data file named by the config.

    Returns
    -------
    tuple
        ``(test_name, data_model, solver_settings, requested_metrics)``.

    Raises
    ------
    ValueError
        If ``config_file`` does not exist.
    """
    if os.path.isfile(config_file):
        with open(config_file) as f:
            data = json.load(f)
    else:
        # Report the offending path rather than the type of an empty local.
        raise ValueError(
            f"Invalid config file : {config_file} has been provided as "
            "input, requires json input"
        )

    test_name = data["test_name"]

    data_file = os.path.join(data_file_path, data["file_name"])
    # LP_/MIP_ prefixed data files follow the LP open-api spec; everything
    # else is treated as routing data.
    if data["file_name"].startswith(("LP_", "MIP_")):
        data_model, solver_settings = build_lp_datamodel_from_json(data_file)
    else:
        data_model, solver_settings = build_routing_datamodel_from_json(
            data_file
        )

    requested_metrics = data["metrics"]

    return test_name, data_model, solver_settings, requested_metrics
index 000000000..edcc31a38 --- /dev/null +++ b/regression/benchmarks/clear_optimization_data.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +0.0041052934131585,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0026461367844603,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0031441673054359,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0032693036133423,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +0.0109413468002458,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/full_solve_matrix_data_off_10.csv b/regression/benchmarks/full_solve_matrix_data_off_10.csv new file mode 100644 index 000000000..4b3e08e17 --- /dev/null +++ b/regression/benchmarks/full_solve_matrix_data_off_10.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +0.092595966591034,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0909385792911052,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0861730184755288,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.084613940725103,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +0.1967066440993221,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/full_solve_matrix_data_off_1000.csv b/regression/benchmarks/full_solve_matrix_data_off_1000.csv new file mode 100644 index 000000000..5bd7062b6 --- /dev/null +++ b/regression/benchmarks/full_solve_matrix_data_off_1000.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +2.898428950691596,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +2.7740834328811617,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +2.7887534837936983,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +2.732585116138216,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +8.018868986098823,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git 
a/regression/benchmarks/full_solve_matrix_data_on_10.csv b/regression/benchmarks/full_solve_matrix_data_on_10.csv new file mode 100644 index 000000000..d10214bf0 --- /dev/null +++ b/regression/benchmarks/full_solve_matrix_data_on_10.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +0.0967919212067499,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0893040685099549,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0923860481823794,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0971072109881788,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +0.2034532838006271,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/full_solve_matrix_data_on_1000.csv b/regression/benchmarks/full_solve_matrix_data_on_1000.csv new file mode 100644 index 000000000..fe6a4623b --- /dev/null +++ b/regression/benchmarks/full_solve_matrix_data_on_1000.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +11.469153839105276,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +11.13433374390006,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +11.183070917613804,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +11.18312375949463,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +33.7928514968,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/full_solve_sync_matrix_data_off_10.csv b/regression/benchmarks/full_solve_sync_matrix_data_off_10.csv new file mode 100644 index 000000000..2ac3c367e --- /dev/null +++ b/regression/benchmarks/full_solve_sync_matrix_data_off_10.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +0.0691923394217155,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0651698967907577,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0671704228967428,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf 
+0.0672970376908779,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +0.1821914255015144,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/full_solve_sync_matrix_data_off_1000.csv b/regression/benchmarks/full_solve_sync_matrix_data_off_1000.csv new file mode 100644 index 000000000..32fbe04e0 --- /dev/null +++ b/regression/benchmarks/full_solve_sync_matrix_data_off_1000.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +3.606089364201762,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +3.4653110398212448,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +3.4959583321004173,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +3.5713294571847656,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +10.516578904900234,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/full_solve_sync_wpg_data_off_10.csv b/regression/benchmarks/full_solve_sync_wpg_data_off_10.csv new file mode 100644 index 000000000..d58e1150a --- /dev/null +++ b/regression/benchmarks/full_solve_sync_wpg_data_off_10.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +0.0859221719903871,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0878685727715492,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0826912648160941,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0830997216049581,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +0.2035729169008845,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/full_solve_sync_wpg_data_off_1000.csv b/regression/benchmarks/full_solve_sync_wpg_data_off_1000.csv new file mode 100644 index 000000000..d211a427e --- /dev/null +++ b/regression/benchmarks/full_solve_sync_wpg_data_off_1000.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +4.490188621985726,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf 
+4.402784487907775,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +4.422826303972397,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +4.39203272620216,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +13.783291200599342,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/full_solve_wpg_data_off_10.csv b/regression/benchmarks/full_solve_wpg_data_off_10.csv new file mode 100644 index 000000000..975fce6f7 --- /dev/null +++ b/regression/benchmarks/full_solve_wpg_data_off_10.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +0.102928383112885,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.1016542179044336,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0938111932831816,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.1761252559022978,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +0.2027525610988959,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/full_solve_wpg_data_off_1000.csv b/regression/benchmarks/full_solve_wpg_data_off_1000.csv new file mode 100644 index 000000000..47b32ea06 --- /dev/null +++ b/regression/benchmarks/full_solve_wpg_data_off_1000.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +4.142862814525143,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +3.972703066887334,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +3.9499765183078126,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +3.935864896082785,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +11.48112585589988,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/get_optimization_data_state_empty.csv b/regression/benchmarks/get_optimization_data_state_empty.csv new file mode 100644 index 000000000..bead231d4 --- /dev/null +++ b/regression/benchmarks/get_optimization_data_state_empty.csv @@ -0,0 +1,6 @@ 
+run_time,date_time,commit_hash +0.0043799643055535,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0028748559881933,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0033562235999852,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0031923993839882,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +0.0073803732979285,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/health.csv b/regression/benchmarks/health.csv new file mode 100644 index 000000000..8d26fdb5e --- /dev/null +++ b/regression/benchmarks/health.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +0.0044362302869558,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0026844964013434,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0031451011775061,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0029689648887142,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +0.0067414406003081,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/homberger_C1_10_1.csv b/regression/benchmarks/homberger_C1_10_1.csv new file mode 100644 index 000000000..08e2ebe14 --- /dev/null +++ b/regression/benchmarks/homberger_C1_10_1.csv @@ -0,0 +1,22 @@ +status,solver_run_time,etl_time,memory,err_msg,time_stamp,travel_cost,commit_hash +SUCCESS,202.94488906860352,6.14421534538269,3852,Try relaxing Time Window constraints,01_18_2023_13_24_39,42479.20703125,4a550c4d8a18beddaef2884e3321b54046d3ca5a +SUCCESS,200.23936939239505,6.696606159210205,3852,Try relaxing Time Window constraints,01_18_2023_14_32_48,42479.20703125,4a550c4d8a18beddaef2884e3321b54046d3ca5a +SUCCESS,202.26948261260983,5.718092441558838,3852,Try relaxing Time Window constraints,01_18_2023_16_24_33,42479.20703125,4a550c4d8a18beddaef2884e3321b54046d3ca5a +SUCCESS,202.1016023159027,6.568071603775024,3852,Try relaxing Time Window 
constraints,01_19_2023_00_36_42,42479.20703125,4a550c4d8a18beddaef2884e3321b54046d3ca5a +SUCCESS,200.4102230072021,4.566569805145264,3852,Try relaxing Time Window constraints,01_20_2023_10_12_50,42479.20703125,d8b5ac3dee312da96440a4879c7f258d955cae53 +SUCCESS,202.70853686332703,6.867353677749634,3852,Try relaxing Time Window constraints,01_21_2023_00_53_04,42479.20703125,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 +SUCCESS,200.31551885604856,5.373513460159302,3852,Try relaxing Time Window constraints,01_22_2023_00_14_13,42479.20703125,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 +SUCCESS,202.78480195999143,4.6431286334991455,3852,Try relaxing Time Window constraints,01_23_2023_00_16_16,42479.20703125,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 +SUCCESS,202.963894367218,5.030569076538086,3852,Try relaxing Time Window constraints,01_24_2023_00_23_59,42479.20703125,1e0f2faebb52900fd0a678d5e0a2b1b93112820d +SUCCESS,200.27961683273315,4.810116767883301,3852,Try relaxing Time Window constraints,01_24_2023_08_00_08,42479.20703125,1e0f2faebb52900fd0a678d5e0a2b1b93112820d +SUCCESS,202.98268461227417,5.974777460098267,3852,Try relaxing Time Window constraints,01_24_2023_08_50_41,42479.20703125,1e0f2faebb52900fd0a678d5e0a2b1b93112820d +SUCCESS,202.81959056854248,5.531239986419678,3852,Try relaxing Time Window constraints,01_25_2023_00_39_29,42479.20703125,1e0f2faebb52900fd0a678d5e0a2b1b93112820d +SUCCESS,200.29493069648743,4.704516410827637,3852,Try relaxing Time Window constraints,01_26_2023_00_16_03,42479.20703125,cec6da5b7083e002441900cfca186292f481936f +SUCCESS,202.062420129776,4.859360218048096,3852,Try relaxing Time Window constraints,01_27_2023_00_23_38,42479.20703125,a51fe6c34510cd20b93ec6cfc15b8509d0babfca +SUCCESS,202.6819722652436,7.775798320770264,3852,Try relaxing Time Window constraints,01_28_2023_00_46_05,42479.20703125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,202.8960475921631,7.011343240737915,3852,Try relaxing Time Window 
constraints,01_29_2023_00_58_14,42479.20703125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,203.09712195396423,5.212660074234009,3852,Try relaxing Time Window constraints,01_30_2023_00_47_19,42479.20703125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,202.71639442443848,6.375207424163818,3852,Try relaxing Time Window constraints,01_30_2023_12_41_09,42479.20703125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,202.4589099884033,5.115618705749512,3852,Try relaxing Time Window constraints,01_30_2023_15_50_22,42479.20703125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,202.56271409988403,5.815870761871338,3852,Try relaxing Time Window constraints,01_31_2023_01_10_25,42479.20703125,3354f792963b368a476c07ef11e1fcd66ca5ea5f +SUCCESS,203.2009189128876,12.784011125564575,3852,Try relaxing Time Window constraints,02_01_2023_00_28_37,42480.3984375,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/homberger_C1_10_4.csv b/regression/benchmarks/homberger_C1_10_4.csv new file mode 100644 index 000000000..d01fd6f6a --- /dev/null +++ b/regression/benchmarks/homberger_C1_10_4.csv @@ -0,0 +1,22 @@ +status,solver_run_time,etl_time,memory,err_msg,time_stamp,travel_cost,commit_hash +SUCCESS,202.4027383327484,4.14943790435791,3824,Try relaxing Time Window constraints,01_18_2023_13_24_39,41629.4140625,4a550c4d8a18beddaef2884e3321b54046d3ca5a +SUCCESS,202.7299354076385,3.9376816749572754,3824,Try relaxing Time Window constraints,01_18_2023_14_32_48,41629.4140625,4a550c4d8a18beddaef2884e3321b54046d3ca5a +SUCCESS,202.4123239517212,4.010818243026733,3824,Try relaxing Time Window constraints,01_18_2023_16_24_33,41629.4140625,4a550c4d8a18beddaef2884e3321b54046d3ca5a +SUCCESS,202.4237217903137,3.919779777526856,3824,Try relaxing Time Window constraints,01_19_2023_00_36_42,41629.4140625,4a550c4d8a18beddaef2884e3321b54046d3ca5a +SUCCESS,202.888254404068,3.9284555912017822,3824,Try relaxing Time Window 
constraints,01_20_2023_10_12_50,41629.4140625,d8b5ac3dee312da96440a4879c7f258d955cae53 +SUCCESS,202.5099935531616,3.868138551712036,3824,Try relaxing Time Window constraints,01_21_2023_00_53_04,41629.4140625,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 +SUCCESS,202.51063680648804,4.160922050476074,3824,Try relaxing Time Window constraints,01_22_2023_00_14_13,41629.4140625,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 +SUCCESS,202.65994668006897,3.8936829566955566,3824,Try relaxing Time Window constraints,01_23_2023_00_16_16,41629.4140625,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 +SUCCESS,202.6500322818756,4.055036783218384,3824,Try relaxing Time Window constraints,01_24_2023_00_23_59,41629.4140625,1e0f2faebb52900fd0a678d5e0a2b1b93112820d +SUCCESS,202.8837172985077,3.979902029037476,3824,Try relaxing Time Window constraints,01_24_2023_08_00_08,41629.4140625,1e0f2faebb52900fd0a678d5e0a2b1b93112820d +SUCCESS,202.54010653495789,4.112768888473511,3824,Try relaxing Time Window constraints,01_24_2023_08_50_41,41629.4140625,1e0f2faebb52900fd0a678d5e0a2b1b93112820d +SUCCESS,202.3439211845398,4.057533264160156,3824,Try relaxing Time Window constraints,01_25_2023_00_39_29,41629.4140625,1e0f2faebb52900fd0a678d5e0a2b1b93112820d +SUCCESS,202.5833644866944,4.075305938720703,3824,Try relaxing Time Window constraints,01_26_2023_00_16_03,41629.4140625,cec6da5b7083e002441900cfca186292f481936f +SUCCESS,202.6500098705292,4.032953500747681,3824,Try relaxing Time Window constraints,01_27_2023_00_23_38,41629.4140625,a51fe6c34510cd20b93ec6cfc15b8509d0babfca +SUCCESS,202.9884734153748,5.518364906311035,3824,Try relaxing Time Window constraints,01_28_2023_00_46_05,41629.4140625,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,202.73613834381104,4.132292747497559,3824,Try relaxing Time Window constraints,01_29_2023_00_58_14,41629.4140625,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,202.3717851638794,3.8987321853637695,3824,Try relaxing Time Window 
constraints,01_30_2023_00_47_19,41629.4140625,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,202.979766368866,5.730100393295288,3824,Try relaxing Time Window constraints,01_30_2023_12_41_09,41629.4140625,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,202.46156549453733,4.168130397796631,3824,Try relaxing Time Window constraints,01_30_2023_15_50_22,41629.4140625,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,202.65178418159485,4.034475564956665,3824,Try relaxing Time Window constraints,01_31_2023_01_10_25,41629.4140625,3354f792963b368a476c07ef11e1fcd66ca5ea5f +SUCCESS,206.1987981796265,10.41266632080078,3824,Try relaxing Time Window constraints,02_01_2023_00_28_37,41802.203125,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/homberger_C1_10_9.csv b/regression/benchmarks/homberger_C1_10_9.csv new file mode 100644 index 000000000..29f444004 --- /dev/null +++ b/regression/benchmarks/homberger_C1_10_9.csv @@ -0,0 +1,22 @@ +status,solver_run_time,etl_time,memory,err_msg,time_stamp,travel_cost,commit_hash +SUCCESS,202.1899116039276,4.372752666473389,3813,Try relaxing Time Window constraints,01_18_2023_13_24_39,42345.01953125,4a550c4d8a18beddaef2884e3321b54046d3ca5a +SUCCESS,201.80366277694705,4.024761915206909,3813,Try relaxing Time Window constraints,01_18_2023_14_32_48,42345.01953125,4a550c4d8a18beddaef2884e3321b54046d3ca5a +SUCCESS,202.53451490402225,4.239882469177246,3813,Try relaxing Time Window constraints,01_18_2023_16_24_33,42345.01953125,4a550c4d8a18beddaef2884e3321b54046d3ca5a +SUCCESS,202.2297282218933,4.069121360778809,3813,Try relaxing Time Window constraints,01_19_2023_00_36_42,42345.01953125,4a550c4d8a18beddaef2884e3321b54046d3ca5a +SUCCESS,202.15557503700256,3.991995334625244,3813,Try relaxing Time Window constraints,01_20_2023_10_12_50,42345.01953125,d8b5ac3dee312da96440a4879c7f258d955cae53 +SUCCESS,202.0832393169403,4.107094764709473,3813,Try relaxing Time Window 
constraints,01_21_2023_00_53_04,42345.01953125,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 +SUCCESS,202.35359001159668,4.070865869522095,3813,Try relaxing Time Window constraints,01_22_2023_00_14_13,42345.01953125,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 +SUCCESS,202.113706111908,3.918311357498169,3813,Try relaxing Time Window constraints,01_23_2023_00_16_16,42345.01953125,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 +SUCCESS,202.28220200538635,4.163920402526856,3813,Try relaxing Time Window constraints,01_24_2023_00_23_59,42345.01953125,1e0f2faebb52900fd0a678d5e0a2b1b93112820d +SUCCESS,202.4687077999115,3.939903259277344,3813,Try relaxing Time Window constraints,01_24_2023_08_00_08,42345.01953125,1e0f2faebb52900fd0a678d5e0a2b1b93112820d +SUCCESS,202.31099653244016,3.9958384037017822,3813,Try relaxing Time Window constraints,01_24_2023_08_50_41,42345.01953125,1e0f2faebb52900fd0a678d5e0a2b1b93112820d +SUCCESS,202.6687958240509,4.282079219818115,3813,Try relaxing Time Window constraints,01_25_2023_00_39_29,42345.01953125,1e0f2faebb52900fd0a678d5e0a2b1b93112820d +SUCCESS,202.5248498916626,4.124437808990479,3813,Try relaxing Time Window constraints,01_26_2023_00_16_03,42345.01953125,cec6da5b7083e002441900cfca186292f481936f +SUCCESS,202.1295850276947,3.97187089920044,3813,Try relaxing Time Window constraints,01_27_2023_00_23_38,42345.01953125,a51fe6c34510cd20b93ec6cfc15b8509d0babfca +SUCCESS,202.0128083229065,5.610944032669067,3813,Try relaxing Time Window constraints,01_28_2023_00_46_05,42345.01953125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,202.4298839569092,4.143335103988648,3813,Try relaxing Time Window constraints,01_29_2023_00_58_14,42345.01953125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,202.29705381393435,3.988018989562988,3813,Try relaxing Time Window constraints,01_30_2023_00_47_19,42345.01953125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,202.0680272579193,5.667192459106445,3813,Try relaxing Time Window 
constraints,01_30_2023_12_41_09,42345.01953125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,202.2976925373077,4.1305906772613525,3813,Try relaxing Time Window constraints,01_30_2023_15_50_22,42345.01953125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,202.4270453453064,4.296361923217773,3813,Try relaxing Time Window constraints,01_31_2023_01_10_25,42345.01953125,3354f792963b368a476c07ef11e1fcd66ca5ea5f +SUCCESS,207.36984372138977,11.430570125579834,3813,Try relaxing Time Window constraints,02_01_2023_00_28_37,42923.703125,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/linlim_LC1_10_1.csv b/regression/benchmarks/linlim_LC1_10_1.csv new file mode 100644 index 000000000..325e0dc75 --- /dev/null +++ b/regression/benchmarks/linlim_LC1_10_1.csv @@ -0,0 +1,21 @@ +status,solver_run_time,etl_time,memory,err_msg,time_stamp,travel_cost,commit_hash +SUCCESS,234.6376609802246,0.7243020534515381,1392,Try relaxing Time Window constraints,01_18_2023_13_35_08,132510.390625,4a550c4d8a18beddaef2884e3321b54046d3ca5a +SUCCESS,228.23448133468628,0.7411677837371826,1391,Try relaxing Time Window constraints,01_18_2023_14_43_14,132511.03125,4a550c4d8a18beddaef2884e3321b54046d3ca5a +SUCCESS,228.23107290267944,0.7489080429077148,1392,Try relaxing Time Window constraints,01_19_2023_00_47_10,132488.65625,4a550c4d8a18beddaef2884e3321b54046d3ca5a +SUCCESS,220.52945804595947,0.7301907539367676,1389,Try relaxing Time Window constraints,01_20_2023_10_23_12,132684.640625,d8b5ac3dee312da96440a4879c7f258d955cae53 +SUCCESS,233.1353704929352,0.7111175060272217,1392,Try relaxing Time Window constraints,01_21_2023_01_03_36,132511.03125,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 +SUCCESS,224.9602243900299,0.6813702583312988,1391,Try relaxing Time Window constraints,01_22_2023_00_24_36,132632.1875,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 +SUCCESS,229.9897944927216,0.6986827850341797,1392,Try relaxing Time Window 
constraints,01_23_2023_00_26_41,132555.84375,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 +SUCCESS,228.47381496429443,0.7829642295837402,1392,Try relaxing Time Window constraints,01_24_2023_00_34_26,132488.65625,1e0f2faebb52900fd0a678d5e0a2b1b93112820d +SUCCESS,237.6755757331848,0.7272443771362305,1392,Try relaxing Time Window constraints,01_24_2023_08_10_31,132555.84375,1e0f2faebb52900fd0a678d5e0a2b1b93112820d +SUCCESS,228.05768513679504,0.7051000595092773,1391,Try relaxing Time Window constraints,01_24_2023_09_01_07,132489.3125,1e0f2faebb52900fd0a678d5e0a2b1b93112820d +SUCCESS,226.24537992477417,0.737703800201416,1391,Try relaxing Time Window constraints,01_25_2023_00_50_09,132577.5625,1e0f2faebb52900fd0a678d5e0a2b1b93112820d +SUCCESS,210.6106126308441,0.7423005104064941,1389,Try relaxing Time Window constraints,01_26_2023_00_26_27,132488.65625,cec6da5b7083e002441900cfca186292f481936f +SUCCESS,215.53656339645383,0.7288351058959961,1391,Try relaxing Time Window constraints,01_27_2023_00_34_04,132556.5,a51fe6c34510cd20b93ec6cfc15b8509d0babfca +SUCCESS,212.8446328639984,1.0236175060272217,1389,Try relaxing Time Window constraints,01_28_2023_00_56_38,42806.44921875,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,212.08036494255063,0.7389419078826904,1388,Try relaxing Time Window constraints,01_29_2023_01_08_43,42489.30859375,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,215.55140352249143,0.7778847217559814,1388,Try relaxing Time Window constraints,01_30_2023_00_57_46,42488.65625,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,213.6677827835083,1.0266873836517334,1388,Try relaxing Time Window constraints,01_30_2023_12_51_42,42610.46875,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,214.6161751747132,0.7036209106445312,1388,Try relaxing Time Window constraints,01_30_2023_16_00_48,42510.3828125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,206.1508820056916,0.712501049041748,1389,Try relaxing Time Window 
constraints,01_31_2023_01_20_53,42510.3828125,3354f792963b368a476c07ef11e1fcd66ca5ea5f +SUCCESS,260.46342182159424,2.3488495349884038,1388,Try relaxing Time Window constraints,02_01_2023_00_39_43,43458.0546875,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/linlim_LC1_10_4.csv b/regression/benchmarks/linlim_LC1_10_4.csv new file mode 100644 index 000000000..b3a181f1f --- /dev/null +++ b/regression/benchmarks/linlim_LC1_10_4.csv @@ -0,0 +1,21 @@ +status,solver_run_time,etl_time,memory,err_msg,time_stamp,travel_cost,commit_hash +SUCCESS,228.4590950012207,0.4174575805664062,1366,Try relaxing Time Window constraints,01_18_2023_13_35_08,134437.984375,4a550c4d8a18beddaef2884e3321b54046d3ca5a +SUCCESS,230.9396343231201,0.4012937545776367,1363,Try relaxing Time Window constraints,01_18_2023_14_43_14,136835.859375,4a550c4d8a18beddaef2884e3321b54046d3ca5a +SUCCESS,225.7199718952179,0.413813829421997,1365,Try relaxing Time Window constraints,01_19_2023_00_47_10,135084.53125,4a550c4d8a18beddaef2884e3321b54046d3ca5a +SUCCESS,217.0161476135254,0.3954634666442871,1362,Try relaxing Time Window constraints,01_20_2023_10_23_12,133937.671875,d8b5ac3dee312da96440a4879c7f258d955cae53 +SUCCESS,220.2502839565277,0.4050805568695068,1359,Try relaxing Time Window constraints,01_21_2023_01_03_36,136410.484375,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 +SUCCESS,227.21949887275696,0.4711213111877441,1359,Try relaxing Time Window constraints,01_22_2023_00_24_36,135826.3125,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 +SUCCESS,227.33157467842105,0.3872089385986328,1360,Try relaxing Time Window constraints,01_23_2023_00_26_41,133772.546875,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 +SUCCESS,223.0627908706665,0.3764033317565918,1367,Try relaxing Time Window constraints,01_24_2023_00_34_26,134500.0625,1e0f2faebb52900fd0a678d5e0a2b1b93112820d +SUCCESS,229.52511739730835,0.4286582469940185,1362,Try relaxing Time Window 
constraints,01_24_2023_08_10_31,134388.734375,1e0f2faebb52900fd0a678d5e0a2b1b93112820d +SUCCESS,222.2460687160492,0.4110927581787109,1362,Try relaxing Time Window constraints,01_24_2023_09_01_07,136785.8125,1e0f2faebb52900fd0a678d5e0a2b1b93112820d +SUCCESS,225.35983300209045,0.4234979152679443,1361,Try relaxing Time Window constraints,01_25_2023_00_50_09,133834.765625,1e0f2faebb52900fd0a678d5e0a2b1b93112820d +SUCCESS,211.9453625679016,0.3975887298583984,1358,Try relaxing Time Window constraints,01_26_2023_00_26_27,135007.578125,cec6da5b7083e002441900cfca186292f481936f +SUCCESS,211.1832706928253,0.4141361713409424,1363,Try relaxing Time Window constraints,01_27_2023_00_34_04,135862.28125,a51fe6c34510cd20b93ec6cfc15b8509d0babfca +SUCCESS,212.13958954811096,0.5645616054534912,1364,Try relaxing Time Window constraints,01_28_2023_00_56_38,41154.609375,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,212.52610898017883,0.406682014465332,1360,Try relaxing Time Window constraints,01_29_2023_01_08_43,41272.10546875,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,212.9528636932373,0.4297752380371094,1356,Try relaxing Time Window constraints,01_30_2023_00_57_46,42294.234375,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,210.4439640045166,0.5646810531616211,1356,Try relaxing Time Window constraints,01_30_2023_12_51_42,42386.4609375,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,212.9664409160614,0.4054524898529053,1367,Try relaxing Time Window constraints,01_30_2023_16_00_48,42615.96484375,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,206.41025924682617,0.4145197868347168,1359,Try relaxing Time Window constraints,01_31_2023_01_20_53,41637.00390625,3354f792963b368a476c07ef11e1fcd66ca5ea5f +SUCCESS,270.8348495960236,1.1712639331817627,1357,Try relaxing Time Window constraints,02_01_2023_00_39_43,44848.16796875,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/linlim_LC1_10_9.csv b/regression/benchmarks/linlim_LC1_10_9.csv new file mode 
100644 index 000000000..6d07bb554 --- /dev/null +++ b/regression/benchmarks/linlim_LC1_10_9.csv @@ -0,0 +1,21 @@ +status,solver_run_time,etl_time,memory,err_msg,time_stamp,travel_cost,commit_hash +SUCCESS,238.67641973495483,0.4070606231689453,1381,Try relaxing Time Window constraints,01_18_2023_13_35_08,139254.9375,4a550c4d8a18beddaef2884e3321b54046d3ca5a +SUCCESS,232.9940016269684,0.4224081039428711,1380,Try relaxing Time Window constraints,01_18_2023_14_43_14,140103.0,4a550c4d8a18beddaef2884e3321b54046d3ca5a +SUCCESS,234.68699979782104,0.4444656372070312,1381,Try relaxing Time Window constraints,01_19_2023_00_47_10,141163.640625,4a550c4d8a18beddaef2884e3321b54046d3ca5a +SUCCESS,218.58095908164967,0.4063160419464111,1380,Try relaxing Time Window constraints,01_20_2023_10_23_12,139411.203125,d8b5ac3dee312da96440a4879c7f258d955cae53 +SUCCESS,230.3954677581787,0.4042942523956299,1378,Try relaxing Time Window constraints,01_21_2023_01_03_36,139395.890625,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 +SUCCESS,232.57781553268435,0.4077394008636474,1378,Try relaxing Time Window constraints,01_22_2023_00_24_36,140732.28125,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 +SUCCESS,231.7747838497162,0.401658296585083,1379,Try relaxing Time Window constraints,01_23_2023_00_26_41,140143.421875,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 +SUCCESS,232.3545906543732,0.4278278350830078,1378,Try relaxing Time Window constraints,01_24_2023_00_34_26,139762.8125,1e0f2faebb52900fd0a678d5e0a2b1b93112820d +SUCCESS,234.09125399589536,0.411196231842041,1378,Try relaxing Time Window constraints,01_24_2023_08_10_31,140568.75,1e0f2faebb52900fd0a678d5e0a2b1b93112820d +SUCCESS,226.61660289764404,0.4029746055603027,1379,Try relaxing Time Window constraints,01_24_2023_09_01_07,140093.953125,1e0f2faebb52900fd0a678d5e0a2b1b93112820d +SUCCESS,231.881952047348,0.4578347206115722,1378,Try relaxing Time Window constraints,01_25_2023_00_50_09,139465.359375,1e0f2faebb52900fd0a678d5e0a2b1b93112820d 
+SUCCESS,208.54942059516907,0.3924274444580078,1380,Try relaxing Time Window constraints,01_26_2023_00_26_27,141658.125,cec6da5b7083e002441900cfca186292f481936f +SUCCESS,218.20822954177856,0.3863475322723388,1381,Try relaxing Time Window constraints,01_27_2023_00_34_04,140597.359375,a51fe6c34510cd20b93ec6cfc15b8509d0babfca +SUCCESS,211.22141408920288,0.5680606365203857,1377,Try relaxing Time Window constraints,01_28_2023_00_56_38,49620.765625,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,214.280752658844,0.4076018333435058,1378,Try relaxing Time Window constraints,01_29_2023_01_08_43,46729.375,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,214.09263396263125,0.395796537399292,1376,Try relaxing Time Window constraints,01_30_2023_00_57_46,48296.01953125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,213.01783680915835,0.5552072525024414,1377,Try relaxing Time Window constraints,01_30_2023_12_51_42,49357.98828125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,218.2983613014221,0.3832075595855713,1375,Try relaxing Time Window constraints,01_30_2023_16_00_48,46253.51953125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +SUCCESS,211.4424307346344,0.3975503444671631,1375,Try relaxing Time Window constraints,01_31_2023_01_20_53,48054.55859375,3354f792963b368a476c07ef11e1fcd66ca5ea5f +SUCCESS,268.6615102291107,1.1218979358673096,1377,Try relaxing Time Window constraints,02_01_2023_00_39_43,48655.6015625,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/set_config.csv b/regression/benchmarks/set_config.csv new file mode 100644 index 000000000..c525c8585 --- /dev/null +++ b/regression/benchmarks/set_config.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +0.004645535396412,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0046047047129832,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0044730253051966,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf 
+0.004629728605505,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +0.0077153033998911,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/set_fleet_2.csv b/regression/benchmarks/set_fleet_2.csv new file mode 100644 index 000000000..3dd3c225f --- /dev/null +++ b/regression/benchmarks/set_fleet_2.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +0.0095658392179757,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.009450409177225,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0100178633118048,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0093642852152697,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +0.0184322164990589,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/set_fleet_800.csv b/regression/benchmarks/set_fleet_800.csv new file mode 100644 index 000000000..fa582bf6a --- /dev/null +++ b/regression/benchmarks/set_fleet_800.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +0.1390531376935541,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.1451090188929811,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.1381841997965239,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.1395055412198416,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +0.3440185967003344,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/set_matrix_10.csv b/regression/benchmarks/set_matrix_10.csv new file mode 100644 index 000000000..f97ca7f13 --- /dev/null +++ b/regression/benchmarks/set_matrix_10.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +0.0318860427825711,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0271919017890468,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0215748190879821,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf 
+0.0207240985939279,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +0.0808484257991949,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/set_matrix_1000.csv b/regression/benchmarks/set_matrix_1000.csv new file mode 100644 index 000000000..3d6a7d99b --- /dev/null +++ b/regression/benchmarks/set_matrix_1000.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +2.447241939394735,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +2.3234502929844894,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +2.338384240376763,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +2.24809804528486,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +6.911417219002033,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/set_tasks_10.csv b/regression/benchmarks/set_tasks_10.csv new file mode 100644 index 000000000..51c1f5ead --- /dev/null +++ b/regression/benchmarks/set_tasks_10.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +0.0109893269021995,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0129346061963588,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0134648023871704,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0118026977172121,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +0.0226951214972359,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/set_tasks_1000.csv b/regression/benchmarks/set_tasks_1000.csv new file mode 100644 index 000000000..29772bb96 --- /dev/null +++ b/regression/benchmarks/set_tasks_1000.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +0.1538944556959904,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.1491747667896561,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.1730471987975761,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf 
+0.1544712165719829,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +0.4287849125015782,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/set_waypoint_graph_10.csv b/regression/benchmarks/set_waypoint_graph_10.csv new file mode 100644 index 000000000..ed4883933 --- /dev/null +++ b/regression/benchmarks/set_waypoint_graph_10.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +0.0056097029941156,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0032982378965243,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0038573885685764,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0037153065786696,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +0.0103513664995261,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/set_waypoint_graph_1000.csv b/regression/benchmarks/set_waypoint_graph_1000.csv new file mode 100644 index 000000000..644ae2231 --- /dev/null +++ b/regression/benchmarks/set_waypoint_graph_1000.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +3.4773945213062687,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +3.380432048998773,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +3.321970174077433,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +3.325099247705657,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +9.763971190099983,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/solve_delta_matrix_10.csv b/regression/benchmarks/solve_delta_matrix_10.csv new file mode 100644 index 000000000..2d481b3e9 --- /dev/null +++ b/regression/benchmarks/solve_delta_matrix_10.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +0.0660040138754993,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0658921508234925,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf 
+0.0601130882976576,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0595540319103747,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +0.1493235697002091,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/solve_delta_matrix_1000.csv b/regression/benchmarks/solve_delta_matrix_1000.csv new file mode 100644 index 000000000..cd43dc82a --- /dev/null +++ b/regression/benchmarks/solve_delta_matrix_1000.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +0.3474221469950862,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.3429003946832381,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.3333525741123594,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.33962144900579,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +1.0392802602997109,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/solve_delta_wpg_10.csv b/regression/benchmarks/solve_delta_wpg_10.csv new file mode 100644 index 000000000..301ffd427 --- /dev/null +++ b/regression/benchmarks/solve_delta_wpg_10.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +0.0767553822021,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.079037431103643,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0698185498942621,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.157676170382183,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +0.164265617098863,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/solve_delta_wpg_1000.csv b/regression/benchmarks/solve_delta_wpg_1000.csv new file mode 100644 index 000000000..45f4931ae --- /dev/null +++ b/regression/benchmarks/solve_delta_wpg_1000.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +0.5354226037045009,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf 
+0.5224156844080425,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.5350534689961932,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.5143750281655229,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +1.3945245039001748,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/update_config.csv b/regression/benchmarks/update_config.csv new file mode 100644 index 000000000..ba59ede20 --- /dev/null +++ b/regression/benchmarks/update_config.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +0.0046170215005986,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0044824522337876,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0047105842968449,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0045994932879693,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +0.0075603434997901,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/update_fleet_2.csv b/regression/benchmarks/update_fleet_2.csv new file mode 100644 index 000000000..ec6ae8ff4 --- /dev/null +++ b/regression/benchmarks/update_fleet_2.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +0.0095946694957092,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0095075284014455,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0096409979043528,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.009402271406725,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +0.0175724803993944,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/update_fleet_800.csv b/regression/benchmarks/update_fleet_800.csv new file mode 100644 index 000000000..3ddd46327 --- /dev/null +++ b/regression/benchmarks/update_fleet_800.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +0.1386791780241765,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf 
+0.1419437378877774,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.1385688012931495,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.1430311175994575,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +0.3444252583984052,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/update_matrix_10.csv b/regression/benchmarks/update_matrix_10.csv new file mode 100644 index 000000000..17b382481 --- /dev/null +++ b/regression/benchmarks/update_matrix_10.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +0.0080373852746561,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0046605161041952,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0055735706002451,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0051797617808915,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +0.0133258155001385,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/update_matrix_1000.csv b/regression/benchmarks/update_matrix_1000.csv new file mode 100644 index 000000000..7db442f7b --- /dev/null +++ b/regression/benchmarks/update_matrix_1000.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +2.4152652503806165,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +2.319457295385655,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +2.338757945317775,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +2.2231198537279853,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +6.86295339329954,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/update_tasks_10.csv b/regression/benchmarks/update_tasks_10.csv new file mode 100644 index 000000000..5adaeb373 --- /dev/null +++ b/regression/benchmarks/update_tasks_10.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +0.0095267685013823,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf 
+0.011702395172324,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0130278801894746,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0122657145839184,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +0.0276424682000651,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/update_tasks_1000.csv b/regression/benchmarks/update_tasks_1000.csv new file mode 100644 index 000000000..832386cc1 --- /dev/null +++ b/regression/benchmarks/update_tasks_1000.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +0.1570440814946778,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.1466961717000231,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.1645322358002886,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.1534246206167154,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +0.3910466975998133,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/update_waypoint_graph_10.csv b/regression/benchmarks/update_waypoint_graph_10.csv new file mode 100644 index 000000000..dca45caf9 --- /dev/null +++ b/regression/benchmarks/update_waypoint_graph_10.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash +0.0051487428951077,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0031310768914408,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0035910105914808,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +0.0034982840297743,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +0.0092652412997267,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/update_waypoint_graph_1000.csv b/regression/benchmarks/update_waypoint_graph_1000.csv new file mode 100644 index 000000000..8f7a39ca6 --- /dev/null +++ b/regression/benchmarks/update_waypoint_graph_1000.csv @@ -0,0 +1,6 @@ +run_time,date_time,commit_hash 
+2.170334866119083,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +2.1527948976843616,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +2.062339087412693,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf +2.0358887692331336,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f +6.185019124099199,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/config.sh b/regression/config.sh new file mode 100644 index 000000000..3b073555a --- /dev/null +++ b/regression/config.sh @@ -0,0 +1,67 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +THIS_DIR=$(cd $(dirname ${BASH_SOURCE[0]}) && pwd) + +CUOPT_SCRIPTS_DIR=$THIS_DIR +OUTPUT_DIR=$SCRATCH_DIR/benchmark_runs/ + +ACCOUNT=datascience_rapids_testing +PARTITION="batch_dgx2h_m2" +GPUS_PER_NODE=1 + +# Path to the squashfs file containing the container image +IMAGE="nvidia/cuopt:25.10.0a-cuda12.9-py3.12" +#SQSH_IMAGE=$SCRATCH_DIR/container_state/cuopt.sqsh + +ROUTING_CONFIGS_PATH=$SCRATCH_DIR/routing_configs/ +LP_CONFIGS_PATH=$SCRATCH_DIR/lp_configs/ +MIP_CONFIGS_PATH=$SCRATCH_DIR/mip_configs/ +STATUS_FILE=$OUTPUT_DIR/status.txt +WORKER_RMM_POOL_SIZE=${WORKER_RMM_POOL_SIZE:-24G} + +DATASETS_DIR=$SCRATCH_DIR/datasets + +REPO_DIR_NAME=cuopt +RESULT_DIR_NAME=cuopt-regression +SSH_CREDS=/home/iroy/.ssh/ + +# Assume CUOPT_SLACK_APP_ID is defined!
+CUOPT_SLACK_APP_ID="T04SYRAP3/B04BKLJ7R0F/8EPiEMTDcXFeB5FzQVEJp8t2" +WEBHOOK_URL=${WEBHOOK_URL:-https://hooks.slack.com/services/${CUOPT_SLACK_APP_ID}} +S3_FILE_PREFIX=s3://reopt-testing-public/regression_tests +S3_URL_PREFIX=https://reopt-testing-public.s3.amazonaws.com/regression_tests + +# Most are defined using the bash := or :- syntax, which means they +# will be set only if they were previously unset. The project config +# is loaded first, which gives it the opportunity to override anything +# in this file that uses that syntax. If there are variables in this +# file that should not be overridden by a project, then they will +# simply not use that syntax and override, since these variables are +# read last. +RESULTS_ARCHIVE_DIR=$OUTPUT_DIR/results +RESULTS_DIR=$RESULTS_ARCHIVE_DIR/latest +METADATA_FILE=$RESULTS_DIR/metadata.sh +WORKSPACE=$OUTPUT_DIR/workspace +TESTING_DIR=$WORKSPACE/testing +BENCHMARK_DIR=$WORKSPACE/benchmark +SCRIPTS_DIR=$THIS_DIR + +BUILD_LOG_FILE=$RESULTS_DIR/build_log.txt +DATE=${DATE:-$(date --utc "+%Y-%m-%d_%H:%M:%S")_UTC} + +# vars that are not overridden by the project. + +# These must remain relative to $RESULTS_DIR since some scripts assume +# that, and also assume the names "tests" and "benchmarks", and +# therefore cannot be overridden by a project. +TESTING_RESULTS_DIR=${RESULTS_DIR}/tests +BENCHMARK_RESULTS_DIR=${RESULTS_DIR}/benchmarks diff --git a/regression/cronjob.sh b/regression/cronjob.sh new file mode 100755 index 000000000..b69b50686 --- /dev/null +++ b/regression/cronjob.sh @@ -0,0 +1,203 @@ +#!/bin/bash +# SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto.
Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +# NOTE: this script is currently run from cron using this crontab entry: +# 30 2 * * * bash --login -c 'env RAPIDS_MG_TOOLS_DIR=/lustre/fsw/datascience_rapids_cugraphgnn/ramakrishnap/Projects/regression_testing/multi-gpu-tools /lustre/fsw/datascience_rapids_cugraphgnn/ramakrishnap/Proje> + +# Abort script on first error to ensure script-env.sh is sourced. +set -e + +if [[ -v SLURM_NODEID ]]; then + echo "Detected the env var SLURM_NODEID is set. Is this script running on a compute node?" + echo "This script must be run *outside* of a slurm job (this script starts slurm jobs, but is not a job itself)." + exit 1 +fi + +# Must ensure PROJECT_DIR is exported first then load rapids-mg-tools env +export PROJECT_DIR=${PROJECT_DIR:-$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)} + +source ${PROJECT_DIR}/config.sh +source ${PROJECT_DIR}/functions.sh + +BUILD_FAILED=0 +RUN_BENCHMARKS=0 +RUN_TESTS=0 + +if hasArg --benchmark; then + RUN_BENCHMARKS=1 +fi +if hasArg --test; then + RUN_TESTS=1 +fi +if (! hasArg --test) && (! hasArg --benchmark); then + RUN_TESTS=1 + RUN_BENCHMARKS=1 +fi + +################################################################################ + +# Create a results dir unique for this run +setupResultsDir + +# Switch to allowing errors from commands, since test failures will +# result in non-zero return codes and this script should attempt to +# run all tests. +set +e + +################################################################################ +#logger "Testing cuOpt in container..." 
+#srun \ +# --account $ACCOUNT \ +# --partition $PARTITION \ +# --nv-meta ml-model.dlss,dcgm_opt_out.yes \ +# --job-name=test-container.testing \ +# --nodes 1 \ +# --gpus-per-node 1 \ +# --time=120 \ +# --export=ALL \ +# --exclusive -K \ +# --container-mounts=${CUOPT_SCRIPTS_DIR}:${CUOPT_SCRIPTS_DIR},${OUTPUT_DIR}:${OUTPUT_DIR},${SSH_CREDS}:/root/.ssh \ +# --container-image=$IMAGE \ +# --output=$BUILD_LOG_FILE \ +# bash ${PROJECT_DIR}/test-container.sh +TESTING_FAILED=$? +logger "done testing container, return code was $TESTING_FAILED" + +if [[ $TESTING_FAILED == 0 ]]; then + + ############################################################################ + # Setup and run tests + if [[ $RUN_BENCHMARKS == 1 ]]; then + logger "Running benchmarks..." + logger "GPUs per node : $GPUS_PER_NODE" + # SNMG tests - run in parallel + #srun \ + # --account $ACCOUNT \ + # --partition $PARTITION \ + # --nv-meta ml-model.dlss,dcgm_opt_out.yes \ + # --job-name=run-nightly-benchmarks \ + # --nodes 1 \ + # --gpus-per-node $GPUS_PER_NODE \ + # --time=4:00:00 \ + # --export=ALL \ + # --exclusive -K\ + # --container-mounts ${ROUTING_CONFIGS_PATH}:${ROUTING_CONFIGS_PATH},${CUOPT_SCRIPTS_DIR}:${CUOPT_SCRIPTS_DIR},${OUTPUT_DIR}:${OUTPUT_DIR} \ + # --container-image=$IMAGE \ + # --output=${BENCHMARK_RESULTS_DIR}/benchmark_routing_log.txt \ + # bash ${CUOPT_SCRIPTS_DIR}/routing_regression_test.sh & + #PID_1=$! + #logger "Process ID $PID_1 in background" + + srun \ + --account $ACCOUNT \ + --partition $PARTITION \ + --nv-meta ml-model.dlss,dcgm_opt_out.yes \ + --job-name=run-nightly-benchmarks \ + --nodes 1 \ + --gpus-per-node $GPUS_PER_NODE \ + --time=4:00:00 \ + --export=ALL \ + --exclusive -K\ + --container-mounts ${LP_CONFIGS_PATH}:${LP_CONFIGS_PATH},${CUOPT_SCRIPTS_DIR}:${CUOPT_SCRIPTS_DIR},${OUTPUT_DIR}:${OUTPUT_DIR} \ + --container-image=$IMAGE \ + --output=${BENCHMARK_RESULTS_DIR}/benchmark_lp_log.txt \ + bash ${CUOPT_SCRIPTS_DIR}/lp_regression_test.sh & + PID_2=$! 
+ + srun \ + --account $ACCOUNT \ + --partition $PARTITION \ + --nv-meta ml-model.dlss,dcgm_opt_out.yes \ + --job-name=run-nightly-benchmarks \ + --nodes 1 \ + --gpus-per-node $GPUS_PER_NODE \ + --time=4:00:00 \ + --export=ALL \ + --exclusive -K\ + --container-mounts ${MIP_CONFIGS_PATH}:${MIP_CONFIGS_PATH},${CUOPT_SCRIPTS_DIR}:${CUOPT_SCRIPTS_DIR},${OUTPUT_DIR}:${OUTPUT_DIR} \ + --container-image=$IMAGE \ + --output=${BENCHMARK_RESULTS_DIR}/benchmark_mip_log.txt \ + bash ${CUOPT_SCRIPTS_DIR}/mip_regression_test.sh & + PID_3=$! + + wait $PID_0 $PID_1 $PID_2 $PID_3 + fi + +else # if [[ $TESTING_FAILED == 0 ]] + logger "Container testing Failed!" +fi + +: <<'END' +################################################################################ +# Send report based on contents of $RESULTS_DIR +# These steps do not require a worker node. + +# When running both testing and benchmark and if some benchmarks fail, +# the entire nightly will fail. The benchmark logs reported on Slack +# contains information about the failures. +logger "Generating report" + +if [ -f $METADATA_FILE ]; then + source $METADATA_FILE +fi + +activateCondaEnv + +if [[ $BUILD_FAILED == 0 ]]; then + if [[ $RUN_BENCHMARKS == 1 ]]; then + # Push regression tests to repo + cd ${WORKSPACE}/${RESULT_DIR_NAME}; git add data/*; git commit -m "Update for commit : ${PROJECT_VERSION}"; git push; cd - + # bash ${CUOPT_SCRIPTS_DIR}/save_benchmarks.sh $PROJECT_VERSION + fi +fi + +# Copy all config files to one folder +cp $ROUTING_CONFIGS_PATH/*config.json $LP_CONFIGS_PATH/*config.json $MIP_CONFIGS_PATH/*config.json $ALL_CONFIGS_PATH/ + +RUN_ASV_OPTION="" +if hasArg --skip-asv; then + logger "Skipping running ASV" +else + # Only create/update the asv database if there is both a commit Hash and a branch otherwise + # asv will return an error. 
If there is $PROJECT_BUILD, that implies there is Neither the + # git commit hash nor the branch which are required to create/update the asv db + if [[ "$PROJECT_BUILD" == "" ]]; then + # Update/create the ASV database + logger "Updating ASV database" + python $PROJECT_DIR/update_asv_database.py --commitHash=$PROJECT_VERSION --repo-url=$PROJECT_REPO_URL --branch=$PROJECT_REPO_BRANCH --commitTime=$PROJECT_REPO_TIME --results-dir=$RESULTS_DIR --machine-name> + RUN_ASV_OPTION=--run-asv + logger "Updated ASV database" + else + logger "Detected a conda install, cannot run ASV since a commit hash/time is needed." + fi +fi + +if hasArg --spreadsheet; then + logger "Generating spreadsheet" + export SPREADSHEET_URL=$(python $PROJECT_DIR/gsheet-report.py --results-dir=$RESULTS_DIR |grep "spreadsheet url is"|cut -d ' ' -f4) + #python $PROJECT_DIR/gsheet-report.py --results-dir=$RESULTS_DIR +fi + +# The cuopt pull has missing .git folder which causes subsequent runs, lets delete and pull it fresh everytime. +rm -rf $RESULTS_DIR/benchmarks/results/asv/cuopt/ +rm -rf $RESULTS_DIR/tests + +${SCRIPTS_DIR}/create-html-reports.sh $RUN_ASV_OPTION + +if hasArg --skip-sending-report; then + logger "Skipping sending report." +else + logger "Uploading to S3, posting to Slack" + ${PROJECT_DIR}/send-slack-report.sh +fi +END +logger "cronjob.sh done." + diff --git a/regression/functions.sh b/regression/functions.sh new file mode 100644 index 000000000..8f4421d14 --- /dev/null +++ b/regression/functions.sh @@ -0,0 +1,144 @@ +# Copyright (c) 2021, NVIDIA CORPORATION. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is source'd from script-env.sh to add functions to the +# calling environment, hence no #!/bin/bash as the first line. This +# also assumes the variables used in this file have been defined +# elsewhere. + +NUMARGS=$# +ARGS=$* +function hasArg { + (( ${NUMARGS} != 0 )) && (echo " ${ARGS} " | grep -q " $1 ") +} + +function logger { + echo -e ">>>> $@" +} + +# Calling "setTee outfile" will cause all stdout and stderr of the +# current script to be output to "tee", which outputs to stdout and +# "outfile" simultaneously. This is useful by allowing a script to +# "tee" itself at any point without being called with tee. +_origFileDescriptorsSaved=0 +function setTee { + if [[ $_origFileDescriptorsSaved == 0 ]]; then + # Save off the original file descr 1 and 2 as 3 and 4 + exec 3>&1 4>&2 + _origFileDescriptorsSaved=1 + fi + teeFile=$1 + # Create a named pipe. + pipeName=$(mktemp -u) + mkfifo $pipeName + # Close the current 1 and 2 and restore to original (3, 4) in the + # event this function is called repeatedly. + exec 1>&- 2>&- + exec 1>&3 2>&4 + # Start a tee process reading from the named pipe. Redirect stdout + # and stderr to the named pipe which goes to the tee process. The + # named pipe "file" can be removed and the tee process stays alive + # until the fd is closed. + tee -a < $pipeName $teeFile & + exec > $pipeName 2>&1 + rm $pipeName +} + +# Call this to stop script output from going to "tee" after a prior
+function unsetTee { + if [[ $_origFileDescriptorsSaved == 1 ]]; then + # Close the current fd 1 and 2 which should stop the tee + # process, then restore 1 and 2 to original (saved as 3, 4). + exec 1>&- 2>&- + exec 1>&3 2>&4 + fi +} + +# Creates a unique results dir based on date, then links the common +# results dir name to it. +function setupResultsDir { + mkdir -p ${RESULTS_ARCHIVE_DIR}/${DATE} + # Store the target of $RESULTS_DIR before $RESULTS_DIR get linked to + # a different dir + previous_results=$(readlink -f $RESULTS_DIR) + + rm -rf $RESULTS_DIR + ln -s ${RESULTS_ARCHIVE_DIR}/${DATE} $RESULTS_DIR + mkdir -p $TESTING_RESULTS_DIR + mkdir -p $BENCHMARK_RESULTS_DIR/results/ + + old_asv_dir=$previous_results/benchmarks/results/asv + if [ -d $old_asv_dir ]; then + cp -r $old_asv_dir $BENCHMARK_RESULTS_DIR/results + fi +} + + +# echos the name of the directory that $1 is linked to. Useful for +# getting the actual path of the results dir since that is often +# sym-linked to a unique (based on timestamp) results dir name. +function getNonLinkedFileName { + linkname=$1 + targetname=$(readlink -f $linkname) + if [[ "$targetname" != "" ]]; then + echo $targetname + else + echo $linkname + fi +} + +function waitForSlurmJobsToComplete { + ids=$* + jobs=$(python -c "print(\",\".join(\"$ids\".split()))") # make a comma-separated list + jobsInQueue=$(squeue --noheader --jobs=$jobs) + while [[ $jobsInQueue != "" ]]; do + sleep 2 + jobsInQueue=$(squeue --noheader --jobs=$jobs) + done +} + +# Clones repo from URL specified by $1 as name $2 in to directory +# $3. For example: +# "cloneRepo https://github.com/rapidsai/cugraph.git /my/repos cg" +# results in cugraph being cloned to /my/repos/cg. +# NOTE: This removes any existing cloned repos that match the +# destination. +function cloneRepo { + repo_url=$1 + repo_name=$2 + dest_dir=$3 + mkdir -p $dest_dir + pushd $dest_dir > /dev/null + logger "Clone $repo_url in $dest_dir..." 
+ if [ -d $repo_name ]; then + rm -rf $repo_name + if [ -d $repo_name ]; then + echo "ERROR: ${dest_dir}/$repo_name was not completely removed." + error 1 + fi + fi + git clone $repo_url + popd > /dev/null +} + +# Only define this function if it has not already been defined in the +# current environment, which allows the project to override it from +# its functions.sh file that was previously source'd. +if [[ $(type -t activateCondaEnv) == "" ]]; then + function activateCondaEnv { + logger "Activating conda env ${CONDA_ENV}..." + eval "$(conda shell.bash hook)" + conda activate $CONDA_ENV + } +fi diff --git a/regression/gsheet-report.py b/regression/gsheet-report.py new file mode 100755 index 000000000..63077f650 --- /dev/null +++ b/regression/gsheet-report.py @@ -0,0 +1,178 @@ +# +# Copyright (c) 2021, NVIDIA CORPORATION. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+from pathlib import Path
+import json
+import gspread
+from oauth2client.service_account import ServiceAccountCredentials
+import time
+import os
+from datetime import datetime
+
+class Gsheet_Report:
+    def __init__(self, results_dir):
+        self.benchmark_dir_path = Path(results_dir)/"benchmarks"
+        self.benchmark_result_list = list(self.benchmark_dir_path.glob("benchmark_result*"))
+        # FIXME: This is a default list of the current MNMG algos benchmarked; this is subject to change
+        self.map_algo_sheet = {'bfs':"BFS", "sssp":"SSSP", "louvain":"Louvain", "pagerank":"Pagerank", "wcc":"WCC", "katz":"Katz"}
+        self.algos = None
+        self.sheet_names = None
+        self.spreadsheet = None
+        self.gc = None
+
+    def _setup_authentication(self):
+        # Set up authentication and open the spreadsheet
+        # Before running cronjob, run a script setting the credential path
+        if os.environ.get("GOOGLE_SHEETS_CREDENTIALS_PATH", None):
+            credentials_path = os.environ["GOOGLE_SHEETS_CREDENTIALS_PATH"]
+            self.gc = gspread.service_account(filename=credentials_path)
+        else:
+            raise Exception("Invalid credentials path")
+
+    def _import_sample_worksheet(self):
+        # import a sample benchmark result table and copy it to the new benchmark result spreadsheet
+        date_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+        spreadsheet_name = f"MNMG-benchmark-results {date_time}"
+        self.spreadsheet = self.gc.create(spreadsheet_name)
+
+        sample_spreadsheet_name = "sample"
+
+        def from_sample_spreadsheet(gc, sample_spreadsheet_name, spreadsheet):
+            sample_spreadsheet = self.gc.open(sample_spreadsheet_name)
+            sample_worksheet = sample_spreadsheet.worksheet('sample')
+            sample_worksheet.copy_to(spreadsheet.id)
+
+        from_sample_spreadsheet(self.gc, sample_spreadsheet_name, self.spreadsheet)
+
+        # The newly created spreadsheet has a default worksheet;
+        # delete that worksheet
+        self.spreadsheet.del_worksheet(self.spreadsheet.get_worksheet(0))
+        # Rename the only remaining worksheet to 'sample'
+        # This worksheet containing an
 empty result table will be copied to as
+        # many sheets as there are algos in the benchmark result dir
+        self.spreadsheet.get_worksheet(0).update_title('sample')
+
+        # Send the newly created spreadsheet to my google drive
+        self.spreadsheet.share('jnke2016@gmail.com', perm_type='user', role='writer')
+
+    def _extract_sheet_names(self):
+        # From the benchmark result dir, get the list of algo's name that were run
+        # Those will be used as worksheet names
+
+        # if the benchmark result list is empty, no benchmarks were run
+        if len(self.benchmark_result_list) == 0:
+            return
+
+        algos = map(lambda x: str(x).split('.')[1].split('.')[0], list(self.benchmark_result_list))
+        # remove duplicates
+        self.algos = list(set(algos))
+        self.sheet_names = map(lambda x:self.map_algo_sheet[x], self.algos)
+        return True
+
+
+    def _create_worksheets(self, sheet_names=None):
+        # Create as many worksheets as there are algos in the benchmark results dir
+        if not isinstance(sheet_names, list) and sheet_names is not None:
+            sheet_names = [sheet_names]
+
+        if sheet_names is not None:
+            valid_algos_benchmarked = set(self.algos) & set(sheet_names)
+            # Do not create the spreadsheet of an algo which wasn't benchmarked
+            if len(valid_algos_benchmarked) < len(sheet_names):
+                raise Exception(f"Invalid algo(s) specified: \n"
+                    "The list of algos benchmarked are "f"{self.algos}")
+
+        worksheet = self.spreadsheet.worksheet('sample')
+        # If no sheet names are provided, create worksheets for all algos that were benchmarked
+        if sheet_names is None:
+            sheet_names = self.sheet_names
+
+        for sheet_name in sheet_names:
+            if sheet_name in self.map_algo_sheet.keys():
+                self.spreadsheet.duplicate_sheet(source_sheet_id=worksheet.id, new_sheet_name=self.map_algo_sheet[sheet_name])
+            else:
+                self.spreadsheet.duplicate_sheet(source_sheet_id=worksheet.id, new_sheet_name=sheet_name)
+
+    def _write_gsheet(self, algos=None):
+        # Write the results from the json to the corresponding cell in the worksheet
+        def extract_cell(spreadsheet,
sheet_name, scale, ngpus): + worksheet = spreadsheet.worksheet(sheet_name) + # The row containing the number of GPUs is 'ngpus_row'+1 + ngpus_row = worksheet.find("Number of GPUs").row + # Find the number of GPUs cell in that row + ngpus_algo_col = worksheet.find(str(ngpus), in_row=ngpus_row+1).col + # Get the column containing the scale + scale_col = worksheet.find("Scale").col + # Find the scale's row within 'scale_col' + scale_algo_row = worksheet.find(str(scale), in_column=scale_col).row + return worksheet, scale_algo_row , ngpus_algo_col + + if algos is not None: + if not isinstance(algos, list): + algos = [algos] + # ensure the algos specified were benchmarked + valid_algos_benchmarked = set(self.algos) & set(algos) + if len(valid_algos_benchmarked) == 0: + raise Exception("Invalid algo(s) specified:\n" + f"{algos}" " not a subset of " f"{self.algos}") + benchmark_result_list=[] + # Get a list of the json files that will be scan to update the spreadsheet + for file_name in self.benchmark_result_list: + algo_file = str(file_name).split('.')[1].split('.')[0] + # Only create/update spreadsheet of the algos specified + if algo_file in algos: + benchmark_result_list.append(file_name) + self.benchmark_result_list = benchmark_result_list + for file_name in self.benchmark_result_list: + time.sleep(5) + with open(file_name, 'r') as openfile: + bResult_dic = json.load(openfile) + sheet_name = bResult_dic["funcName"].split('.')[1] + scale = bResult_dic["argNameValuePairs"][0][1] + ngpus = bResult_dic["argNameValuePairs"][1][1] + result = bResult_dic["result"] + worksheet, row, col = extract_cell(self.spreadsheet, self.map_algo_sheet[sheet_name], scale, ngpus) + worksheet.update_cell(row, col, result) + + # delete sample worksheet + self.spreadsheet.del_worksheet(self.spreadsheet.worksheet("sample")) + + def _get_spreadsheet_url(self): + url_prefix = "https://docs.google.com/spreadsheets/d/" + spreadsheet_url = f"{url_prefix}{self.spreadsheet.id}" + print("spreadsheet 
url is", spreadsheet_url) + + + def update_spreadsheet(self, algos=None): + self._setup_authentication() + self._import_sample_worksheet() + self._get_spreadsheet_url() + benchmark_json = self._extract_sheet_names() + # Only proceed if there are benchmark results + if benchmark_json : + self._create_worksheets(algos) + self._write_gsheet(algos) + + +if __name__ == "__main__": + import argparse + ap = argparse.ArgumentParser() + ap.add_argument("--results-dir", type=str, required=True, + help="directory to store the results in json files") + args = ap.parse_args() + + gsheet_report = Gsheet_Report(results_dir=args.results_dir) + gsheet_report.update_spreadsheet() + diff --git a/regression/lp_regression_test.sh b/regression/lp_regression_test.sh new file mode 100644 index 000000000..58c413c6e --- /dev/null +++ b/regression/lp_regression_test.sh @@ -0,0 +1,42 @@ +#!/bin/bash +# SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +# Abort script on first error +set -e + +DELAY=30 + +# Must ensure PROJECT_DIR is exported first then load rapids-mg-tools env +export PROJECT_DIR=${PROJECT_DIR:-$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)} +source ${PROJECT_DIR}/config.sh +source ${PROJECT_DIR}/functions.sh + +################################################################################ + +# Extract the build meta-data from either the conda environment or the +# cugraph source dir and write out a file which can be read by other +# scripts. 
If the cugraph conda packages are present, those take +# precedence, otherwise meta-data will be extracted from the sources. + +GIT_COMMIT="abc" #$(cd ${WORKSPACE}/${REPO_DIR_NAME}; git rev-parse HEAD) +LOG_PATH=${RESULTS_DIR}/benchmarks/ + +nvidia-smi + +mkdir -p ${RESULTS_DIR}/benchmarks/results/csvs/ +#rm -rf ${WORKSPACE}/${RESULT_DIR_NAME}/data/regressions.csv + +logger "Running lp tests ........" +python ${CUOPT_SCRIPTS_DIR}/benchmark_scripts/benchmark.py -c ${LP_CONFIGS_PATH} -r ${RESULTS_DIR}/benchmarks/results/csvs/ -g ${GIT_COMMIT} -l ${LOG_PATH} -s ${RESULTS_DIR}/benchmarks/results/lp_tests_status.txt +logger "Completed lp tests ........" + + +#cp ${WORKSPACE}/${RESULT_DIR_NAME}/data/* ${RESULTS_DIR}/benchmarks/results/csvs/ diff --git a/regression/mip_regression_test.sh b/regression/mip_regression_test.sh new file mode 100644 index 000000000..09a14cd27 --- /dev/null +++ b/regression/mip_regression_test.sh @@ -0,0 +1,42 @@ +#!/bin/bash +# SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
+ +# Abort script on first error +set -e + +DELAY=30 + +# Must ensure PROJECT_DIR is exported first then load rapids-mg-tools env +export PROJECT_DIR=${PROJECT_DIR:-$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)} +source ${PROJECT_DIR}/config.sh +source ${PROJECT_DIR}/functions.sh + +################################################################################ + +# Extract the build meta-data from either the conda environment or the +# cugraph source dir and write out a file which can be read by other +# scripts. If the cugraph conda packages are present, those take +# precedence, otherwise meta-data will be extracted from the sources. + +GIT_COMMIT="abc" #$(cd ${WORKSPACE}/${REPO_DIR_NAME}; git rev-parse HEAD) +LOG_PATH=${RESULTS_DIR}/benchmarks/ + +nvidia-smi + +mkdir -p ${RESULTS_DIR}/benchmarks/results/csvs/ +#rm -rf ${WORKSPACE}/${RESULT_DIR_NAME}/data/regressions.csv + +logger "Running mip tests ........" +python ${CUOPT_SCRIPTS_DIR}/benchmark_scripts/benchmark.py -c ${MIP_CONFIGS_PATH} -r ${RESULTS_DIR}/benchmarks/results/csvs/ -g ${GIT_COMMIT} -l ${LOG_PATH} -s ${RESULTS_DIR}/benchmarks/results/mip_tests_status.txt +logger "Completed mip tests ........" + + +#cp ${WORKSPACE}/${RESULT_DIR_NAME}/data/* ${RESULTS_DIR}/benchmarks/results/csvs/ diff --git a/regression/report.sh b/regression/report.sh new file mode 100755 index 000000000..288a88c6c --- /dev/null +++ b/regression/report.sh @@ -0,0 +1,67 @@ +#!/bin/bash +# Copyright (c) 2021, NVIDIA CORPORATION. + +# Creates a conda environment to be used for cuopt benchmarking. 
+ +# Abort script on first error +set -e +# Must ensure PROJECT_DIR is exported first then load rapids-mg-tools env +export PROJECT_DIR=${PROJECT_DIR:-$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)} +if [ -n "$RAPIDS_MG_TOOLS_DIR" ]; then + source ${RAPIDS_MG_TOOLS_DIR}/script-env.sh +elif [ -n "$(which script-env.sh)" ]; then + source $(which script-env.sh) +else + echo "Error: \$RAPIDS_MG_TOOLS_DIR/script-env.sh could not be read nor was script-env.sh in PATH." + exit 1 +fi + +activateCondaEnv + +################################################################################ +# Send report based on contents of $RESULTS_DIR +# These steps do not require a worker node. + +# When running both testing and benchmark and if some benchmarks fail, +# the entire nightly will fail. The benchmark logs reported on Slack +# contains information about the failures. +logger "Generating report" + +if [ -f $METADATA_FILE ]; then + source $METADATA_FILE +fi + +RUN_ASV_OPTION="" +if hasArg --skip-asv; then + logger "Skipping running ASV" +else + # Only create/update the asv database if there is both a commit Hash and a branch otherwise + # asv will return an error. If there is $PROJECT_BUILD, that implies there is Neither the + # git commit hash nor the branch which are required to create/update the asv db + if [[ "$PROJECT_BUILD" == "" ]]; then + # Update/create the ASV database + logger "Updating ASV database" + python $PROJECT_DIR/update_asv_database.py --commitHash=$PROJECT_VERSION --repo-url=$PROJECT_REPO_URL --branch=$PROJECT_REPO_BRANCH --commitTime=$PROJECT_REPO_TIME --results-dir=$RESULTS_DIR --machine-name=$MACHINE --gpu-type=$GPU_TYPE + RUN_ASV_OPTION=--run-asv + else + logger "Detected a conda install, cannot run ASV since a commit hash/time is needed." 
+ fi +fi + +if hasArg --spreadsheet; then + logger "Generating spreadsheet" + export SPREADSHEET_URL=$(python $PROJECT_DIR/gsheet-report.py --results-dir=$RESULTS_DIR |grep "spreadsheet url is"|cut -d ' ' -f4) + #python $PROJECT_DIR/gsheet-report.py --results-dir=$RESULTS_DIR + +fi + +${SCRIPTS_DIR}/create-html-reports.sh $RUN_ASV_OPTION + +if hasArg --skip-sending-report; then + logger "Skipping sending report." +else + logger "Uploading to S3, posting to Slack" + ${PROJECT_DIR}/send-slack-report.sh +fi + +logger "cronjob.sh done." diff --git a/regression/routing_regression_test.sh b/regression/routing_regression_test.sh new file mode 100644 index 000000000..8bc335625 --- /dev/null +++ b/regression/routing_regression_test.sh @@ -0,0 +1,43 @@ +#!/bin/bash +# SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +# Abort script on first error +set -e + +DELAY=30 + +# Must ensure PROJECT_DIR is exported first then load rapids-mg-tools env +export PROJECT_DIR=${PROJECT_DIR:-$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)} +source ${PROJECT_DIR}/config.sh +source ${PROJECT_DIR}/functions.sh + +################################################################################ + +# Extract the build meta-data from either the conda environment or the +# cugraph source dir and write out a file which can be read by other +# scripts. If the cugraph conda packages are present, those take +# precedence, otherwise meta-data will be extracted from the sources. 
+ +GIT_COMMIT="abc" #$(cd ${WORKSPACE}/${REPO_DIR_NAME}; git rev-parse HEAD) +LOG_PATH=${RESULTS_DIR}/benchmarks/ + +nvidia-smi + +mkdir -p ${RESULTS_DIR}/benchmarks/results/csvs/ +#rm -rf ${WORKSPACE}/${RESULT_DIR_NAME}/data/regressions.csv + + +logger "Running routing tests ........" +python ${CUOPT_SCRIPTS_DIR}/benchmark_scripts/benchmark.py -c ${ROUTING_CONFIGS_PATH} -r ${RESULTS_DIR}/benchmarks/results/csvs/ -g ${GIT_COMMIT} -l ${LOG_PATH} -s ${RESULTS_DIR}/benchmarks/results/routing_tests_status.txt +logger "Completed routing tests ........" + + +#cp ${WORKSPACE}/${RESULT_DIR_NAME}/data/* ${RESULTS_DIR}/benchmarks/results/csvs/ diff --git a/regression/run_regression.sh b/regression/run_regression.sh new file mode 100644 index 000000000..05f8917b8 --- /dev/null +++ b/regression/run_regression.sh @@ -0,0 +1,27 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
+ +# Get latest set of datasets +#rm -rf $SCRATCH_DIR/routing_configs/* +#rm -rf $SCRATCH_DIR/lp_configs/* +#rm -rf $SCRATCH_DIR/mip_configs/* + +#aws s3 cp s3://cuopt-datasets/regression_datasets/ $SCRATCH_DIR/routing_configs/ --recursive +#aws s3 cp s3://cuopt-datasets/lp_datasets/ $SCRATCH_DIR/lp_configs/ --recursive +#aws s3 cp s3://cuopt-datasets/mip_datasets/ $SCRATCH_DIR/mip_configs/ --recursive + +# Git clone multi gpu tools + +##rm -rf $SCRATCH_DIR/multi-gpu-tools + +##git clone ssh://git@gitlab-master.nvidia.com:12051/ramakrishnap/multi-gpu-tools.git $SCRATCH_DIR/multi-gpu-tools + +# Run build and test +bash $SCRATCH_DIR/cuopt/regression/cronjob.sh --build-cuopt-env --benchmark --skip-spreadsheet diff --git a/regression/save_benchmark_results.py b/regression/save_benchmark_results.py new file mode 100644 index 000000000..595469efb --- /dev/null +++ b/regression/save_benchmark_results.py @@ -0,0 +1,41 @@ +import pandas as pd +from pathlib import Path +import os +import argparse + +def create_update_benchamrk_db(benchmark_path, output_path, commit_hash): + bench_path = Path(benchmark_path)/"benchmarks" + out_path = output_path + "/benchmarks/" + + # List all benchmark_result files + benchmark_result_list = bench_path.glob("results*.csv") + + for file in benchmark_result_list: + with open(file, 'r') as openfile: + data = pd.read_csv(openfile, index_col="test") + data["commit_hash"] = commit_hash + for index, rows in data.iterrows(): + out_file = index.split(".")[0] + ".csv" + out_file_path = out_path + "/" + out_file + + if os.path.exists(out_file_path): + data = pd.read_csv(out_file_path) + data = pd.concat([data, rows.to_frame().T], ignore_index=True) + data.to_csv(out_file_path, index=False) + else: + rows.to_frame().T.to_csv(out_file_path, index=False) + +parser = argparse.ArgumentParser() +parser.add_argument("-b", "--benchmark_path", help = "Path to new sets of results") +parser.add_argument("-o", "--output_path", help = "Path to save results") 
+parser.add_argument("-c", "--commit_hash", help = "Git commit hash for the run") + +# Read arguments from command line +args = parser.parse_args() + +if args.benchmark_path and args.output_path and args.commit_hash: + create_update_benchamrk_db(args.benchmark_path, args.output_path, args.commit_hash) +else: + raise ValueError("Missing mandatory options, please provide all the options") + + diff --git a/regression/save_benchmarks.sh b/regression/save_benchmarks.sh new file mode 100644 index 000000000..2ad3e25ba --- /dev/null +++ b/regression/save_benchmarks.sh @@ -0,0 +1,37 @@ +#!/bin/bash +# Copyright (c) 2021, NVIDIA CORPORATION. + +# Abort script on first error +set -e + +# Must ensure PROJECT_DIR is exported first then load rapids-mg-tools env +export PROJECT_DIR=${PROJECT_DIR:-$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)} +if [ -n "$RAPIDS_MG_TOOLS_DIR" ]; then + source ${RAPIDS_MG_TOOLS_DIR}/script-env.sh +elif [ -n "$(which script-env.sh)" ]; then + source $(which script-env.sh) +else + echo "Error: \$RAPIDS_MG_TOOLS_DIR/script-env.sh could not be read nor was script-env.sh in PATH." + exit 1 +fi + +################################################################################ + +# Extract the build meta-data from either the conda environment or the +# cugraph source dir and write out a file which can be read by other +# scripts. If the cugraph conda packages are present, those take +# precedence, otherwise meta-data will be extracted from the sources. + +#module load cuda/11.0.3 +activateCondaEnv + +echo "Saving benchmarks ........" 
+ +echo $1 +echo ${RESULTS_DIR} +echo ${PROJECT_VERSION} + +python ${CUOPT_SCRIPTS_DIR}/save_benchmark_results.py -b ${RESULTS_DIR} -o ${CUOPT_SCRIPTS_DIR} -c $1 + +cd ${CUOPT_SCRIPTS_DIR}; git add benchmarks/*; git commit -m "update benchmarks"; git push; cd - + diff --git a/regression/send-slack-report.sh b/regression/send-slack-report.sh new file mode 100755 index 000000000..206265b3f --- /dev/null +++ b/regression/send-slack-report.sh @@ -0,0 +1,116 @@ +#!/bin/bash +# Copyright (c) 2020-2021, NVIDIA CORPORATION. + +# Abort script on first error +set -e + + +# Must ensure PROJECT_DIR is exported first then load rapids-mg-tools env +export PROJECT_DIR=${PROJECT_DIR:-$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)} +if [ -n "$RAPIDS_MG_TOOLS_DIR" ]; then + source ${RAPIDS_MG_TOOLS_DIR}/script-env.sh +elif [ -n "$(which script-env.sh)" ]; then + source $(which script-env.sh) +else + echo "Error: \$RAPIDS_MG_TOOLS_DIR/script-env.sh could not be read nor was script-env.sh in PATH." + exit 1 +fi + +################################################################################ +# Need to activate a specific conda env to use AWS CLI tools. +# NOTE: the AWS CLI tools are also available to install directly: +# https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-linux.html + +activateCondaEnv + +# FIXME : These env variables should already be exported like RESULTS_DIR but +# verify that before removing them +TESTING_RESULTS_DIR=${RESULTS_DIR}/tests +BENCHMARK_RESULTS_DIR=${RESULTS_DIR}/benchmarks + +# Get the overall status. +ALL_REPORTS=$(find -L $RESULTS_DIR -maxdepth 2 -name "*-results-*.txt") +BENCHMARK_REPORT=$(find -L $BENCHMARK_RESULTS_DIR -maxdepth 1 -name "*-results-*.txt") +TEST_REPORT=$(find -L $TESTING_RESULTS_DIR -maxdepth 1 -name "*-results-*.txt") + +STATUS='FAILED' +STATUS_IMG='https://img.icons8.com/cotton/80/000000/cancel--v1.png' +if [ "$ALL_REPORTS" != "" ]; then + if ! 
(grep -w FAILED $ALL_REPORTS > /dev/null); then + STATUS='PASSED' + STATUS_IMG='https://img.icons8.com/bubbles/100/000000/approval.png' + fi + +fi + +# Generate a one-line summary based on existance of certain reports, etc. +if [[ "$ALL_REPORTS" == "" ]]; then + ONE_LINE_SUMMARY="*Build failed*" +elif [[ "$STATUS" == "FAILED" ]]; then + if (grep -w FAILED $BENCHMARK_REPORT > /dev/null); then + ONE_LINE_SUMMARY="*One or more benchmarks failed*" + fi + if (grep -w FAILED $TEST_REPORT > /dev/null); then + ONE_LINE_SUMMARY="*One or more tests failed*" + fi + if (grep -w FAILED $TEST_REPORT > /dev/null) && (grep -w FAILED $BENCHMARK_REPORT > /dev/null); then + ONE_LINE_SUMMARY="*One or more tests and benchmarks failed*" + fi +else + ONE_LINE_SUMMARY="Build succeeded, all tests and benchmarks passed" +fi + +RESULTS_DIR_NAME=$(basename $(getNonLinkedFileName $RESULTS_DIR)) + +# Upload everything +logger "Uploading all files in $RESULTS_DIR ..." +logger "Uploading all files in $RESULTS_DIR_NAME ..." +aws s3 cp --follow-symlinks --acl public-read --recursive ${RESULTS_DIR} ${S3_FILE_PREFIX}/${RESULTS_DIR_NAME} +logger "done uploading all files in $RESULTS_DIR" + +# Set vars used in the report +PROJECT_VERSION_STRING="" +PROJECT_VERSION="" +PROJECT_BUILD="" +PROJECT_CHANNEL="" +PROJECT_REPO_URL="" +PROJECT_REPO_BRANCH="" +if [ -f $METADATA_FILE ]; then + source $METADATA_FILE +fi +# Assume if PROJECT_BUILD is set then a conda version string should be +# created, else a git version string. 
+if [[ "$PROJECT_BUILD" != "" ]]; then + PROJECT_VERSION_STRING=" cuOpt ver.: $PROJECT_VERSION + build: $PROJECT_BUILD + channel: $PROJECT_CHANNEL" +else + PROJECT_VERSION_STRING=" cuOpt ver.: $PROJECT_VERSION + repo: $PROJECT_REPO_URL + branch: $PROJECT_REPO_BRANCH" +fi + +export STATUS +export STATUS_IMG +export PROJECT_VERSION_STRING +export HUMAN_READABLE_DATE="$(date '+`%D`, `%H:%M` (PT)')" +# These files should be created by create-html-reports.sh +export REPORT_URL="${S3_URL_PREFIX}/${RESULTS_DIR_NAME}/report.html" +export ASV_URL="${S3_URL_PREFIX}/${RESULTS_DIR_NAME}/benchmarks/asv/html/index.html" +export LOGS_URL="${S3_URL_PREFIX}/${RESULTS_DIR_NAME}/index.html" +# export SPREADSHEET_URL=$SPREADSHEET_URL +export ONE_LINE_SUMMARY + +echo +echo "REPORT_URL: ${REPORT_URL}" +# echo "SPREADSHEET_URL: ${SPREADSHEET_URL}" + +if hasArg --skip-sending-report; then + logger "Skipping sending Slack report." +else + echo "$(envsubst < ${PROJECT_DIR}/slack_msg.json)" + curl -X POST \ + -H 'Content-type: application/json' \ + --data "$(envsubst < ${PROJECT_DIR}/slack_msg.json)" \ + ${WEBHOOK_URL} +fi diff --git a/regression/setup-benchmark-dir.sh b/regression/setup-benchmark-dir.sh new file mode 100755 index 000000000..c51e46211 --- /dev/null +++ b/regression/setup-benchmark-dir.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# Copyright (c) 2021, NVIDIA CORPORATION. + +# Abort script on first error +set -e + +# Must ensure PROJECT_DIR is exported first then load rapids-mg-tools env +export PROJECT_DIR=${PROJECT_DIR:-$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)} +if [ -n "$RAPIDS_MG_TOOLS_DIR" ]; then + source ${RAPIDS_MG_TOOLS_DIR}/script-env.sh +elif [ -n "$(which script-env.sh)" ]; then + source $(which script-env.sh) +else + echo "Error: \$RAPIDS_MG_TOOLS_DIR/script-env.sh could not be read nor was script-env.sh in PATH." + exit 1 +fi + +################################################################################ + + + +if [ ! 
-d ${WORKSPACE}/${REPO_DIR_NAME} ]; then + cloneRepo "$CUGRAPH_REPO_URL" $REPO_DIR_NAME $WORKSPACE +fi + + +rm -rf ${BENCHMARK_DIR} +mkdir -p ${BENCHMARK_DIR} +cp -r ${WORKSPACE}/${REPO_DIR_NAME}/benchmarks/python_e2e ${BENCHMARK_DIR} \ No newline at end of file diff --git a/regression/slack_msg.json b/regression/slack_msg.json new file mode 100644 index 000000000..a73e659b2 --- /dev/null +++ b/regression/slack_msg.json @@ -0,0 +1,68 @@ +{ + "channel": "cuopt-regression-testing", + "username": "cuOpt Messaging", + "icon_emoji": ":robot_face:", + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "${ONE_LINE_SUMMARY}" + } + }, + { + "type": "divider" + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "<${REPORT_URL}|*Results Report*>\nBuild status and test results." + }, + "accessory": { + "type": "button", + "url": "${REPORT_URL}", + "text": { + "type": "plain_text", + "emoji": true, + "text": "View" + }, + "value": "click_me_123" + } + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "<${ASV_URL}|*ASV Dashboard*>\nBenchmark results." + }, + "accessory": { + "type": "button", + "url": "${ASV_URL}", + "text": { + "type": "plain_text", + "emoji": true, + "text": "View" + }, + "value": "click_me_123" + } + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "<${LOGS_URL}|*Logs*>\nAll available logs." + }, + "accessory": { + "type": "button", + "url": "${LOGS_URL}", + "text": { + "type": "plain_text", + "emoji": true, + "text": "View" + }, + "value": "click_me_123" + } + } + ] +} diff --git a/regression/test-container.sh b/regression/test-container.sh new file mode 100644 index 000000000..85732ec2e --- /dev/null +++ b/regression/test-container.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + +# Creates a conda environment to be used for cuopt benchmarking. + +# Abort script on first error +set -e + +# Must ensure PROJECT_DIR is exported first then load rapids-mg-tools env +export PROJECT_DIR=${PROJECT_DIR:-$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)} + +################################################################################ + +# Test +logger "Testing container image $IMAGE" +python -c "import cuopt; print(cuopt)" + +# Other scripts look for this to be the last line to determine if this +# script completed successfully. This is only possible because of the +# "set -e" above. +echo "done." +logger "done." diff --git a/regression/update_asv_database.py b/regression/update_asv_database.py new file mode 100644 index 000000000..f505ed9ee --- /dev/null +++ b/regression/update_asv_database.py @@ -0,0 +1,132 @@ +# Copyright (c) 2021, NVIDIA CORPORATION. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from pathlib import Path
import platform

import psutil
import pandas as pd
from asvdb import BenchmarkInfo, BenchmarkResult, ASVDb


def update_asv_db(commitHash=None,
                  commitTime=None,
                  branch=None,
                  repo_url=None,
                  results_dir=None,
                  machine_name=None,
                  gpu_type=None):
    """
    Read the results*.csv benchmark files in <results_dir>/benchmarks and
    update an existing asv benchmark database, or create one if none
    exists in <results_dir>/benchmarks/asv. If no results*.csv files are
    present, return without updating or creating anything.

    Parameters
    ----------
    commitHash : str
        Git commit hash the benchmarks were run against.
    commitTime : int
        Commit time as a Unix timestamp in seconds (converted to
        milliseconds internally, as asv expects).
    branch : str
        Git branch name.
    repo_url : str
        URL of the project repository.
    results_dir : str
        Directory containing the "benchmarks" subdirectory with results.
    machine_name : str
        Name of the machine the benchmarks ran on.
    gpu_type : str
        Official product name of the GPU.
    """
    benchmark_dir_path = Path(results_dir) / "benchmarks"
    asv_dir_path = benchmark_dir_path / "asv"

    # Sort for a deterministic insertion order into the asv database.
    result_files = sorted(benchmark_dir_path.glob("results*.csv"))

    bResultList = []
    for file_path in result_files:
        # read_csv accepts a path directly; no need to open() first.
        data = pd.read_csv(file_path, index_col="test")
        fname = str(file_path)
        if "service_endpoint" in fname or "service_method" in fname:
            # Service-level results only carry a single run_time column.
            prefix = ("Service_Endpoint" if "service_endpoint" in fname
                      else "Service_Method")
            for test_name, row in data.iterrows():
                bResultList.append(BenchmarkResult(
                    funcName=f"{prefix}.{test_name}_runtime",
                    result=row["run_time"],
                    unit="Seconds"))
        else:
            # Solver-level results carry runtime, ETL, memory and cost.
            for test_name, row in data.iterrows():
                bResultList.append(BenchmarkResult(
                    funcName=f"{test_name}_solver_runtime",
                    result=row["solver_run_time"],
                    unit="Seconds"))
                bResultList.append(BenchmarkResult(
                    funcName=f"{test_name}_etl_runtime",
                    result=row["etl_time"],
                    unit="Seconds"))
                bResultList.append(BenchmarkResult(
                    funcName=f"{test_name}_memory",
                    result=row["memory"],
                    unit="MB"))
                bResultList.append(BenchmarkResult(
                    funcName=f"{test_name}_travel_cost",
                    result=row["travel_cost"],
                    unit="Distance"))

    if not bResultList:
        # Message matches the glob pattern actually used above.
        print("Could not find files matching 'results*.csv' in "
              f"{benchmark_dir_path}, not creating/updating ASV database "
              f"in {asv_dir_path}.")
        return

    uname = platform.uname()
    # Trim osType to "<system> <major>.<minor>" (drop kernel build detail).
    osType = "%s %s" % (uname.system, uname.release)
    osType = ".".join(osType.split("-")[0].split(".", 2)[:2])
    # Trim python version to "<major>.<minor>".
    pythonVer = ".".join(platform.python_version().split(".", 2)[:2])

    bInfo = BenchmarkInfo(
        machineName=machine_name,
        osType=osType,
        pythonVer=pythonVer,
        commitHash=commitHash,
        branch=branch,
        # asv expects commit time in milliseconds.
        commitTime=commitTime * 1000,
        gpuType=gpu_type,
        # NOTE(review): platform.uname().processor may be empty on some
        # Linux distros — confirm acceptable for the asv machine record.
        cpuType=uname.processor,
        arch=uname.machine,
        ram="%d" % psutil.virtual_memory().total,
    )

    db = ASVDb(dbDir=str(asv_dir_path),
               repo=repo_url,
               branches=[branch])

    for res in bResultList:
        db.addResult(bInfo, res)


if __name__ == "__main__":
    import argparse
    ap = argparse.ArgumentParser()
    ap.add_argument("--commitHash", type=str, required=True,
                    help="project version")
    ap.add_argument("--commitTime", type=str, required=True,
                    help="project version date")
    ap.add_argument("--repo-url", type=str, required=True,
                    help="project repo url")
    ap.add_argument("--branch", type=str, required=True,
                    help="project branch")
    ap.add_argument("--results-dir", type=str, required=True,
                    help="directory to store the results in json files")
    ap.add_argument("--machine-name", type=str, required=True,
                    help="Slurm cluster name")
    ap.add_argument("--gpu-type", type=str, required=True,
                    help="the official product name of the GPU")
    args = ap.parse_args()

    update_asv_db(commitHash=args.commitHash,
                  commitTime=int(args.commitTime),
                  branch=args.branch,
                  repo_url=args.repo_url,
                  results_dir=args.results_dir,
                  machine_name=args.machine_name,
                  gpu_type=args.gpu_type)
+if (conda list | grep -q libcuopt); then + ${SCRIPTS_DIR}/write-meta-data.sh --from-conda +else + ${SCRIPTS_DIR}/write-meta-data.sh --from-source +fi From b25ab2c33279fe58a667b5d9d25fe619017e3b78 Mon Sep 17 00:00:00 2001 From: Ishika Roy Date: Wed, 10 Sep 2025 15:02:12 -0700 Subject: [PATCH 2/6] add regression changes --- .../benchmarks/clear_optimization_data.csv | 6 ----- .../full_solve_matrix_data_off_10.csv | 6 ----- .../full_solve_matrix_data_off_1000.csv | 6 ----- .../full_solve_matrix_data_on_10.csv | 6 ----- .../full_solve_matrix_data_on_1000.csv | 6 ----- .../full_solve_sync_matrix_data_off_10.csv | 6 ----- .../full_solve_sync_matrix_data_off_1000.csv | 6 ----- .../full_solve_sync_wpg_data_off_10.csv | 6 ----- .../full_solve_sync_wpg_data_off_1000.csv | 6 ----- .../benchmarks/full_solve_wpg_data_off_10.csv | 6 ----- .../full_solve_wpg_data_off_1000.csv | 6 ----- .../get_optimization_data_state_empty.csv | 6 ----- regression/benchmarks/health.csv | 6 ----- regression/benchmarks/homberger_C1_10_1.csv | 22 ------------------- regression/benchmarks/homberger_C1_10_4.csv | 22 ------------------- regression/benchmarks/homberger_C1_10_9.csv | 22 ------------------- regression/benchmarks/linlim_LC1_10_1.csv | 21 ------------------ regression/benchmarks/linlim_LC1_10_4.csv | 21 ------------------ regression/benchmarks/linlim_LC1_10_9.csv | 21 ------------------ regression/benchmarks/set_config.csv | 6 ----- regression/benchmarks/set_fleet_2.csv | 6 ----- regression/benchmarks/set_fleet_800.csv | 6 ----- regression/benchmarks/set_matrix_10.csv | 6 ----- regression/benchmarks/set_matrix_1000.csv | 6 ----- regression/benchmarks/set_tasks_10.csv | 6 ----- regression/benchmarks/set_tasks_1000.csv | 6 ----- .../benchmarks/set_waypoint_graph_10.csv | 6 ----- .../benchmarks/set_waypoint_graph_1000.csv | 6 ----- .../benchmarks/solve_delta_matrix_10.csv | 6 ----- .../benchmarks/solve_delta_matrix_1000.csv | 6 ----- regression/benchmarks/solve_delta_wpg_10.csv | 6 ----- 
.../benchmarks/solve_delta_wpg_1000.csv | 6 ----- regression/benchmarks/update_config.csv | 6 ----- regression/benchmarks/update_fleet_2.csv | 6 ----- regression/benchmarks/update_fleet_800.csv | 6 ----- regression/benchmarks/update_matrix_10.csv | 6 ----- regression/benchmarks/update_matrix_1000.csv | 6 ----- regression/benchmarks/update_tasks_10.csv | 6 ----- regression/benchmarks/update_tasks_1000.csv | 6 ----- .../benchmarks/update_waypoint_graph_10.csv | 6 ----- .../benchmarks/update_waypoint_graph_1000.csv | 6 ----- 41 files changed, 339 deletions(-) delete mode 100644 regression/benchmarks/clear_optimization_data.csv delete mode 100644 regression/benchmarks/full_solve_matrix_data_off_10.csv delete mode 100644 regression/benchmarks/full_solve_matrix_data_off_1000.csv delete mode 100644 regression/benchmarks/full_solve_matrix_data_on_10.csv delete mode 100644 regression/benchmarks/full_solve_matrix_data_on_1000.csv delete mode 100644 regression/benchmarks/full_solve_sync_matrix_data_off_10.csv delete mode 100644 regression/benchmarks/full_solve_sync_matrix_data_off_1000.csv delete mode 100644 regression/benchmarks/full_solve_sync_wpg_data_off_10.csv delete mode 100644 regression/benchmarks/full_solve_sync_wpg_data_off_1000.csv delete mode 100644 regression/benchmarks/full_solve_wpg_data_off_10.csv delete mode 100644 regression/benchmarks/full_solve_wpg_data_off_1000.csv delete mode 100644 regression/benchmarks/get_optimization_data_state_empty.csv delete mode 100644 regression/benchmarks/health.csv delete mode 100644 regression/benchmarks/homberger_C1_10_1.csv delete mode 100644 regression/benchmarks/homberger_C1_10_4.csv delete mode 100644 regression/benchmarks/homberger_C1_10_9.csv delete mode 100644 regression/benchmarks/linlim_LC1_10_1.csv delete mode 100644 regression/benchmarks/linlim_LC1_10_4.csv delete mode 100644 regression/benchmarks/linlim_LC1_10_9.csv delete mode 100644 regression/benchmarks/set_config.csv delete mode 100644 
regression/benchmarks/set_fleet_2.csv delete mode 100644 regression/benchmarks/set_fleet_800.csv delete mode 100644 regression/benchmarks/set_matrix_10.csv delete mode 100644 regression/benchmarks/set_matrix_1000.csv delete mode 100644 regression/benchmarks/set_tasks_10.csv delete mode 100644 regression/benchmarks/set_tasks_1000.csv delete mode 100644 regression/benchmarks/set_waypoint_graph_10.csv delete mode 100644 regression/benchmarks/set_waypoint_graph_1000.csv delete mode 100644 regression/benchmarks/solve_delta_matrix_10.csv delete mode 100644 regression/benchmarks/solve_delta_matrix_1000.csv delete mode 100644 regression/benchmarks/solve_delta_wpg_10.csv delete mode 100644 regression/benchmarks/solve_delta_wpg_1000.csv delete mode 100644 regression/benchmarks/update_config.csv delete mode 100644 regression/benchmarks/update_fleet_2.csv delete mode 100644 regression/benchmarks/update_fleet_800.csv delete mode 100644 regression/benchmarks/update_matrix_10.csv delete mode 100644 regression/benchmarks/update_matrix_1000.csv delete mode 100644 regression/benchmarks/update_tasks_10.csv delete mode 100644 regression/benchmarks/update_tasks_1000.csv delete mode 100644 regression/benchmarks/update_waypoint_graph_10.csv delete mode 100644 regression/benchmarks/update_waypoint_graph_1000.csv diff --git a/regression/benchmarks/clear_optimization_data.csv b/regression/benchmarks/clear_optimization_data.csv deleted file mode 100644 index edcc31a38..000000000 --- a/regression/benchmarks/clear_optimization_data.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -0.0041052934131585,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0026461367844603,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0031441673054359,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0032693036133423,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -0.0109413468002458,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac 
diff --git a/regression/benchmarks/full_solve_matrix_data_off_10.csv b/regression/benchmarks/full_solve_matrix_data_off_10.csv deleted file mode 100644 index 4b3e08e17..000000000 --- a/regression/benchmarks/full_solve_matrix_data_off_10.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -0.092595966591034,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0909385792911052,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0861730184755288,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.084613940725103,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -0.1967066440993221,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/full_solve_matrix_data_off_1000.csv b/regression/benchmarks/full_solve_matrix_data_off_1000.csv deleted file mode 100644 index 5bd7062b6..000000000 --- a/regression/benchmarks/full_solve_matrix_data_off_1000.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -2.898428950691596,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -2.7740834328811617,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -2.7887534837936983,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -2.732585116138216,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -8.018868986098823,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/full_solve_matrix_data_on_10.csv b/regression/benchmarks/full_solve_matrix_data_on_10.csv deleted file mode 100644 index d10214bf0..000000000 --- a/regression/benchmarks/full_solve_matrix_data_on_10.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -0.0967919212067499,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0893040685099549,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0923860481823794,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf 
-0.0971072109881788,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -0.2034532838006271,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/full_solve_matrix_data_on_1000.csv b/regression/benchmarks/full_solve_matrix_data_on_1000.csv deleted file mode 100644 index fe6a4623b..000000000 --- a/regression/benchmarks/full_solve_matrix_data_on_1000.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -11.469153839105276,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -11.13433374390006,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -11.183070917613804,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -11.18312375949463,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -33.7928514968,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/full_solve_sync_matrix_data_off_10.csv b/regression/benchmarks/full_solve_sync_matrix_data_off_10.csv deleted file mode 100644 index 2ac3c367e..000000000 --- a/regression/benchmarks/full_solve_sync_matrix_data_off_10.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -0.0691923394217155,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0651698967907577,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0671704228967428,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0672970376908779,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -0.1821914255015144,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/full_solve_sync_matrix_data_off_1000.csv b/regression/benchmarks/full_solve_sync_matrix_data_off_1000.csv deleted file mode 100644 index 32fbe04e0..000000000 --- a/regression/benchmarks/full_solve_sync_matrix_data_off_1000.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -3.606089364201762,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf 
-3.4653110398212448,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -3.4959583321004173,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -3.5713294571847656,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -10.516578904900234,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/full_solve_sync_wpg_data_off_10.csv b/regression/benchmarks/full_solve_sync_wpg_data_off_10.csv deleted file mode 100644 index d58e1150a..000000000 --- a/regression/benchmarks/full_solve_sync_wpg_data_off_10.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -0.0859221719903871,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0878685727715492,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0826912648160941,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0830997216049581,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -0.2035729169008845,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/full_solve_sync_wpg_data_off_1000.csv b/regression/benchmarks/full_solve_sync_wpg_data_off_1000.csv deleted file mode 100644 index d211a427e..000000000 --- a/regression/benchmarks/full_solve_sync_wpg_data_off_1000.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -4.490188621985726,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -4.402784487907775,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -4.422826303972397,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -4.39203272620216,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -13.783291200599342,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/full_solve_wpg_data_off_10.csv b/regression/benchmarks/full_solve_wpg_data_off_10.csv deleted file mode 100644 index 975fce6f7..000000000 --- a/regression/benchmarks/full_solve_wpg_data_off_10.csv +++ /dev/null 
@@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -0.102928383112885,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.1016542179044336,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0938111932831816,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.1761252559022978,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -0.2027525610988959,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/full_solve_wpg_data_off_1000.csv b/regression/benchmarks/full_solve_wpg_data_off_1000.csv deleted file mode 100644 index 47b32ea06..000000000 --- a/regression/benchmarks/full_solve_wpg_data_off_1000.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -4.142862814525143,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -3.972703066887334,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -3.9499765183078126,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -3.935864896082785,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -11.48112585589988,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/get_optimization_data_state_empty.csv b/regression/benchmarks/get_optimization_data_state_empty.csv deleted file mode 100644 index bead231d4..000000000 --- a/regression/benchmarks/get_optimization_data_state_empty.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -0.0043799643055535,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0028748559881933,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0033562235999852,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0031923993839882,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -0.0073803732979285,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/health.csv b/regression/benchmarks/health.csv deleted file mode 100644 index 
8d26fdb5e..000000000 --- a/regression/benchmarks/health.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -0.0044362302869558,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0026844964013434,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0031451011775061,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0029689648887142,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -0.0067414406003081,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/homberger_C1_10_1.csv b/regression/benchmarks/homberger_C1_10_1.csv deleted file mode 100644 index 08e2ebe14..000000000 --- a/regression/benchmarks/homberger_C1_10_1.csv +++ /dev/null @@ -1,22 +0,0 @@ -status,solver_run_time,etl_time,memory,err_msg,time_stamp,travel_cost,commit_hash -SUCCESS,202.94488906860352,6.14421534538269,3852,Try relaxing Time Window constraints,01_18_2023_13_24_39,42479.20703125,4a550c4d8a18beddaef2884e3321b54046d3ca5a -SUCCESS,200.23936939239505,6.696606159210205,3852,Try relaxing Time Window constraints,01_18_2023_14_32_48,42479.20703125,4a550c4d8a18beddaef2884e3321b54046d3ca5a -SUCCESS,202.26948261260983,5.718092441558838,3852,Try relaxing Time Window constraints,01_18_2023_16_24_33,42479.20703125,4a550c4d8a18beddaef2884e3321b54046d3ca5a -SUCCESS,202.1016023159027,6.568071603775024,3852,Try relaxing Time Window constraints,01_19_2023_00_36_42,42479.20703125,4a550c4d8a18beddaef2884e3321b54046d3ca5a -SUCCESS,200.4102230072021,4.566569805145264,3852,Try relaxing Time Window constraints,01_20_2023_10_12_50,42479.20703125,d8b5ac3dee312da96440a4879c7f258d955cae53 -SUCCESS,202.70853686332703,6.867353677749634,3852,Try relaxing Time Window constraints,01_21_2023_00_53_04,42479.20703125,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 -SUCCESS,200.31551885604856,5.373513460159302,3852,Try relaxing Time Window constraints,01_22_2023_00_14_13,42479.20703125,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 
-SUCCESS,202.78480195999143,4.6431286334991455,3852,Try relaxing Time Window constraints,01_23_2023_00_16_16,42479.20703125,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 -SUCCESS,202.963894367218,5.030569076538086,3852,Try relaxing Time Window constraints,01_24_2023_00_23_59,42479.20703125,1e0f2faebb52900fd0a678d5e0a2b1b93112820d -SUCCESS,200.27961683273315,4.810116767883301,3852,Try relaxing Time Window constraints,01_24_2023_08_00_08,42479.20703125,1e0f2faebb52900fd0a678d5e0a2b1b93112820d -SUCCESS,202.98268461227417,5.974777460098267,3852,Try relaxing Time Window constraints,01_24_2023_08_50_41,42479.20703125,1e0f2faebb52900fd0a678d5e0a2b1b93112820d -SUCCESS,202.81959056854248,5.531239986419678,3852,Try relaxing Time Window constraints,01_25_2023_00_39_29,42479.20703125,1e0f2faebb52900fd0a678d5e0a2b1b93112820d -SUCCESS,200.29493069648743,4.704516410827637,3852,Try relaxing Time Window constraints,01_26_2023_00_16_03,42479.20703125,cec6da5b7083e002441900cfca186292f481936f -SUCCESS,202.062420129776,4.859360218048096,3852,Try relaxing Time Window constraints,01_27_2023_00_23_38,42479.20703125,a51fe6c34510cd20b93ec6cfc15b8509d0babfca -SUCCESS,202.6819722652436,7.775798320770264,3852,Try relaxing Time Window constraints,01_28_2023_00_46_05,42479.20703125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,202.8960475921631,7.011343240737915,3852,Try relaxing Time Window constraints,01_29_2023_00_58_14,42479.20703125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,203.09712195396423,5.212660074234009,3852,Try relaxing Time Window constraints,01_30_2023_00_47_19,42479.20703125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,202.71639442443848,6.375207424163818,3852,Try relaxing Time Window constraints,01_30_2023_12_41_09,42479.20703125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,202.4589099884033,5.115618705749512,3852,Try relaxing Time Window constraints,01_30_2023_15_50_22,42479.20703125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf 
-SUCCESS,202.56271409988403,5.815870761871338,3852,Try relaxing Time Window constraints,01_31_2023_01_10_25,42479.20703125,3354f792963b368a476c07ef11e1fcd66ca5ea5f -SUCCESS,203.2009189128876,12.784011125564575,3852,Try relaxing Time Window constraints,02_01_2023_00_28_37,42480.3984375,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/homberger_C1_10_4.csv b/regression/benchmarks/homberger_C1_10_4.csv deleted file mode 100644 index d01fd6f6a..000000000 --- a/regression/benchmarks/homberger_C1_10_4.csv +++ /dev/null @@ -1,22 +0,0 @@ -status,solver_run_time,etl_time,memory,err_msg,time_stamp,travel_cost,commit_hash -SUCCESS,202.4027383327484,4.14943790435791,3824,Try relaxing Time Window constraints,01_18_2023_13_24_39,41629.4140625,4a550c4d8a18beddaef2884e3321b54046d3ca5a -SUCCESS,202.7299354076385,3.9376816749572754,3824,Try relaxing Time Window constraints,01_18_2023_14_32_48,41629.4140625,4a550c4d8a18beddaef2884e3321b54046d3ca5a -SUCCESS,202.4123239517212,4.010818243026733,3824,Try relaxing Time Window constraints,01_18_2023_16_24_33,41629.4140625,4a550c4d8a18beddaef2884e3321b54046d3ca5a -SUCCESS,202.4237217903137,3.919779777526856,3824,Try relaxing Time Window constraints,01_19_2023_00_36_42,41629.4140625,4a550c4d8a18beddaef2884e3321b54046d3ca5a -SUCCESS,202.888254404068,3.9284555912017822,3824,Try relaxing Time Window constraints,01_20_2023_10_12_50,41629.4140625,d8b5ac3dee312da96440a4879c7f258d955cae53 -SUCCESS,202.5099935531616,3.868138551712036,3824,Try relaxing Time Window constraints,01_21_2023_00_53_04,41629.4140625,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 -SUCCESS,202.51063680648804,4.160922050476074,3824,Try relaxing Time Window constraints,01_22_2023_00_14_13,41629.4140625,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 -SUCCESS,202.65994668006897,3.8936829566955566,3824,Try relaxing Time Window constraints,01_23_2023_00_16_16,41629.4140625,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 -SUCCESS,202.6500322818756,4.055036783218384,3824,Try 
relaxing Time Window constraints,01_24_2023_00_23_59,41629.4140625,1e0f2faebb52900fd0a678d5e0a2b1b93112820d -SUCCESS,202.8837172985077,3.979902029037476,3824,Try relaxing Time Window constraints,01_24_2023_08_00_08,41629.4140625,1e0f2faebb52900fd0a678d5e0a2b1b93112820d -SUCCESS,202.54010653495789,4.112768888473511,3824,Try relaxing Time Window constraints,01_24_2023_08_50_41,41629.4140625,1e0f2faebb52900fd0a678d5e0a2b1b93112820d -SUCCESS,202.3439211845398,4.057533264160156,3824,Try relaxing Time Window constraints,01_25_2023_00_39_29,41629.4140625,1e0f2faebb52900fd0a678d5e0a2b1b93112820d -SUCCESS,202.5833644866944,4.075305938720703,3824,Try relaxing Time Window constraints,01_26_2023_00_16_03,41629.4140625,cec6da5b7083e002441900cfca186292f481936f -SUCCESS,202.6500098705292,4.032953500747681,3824,Try relaxing Time Window constraints,01_27_2023_00_23_38,41629.4140625,a51fe6c34510cd20b93ec6cfc15b8509d0babfca -SUCCESS,202.9884734153748,5.518364906311035,3824,Try relaxing Time Window constraints,01_28_2023_00_46_05,41629.4140625,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,202.73613834381104,4.132292747497559,3824,Try relaxing Time Window constraints,01_29_2023_00_58_14,41629.4140625,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,202.3717851638794,3.8987321853637695,3824,Try relaxing Time Window constraints,01_30_2023_00_47_19,41629.4140625,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,202.979766368866,5.730100393295288,3824,Try relaxing Time Window constraints,01_30_2023_12_41_09,41629.4140625,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,202.46156549453733,4.168130397796631,3824,Try relaxing Time Window constraints,01_30_2023_15_50_22,41629.4140625,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,202.65178418159485,4.034475564956665,3824,Try relaxing Time Window constraints,01_31_2023_01_10_25,41629.4140625,3354f792963b368a476c07ef11e1fcd66ca5ea5f -SUCCESS,206.1987981796265,10.41266632080078,3824,Try relaxing Time Window 
constraints,02_01_2023_00_28_37,41802.203125,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/homberger_C1_10_9.csv b/regression/benchmarks/homberger_C1_10_9.csv deleted file mode 100644 index 29f444004..000000000 --- a/regression/benchmarks/homberger_C1_10_9.csv +++ /dev/null @@ -1,22 +0,0 @@ -status,solver_run_time,etl_time,memory,err_msg,time_stamp,travel_cost,commit_hash -SUCCESS,202.1899116039276,4.372752666473389,3813,Try relaxing Time Window constraints,01_18_2023_13_24_39,42345.01953125,4a550c4d8a18beddaef2884e3321b54046d3ca5a -SUCCESS,201.80366277694705,4.024761915206909,3813,Try relaxing Time Window constraints,01_18_2023_14_32_48,42345.01953125,4a550c4d8a18beddaef2884e3321b54046d3ca5a -SUCCESS,202.53451490402225,4.239882469177246,3813,Try relaxing Time Window constraints,01_18_2023_16_24_33,42345.01953125,4a550c4d8a18beddaef2884e3321b54046d3ca5a -SUCCESS,202.2297282218933,4.069121360778809,3813,Try relaxing Time Window constraints,01_19_2023_00_36_42,42345.01953125,4a550c4d8a18beddaef2884e3321b54046d3ca5a -SUCCESS,202.15557503700256,3.991995334625244,3813,Try relaxing Time Window constraints,01_20_2023_10_12_50,42345.01953125,d8b5ac3dee312da96440a4879c7f258d955cae53 -SUCCESS,202.0832393169403,4.107094764709473,3813,Try relaxing Time Window constraints,01_21_2023_00_53_04,42345.01953125,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 -SUCCESS,202.35359001159668,4.070865869522095,3813,Try relaxing Time Window constraints,01_22_2023_00_14_13,42345.01953125,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 -SUCCESS,202.113706111908,3.918311357498169,3813,Try relaxing Time Window constraints,01_23_2023_00_16_16,42345.01953125,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 -SUCCESS,202.28220200538635,4.163920402526856,3813,Try relaxing Time Window constraints,01_24_2023_00_23_59,42345.01953125,1e0f2faebb52900fd0a678d5e0a2b1b93112820d -SUCCESS,202.4687077999115,3.939903259277344,3813,Try relaxing Time Window 
constraints,01_24_2023_08_00_08,42345.01953125,1e0f2faebb52900fd0a678d5e0a2b1b93112820d -SUCCESS,202.31099653244016,3.9958384037017822,3813,Try relaxing Time Window constraints,01_24_2023_08_50_41,42345.01953125,1e0f2faebb52900fd0a678d5e0a2b1b93112820d -SUCCESS,202.6687958240509,4.282079219818115,3813,Try relaxing Time Window constraints,01_25_2023_00_39_29,42345.01953125,1e0f2faebb52900fd0a678d5e0a2b1b93112820d -SUCCESS,202.5248498916626,4.124437808990479,3813,Try relaxing Time Window constraints,01_26_2023_00_16_03,42345.01953125,cec6da5b7083e002441900cfca186292f481936f -SUCCESS,202.1295850276947,3.97187089920044,3813,Try relaxing Time Window constraints,01_27_2023_00_23_38,42345.01953125,a51fe6c34510cd20b93ec6cfc15b8509d0babfca -SUCCESS,202.0128083229065,5.610944032669067,3813,Try relaxing Time Window constraints,01_28_2023_00_46_05,42345.01953125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,202.4298839569092,4.143335103988648,3813,Try relaxing Time Window constraints,01_29_2023_00_58_14,42345.01953125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,202.29705381393435,3.988018989562988,3813,Try relaxing Time Window constraints,01_30_2023_00_47_19,42345.01953125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,202.0680272579193,5.667192459106445,3813,Try relaxing Time Window constraints,01_30_2023_12_41_09,42345.01953125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,202.2976925373077,4.1305906772613525,3813,Try relaxing Time Window constraints,01_30_2023_15_50_22,42345.01953125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,202.4270453453064,4.296361923217773,3813,Try relaxing Time Window constraints,01_31_2023_01_10_25,42345.01953125,3354f792963b368a476c07ef11e1fcd66ca5ea5f -SUCCESS,207.36984372138977,11.430570125579834,3813,Try relaxing Time Window constraints,02_01_2023_00_28_37,42923.703125,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/linlim_LC1_10_1.csv b/regression/benchmarks/linlim_LC1_10_1.csv deleted file 
mode 100644 index 325e0dc75..000000000 --- a/regression/benchmarks/linlim_LC1_10_1.csv +++ /dev/null @@ -1,21 +0,0 @@ -status,solver_run_time,etl_time,memory,err_msg,time_stamp,travel_cost,commit_hash -SUCCESS,234.6376609802246,0.7243020534515381,1392,Try relaxing Time Window constraints,01_18_2023_13_35_08,132510.390625,4a550c4d8a18beddaef2884e3321b54046d3ca5a -SUCCESS,228.23448133468628,0.7411677837371826,1391,Try relaxing Time Window constraints,01_18_2023_14_43_14,132511.03125,4a550c4d8a18beddaef2884e3321b54046d3ca5a -SUCCESS,228.23107290267944,0.7489080429077148,1392,Try relaxing Time Window constraints,01_19_2023_00_47_10,132488.65625,4a550c4d8a18beddaef2884e3321b54046d3ca5a -SUCCESS,220.52945804595947,0.7301907539367676,1389,Try relaxing Time Window constraints,01_20_2023_10_23_12,132684.640625,d8b5ac3dee312da96440a4879c7f258d955cae53 -SUCCESS,233.1353704929352,0.7111175060272217,1392,Try relaxing Time Window constraints,01_21_2023_01_03_36,132511.03125,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 -SUCCESS,224.9602243900299,0.6813702583312988,1391,Try relaxing Time Window constraints,01_22_2023_00_24_36,132632.1875,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 -SUCCESS,229.9897944927216,0.6986827850341797,1392,Try relaxing Time Window constraints,01_23_2023_00_26_41,132555.84375,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 -SUCCESS,228.47381496429443,0.7829642295837402,1392,Try relaxing Time Window constraints,01_24_2023_00_34_26,132488.65625,1e0f2faebb52900fd0a678d5e0a2b1b93112820d -SUCCESS,237.6755757331848,0.7272443771362305,1392,Try relaxing Time Window constraints,01_24_2023_08_10_31,132555.84375,1e0f2faebb52900fd0a678d5e0a2b1b93112820d -SUCCESS,228.05768513679504,0.7051000595092773,1391,Try relaxing Time Window constraints,01_24_2023_09_01_07,132489.3125,1e0f2faebb52900fd0a678d5e0a2b1b93112820d -SUCCESS,226.24537992477417,0.737703800201416,1391,Try relaxing Time Window constraints,01_25_2023_00_50_09,132577.5625,1e0f2faebb52900fd0a678d5e0a2b1b93112820d 
-SUCCESS,210.6106126308441,0.7423005104064941,1389,Try relaxing Time Window constraints,01_26_2023_00_26_27,132488.65625,cec6da5b7083e002441900cfca186292f481936f -SUCCESS,215.53656339645383,0.7288351058959961,1391,Try relaxing Time Window constraints,01_27_2023_00_34_04,132556.5,a51fe6c34510cd20b93ec6cfc15b8509d0babfca -SUCCESS,212.8446328639984,1.0236175060272217,1389,Try relaxing Time Window constraints,01_28_2023_00_56_38,42806.44921875,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,212.08036494255063,0.7389419078826904,1388,Try relaxing Time Window constraints,01_29_2023_01_08_43,42489.30859375,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,215.55140352249143,0.7778847217559814,1388,Try relaxing Time Window constraints,01_30_2023_00_57_46,42488.65625,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,213.6677827835083,1.0266873836517334,1388,Try relaxing Time Window constraints,01_30_2023_12_51_42,42610.46875,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,214.6161751747132,0.7036209106445312,1388,Try relaxing Time Window constraints,01_30_2023_16_00_48,42510.3828125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,206.1508820056916,0.712501049041748,1389,Try relaxing Time Window constraints,01_31_2023_01_20_53,42510.3828125,3354f792963b368a476c07ef11e1fcd66ca5ea5f -SUCCESS,260.46342182159424,2.3488495349884038,1388,Try relaxing Time Window constraints,02_01_2023_00_39_43,43458.0546875,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/linlim_LC1_10_4.csv b/regression/benchmarks/linlim_LC1_10_4.csv deleted file mode 100644 index b3a181f1f..000000000 --- a/regression/benchmarks/linlim_LC1_10_4.csv +++ /dev/null @@ -1,21 +0,0 @@ -status,solver_run_time,etl_time,memory,err_msg,time_stamp,travel_cost,commit_hash -SUCCESS,228.4590950012207,0.4174575805664062,1366,Try relaxing Time Window constraints,01_18_2023_13_35_08,134437.984375,4a550c4d8a18beddaef2884e3321b54046d3ca5a -SUCCESS,230.9396343231201,0.4012937545776367,1363,Try 
relaxing Time Window constraints,01_18_2023_14_43_14,136835.859375,4a550c4d8a18beddaef2884e3321b54046d3ca5a -SUCCESS,225.7199718952179,0.413813829421997,1365,Try relaxing Time Window constraints,01_19_2023_00_47_10,135084.53125,4a550c4d8a18beddaef2884e3321b54046d3ca5a -SUCCESS,217.0161476135254,0.3954634666442871,1362,Try relaxing Time Window constraints,01_20_2023_10_23_12,133937.671875,d8b5ac3dee312da96440a4879c7f258d955cae53 -SUCCESS,220.2502839565277,0.4050805568695068,1359,Try relaxing Time Window constraints,01_21_2023_01_03_36,136410.484375,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 -SUCCESS,227.21949887275696,0.4711213111877441,1359,Try relaxing Time Window constraints,01_22_2023_00_24_36,135826.3125,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 -SUCCESS,227.33157467842105,0.3872089385986328,1360,Try relaxing Time Window constraints,01_23_2023_00_26_41,133772.546875,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 -SUCCESS,223.0627908706665,0.3764033317565918,1367,Try relaxing Time Window constraints,01_24_2023_00_34_26,134500.0625,1e0f2faebb52900fd0a678d5e0a2b1b93112820d -SUCCESS,229.52511739730835,0.4286582469940185,1362,Try relaxing Time Window constraints,01_24_2023_08_10_31,134388.734375,1e0f2faebb52900fd0a678d5e0a2b1b93112820d -SUCCESS,222.2460687160492,0.4110927581787109,1362,Try relaxing Time Window constraints,01_24_2023_09_01_07,136785.8125,1e0f2faebb52900fd0a678d5e0a2b1b93112820d -SUCCESS,225.35983300209045,0.4234979152679443,1361,Try relaxing Time Window constraints,01_25_2023_00_50_09,133834.765625,1e0f2faebb52900fd0a678d5e0a2b1b93112820d -SUCCESS,211.9453625679016,0.3975887298583984,1358,Try relaxing Time Window constraints,01_26_2023_00_26_27,135007.578125,cec6da5b7083e002441900cfca186292f481936f -SUCCESS,211.1832706928253,0.4141361713409424,1363,Try relaxing Time Window constraints,01_27_2023_00_34_04,135862.28125,a51fe6c34510cd20b93ec6cfc15b8509d0babfca -SUCCESS,212.13958954811096,0.5645616054534912,1364,Try relaxing Time Window 
constraints,01_28_2023_00_56_38,41154.609375,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,212.52610898017883,0.406682014465332,1360,Try relaxing Time Window constraints,01_29_2023_01_08_43,41272.10546875,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,212.9528636932373,0.4297752380371094,1356,Try relaxing Time Window constraints,01_30_2023_00_57_46,42294.234375,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,210.4439640045166,0.5646810531616211,1356,Try relaxing Time Window constraints,01_30_2023_12_51_42,42386.4609375,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,212.9664409160614,0.4054524898529053,1367,Try relaxing Time Window constraints,01_30_2023_16_00_48,42615.96484375,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,206.41025924682617,0.4145197868347168,1359,Try relaxing Time Window constraints,01_31_2023_01_20_53,41637.00390625,3354f792963b368a476c07ef11e1fcd66ca5ea5f -SUCCESS,270.8348495960236,1.1712639331817627,1357,Try relaxing Time Window constraints,02_01_2023_00_39_43,44848.16796875,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/linlim_LC1_10_9.csv b/regression/benchmarks/linlim_LC1_10_9.csv deleted file mode 100644 index 6d07bb554..000000000 --- a/regression/benchmarks/linlim_LC1_10_9.csv +++ /dev/null @@ -1,21 +0,0 @@ -status,solver_run_time,etl_time,memory,err_msg,time_stamp,travel_cost,commit_hash -SUCCESS,238.67641973495483,0.4070606231689453,1381,Try relaxing Time Window constraints,01_18_2023_13_35_08,139254.9375,4a550c4d8a18beddaef2884e3321b54046d3ca5a -SUCCESS,232.9940016269684,0.4224081039428711,1380,Try relaxing Time Window constraints,01_18_2023_14_43_14,140103.0,4a550c4d8a18beddaef2884e3321b54046d3ca5a -SUCCESS,234.68699979782104,0.4444656372070312,1381,Try relaxing Time Window constraints,01_19_2023_00_47_10,141163.640625,4a550c4d8a18beddaef2884e3321b54046d3ca5a -SUCCESS,218.58095908164967,0.4063160419464111,1380,Try relaxing Time Window 
constraints,01_20_2023_10_23_12,139411.203125,d8b5ac3dee312da96440a4879c7f258d955cae53 -SUCCESS,230.3954677581787,0.4042942523956299,1378,Try relaxing Time Window constraints,01_21_2023_01_03_36,139395.890625,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 -SUCCESS,232.57781553268435,0.4077394008636474,1378,Try relaxing Time Window constraints,01_22_2023_00_24_36,140732.28125,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 -SUCCESS,231.7747838497162,0.401658296585083,1379,Try relaxing Time Window constraints,01_23_2023_00_26_41,140143.421875,5a03ebecec6c3f3d83ecd6a5daa17f38f8b55376 -SUCCESS,232.3545906543732,0.4278278350830078,1378,Try relaxing Time Window constraints,01_24_2023_00_34_26,139762.8125,1e0f2faebb52900fd0a678d5e0a2b1b93112820d -SUCCESS,234.09125399589536,0.411196231842041,1378,Try relaxing Time Window constraints,01_24_2023_08_10_31,140568.75,1e0f2faebb52900fd0a678d5e0a2b1b93112820d -SUCCESS,226.61660289764404,0.4029746055603027,1379,Try relaxing Time Window constraints,01_24_2023_09_01_07,140093.953125,1e0f2faebb52900fd0a678d5e0a2b1b93112820d -SUCCESS,231.881952047348,0.4578347206115722,1378,Try relaxing Time Window constraints,01_25_2023_00_50_09,139465.359375,1e0f2faebb52900fd0a678d5e0a2b1b93112820d -SUCCESS,208.54942059516907,0.3924274444580078,1380,Try relaxing Time Window constraints,01_26_2023_00_26_27,141658.125,cec6da5b7083e002441900cfca186292f481936f -SUCCESS,218.20822954177856,0.3863475322723388,1381,Try relaxing Time Window constraints,01_27_2023_00_34_04,140597.359375,a51fe6c34510cd20b93ec6cfc15b8509d0babfca -SUCCESS,211.22141408920288,0.5680606365203857,1377,Try relaxing Time Window constraints,01_28_2023_00_56_38,49620.765625,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,214.280752658844,0.4076018333435058,1378,Try relaxing Time Window constraints,01_29_2023_01_08_43,46729.375,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,214.09263396263125,0.395796537399292,1376,Try relaxing Time Window 
constraints,01_30_2023_00_57_46,48296.01953125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,213.01783680915835,0.5552072525024414,1377,Try relaxing Time Window constraints,01_30_2023_12_51_42,49357.98828125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,218.2983613014221,0.3832075595855713,1375,Try relaxing Time Window constraints,01_30_2023_16_00_48,46253.51953125,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -SUCCESS,211.4424307346344,0.3975503444671631,1375,Try relaxing Time Window constraints,01_31_2023_01_20_53,48054.55859375,3354f792963b368a476c07ef11e1fcd66ca5ea5f -SUCCESS,268.6615102291107,1.1218979358673096,1377,Try relaxing Time Window constraints,02_01_2023_00_39_43,48655.6015625,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/set_config.csv b/regression/benchmarks/set_config.csv deleted file mode 100644 index c525c8585..000000000 --- a/regression/benchmarks/set_config.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -0.004645535396412,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0046047047129832,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0044730253051966,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.004629728605505,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -0.0077153033998911,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/set_fleet_2.csv b/regression/benchmarks/set_fleet_2.csv deleted file mode 100644 index 3dd3c225f..000000000 --- a/regression/benchmarks/set_fleet_2.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -0.0095658392179757,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.009450409177225,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0100178633118048,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0093642852152697,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f 
-0.0184322164990589,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/set_fleet_800.csv b/regression/benchmarks/set_fleet_800.csv deleted file mode 100644 index fa582bf6a..000000000 --- a/regression/benchmarks/set_fleet_800.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -0.1390531376935541,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.1451090188929811,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.1381841997965239,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.1395055412198416,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -0.3440185967003344,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/set_matrix_10.csv b/regression/benchmarks/set_matrix_10.csv deleted file mode 100644 index f97ca7f13..000000000 --- a/regression/benchmarks/set_matrix_10.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -0.0318860427825711,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0271919017890468,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0215748190879821,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0207240985939279,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -0.0808484257991949,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/set_matrix_1000.csv b/regression/benchmarks/set_matrix_1000.csv deleted file mode 100644 index 3d6a7d99b..000000000 --- a/regression/benchmarks/set_matrix_1000.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -2.447241939394735,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -2.3234502929844894,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -2.338384240376763,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -2.24809804528486,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f 
-6.911417219002033,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/set_tasks_10.csv b/regression/benchmarks/set_tasks_10.csv deleted file mode 100644 index 51c1f5ead..000000000 --- a/regression/benchmarks/set_tasks_10.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -0.0109893269021995,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0129346061963588,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0134648023871704,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0118026977172121,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -0.0226951214972359,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/set_tasks_1000.csv b/regression/benchmarks/set_tasks_1000.csv deleted file mode 100644 index 29772bb96..000000000 --- a/regression/benchmarks/set_tasks_1000.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -0.1538944556959904,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.1491747667896561,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.1730471987975761,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.1544712165719829,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -0.4287849125015782,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/set_waypoint_graph_10.csv b/regression/benchmarks/set_waypoint_graph_10.csv deleted file mode 100644 index ed4883933..000000000 --- a/regression/benchmarks/set_waypoint_graph_10.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -0.0056097029941156,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0032982378965243,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0038573885685764,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf 
-0.0037153065786696,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -0.0103513664995261,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/set_waypoint_graph_1000.csv b/regression/benchmarks/set_waypoint_graph_1000.csv deleted file mode 100644 index 644ae2231..000000000 --- a/regression/benchmarks/set_waypoint_graph_1000.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -3.4773945213062687,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -3.380432048998773,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -3.321970174077433,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -3.325099247705657,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -9.763971190099983,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/solve_delta_matrix_10.csv b/regression/benchmarks/solve_delta_matrix_10.csv deleted file mode 100644 index 2d481b3e9..000000000 --- a/regression/benchmarks/solve_delta_matrix_10.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -0.0660040138754993,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0658921508234925,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0601130882976576,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0595540319103747,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -0.1493235697002091,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/solve_delta_matrix_1000.csv b/regression/benchmarks/solve_delta_matrix_1000.csv deleted file mode 100644 index cd43dc82a..000000000 --- a/regression/benchmarks/solve_delta_matrix_1000.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -0.3474221469950862,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.3429003946832381,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf 
-0.3333525741123594,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.33962144900579,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -1.0392802602997109,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/solve_delta_wpg_10.csv b/regression/benchmarks/solve_delta_wpg_10.csv deleted file mode 100644 index 301ffd427..000000000 --- a/regression/benchmarks/solve_delta_wpg_10.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -0.0767553822021,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.079037431103643,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0698185498942621,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.157676170382183,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -0.164265617098863,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/solve_delta_wpg_1000.csv b/regression/benchmarks/solve_delta_wpg_1000.csv deleted file mode 100644 index 45f4931ae..000000000 --- a/regression/benchmarks/solve_delta_wpg_1000.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -0.5354226037045009,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.5224156844080425,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.5350534689961932,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.5143750281655229,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -1.3945245039001748,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/update_config.csv b/regression/benchmarks/update_config.csv deleted file mode 100644 index ba59ede20..000000000 --- a/regression/benchmarks/update_config.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -0.0046170215005986,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf 
-0.0044824522337876,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0047105842968449,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0045994932879693,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -0.0075603434997901,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/update_fleet_2.csv b/regression/benchmarks/update_fleet_2.csv deleted file mode 100644 index ec6ae8ff4..000000000 --- a/regression/benchmarks/update_fleet_2.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -0.0095946694957092,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0095075284014455,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0096409979043528,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.009402271406725,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -0.0175724803993944,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/update_fleet_800.csv b/regression/benchmarks/update_fleet_800.csv deleted file mode 100644 index 3ddd46327..000000000 --- a/regression/benchmarks/update_fleet_800.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -0.1386791780241765,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.1419437378877774,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.1385688012931495,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.1430311175994575,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -0.3444252583984052,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/update_matrix_10.csv b/regression/benchmarks/update_matrix_10.csv deleted file mode 100644 index 17b382481..000000000 --- a/regression/benchmarks/update_matrix_10.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -0.0080373852746561,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf 
-0.0046605161041952,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0055735706002451,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0051797617808915,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -0.0133258155001385,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/update_matrix_1000.csv b/regression/benchmarks/update_matrix_1000.csv deleted file mode 100644 index 7db442f7b..000000000 --- a/regression/benchmarks/update_matrix_1000.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -2.4152652503806165,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -2.319457295385655,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -2.338757945317775,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -2.2231198537279853,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -6.86295339329954,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/update_tasks_10.csv b/regression/benchmarks/update_tasks_10.csv deleted file mode 100644 index 5adaeb373..000000000 --- a/regression/benchmarks/update_tasks_10.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -0.0095267685013823,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.011702395172324,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0130278801894746,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0122657145839184,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -0.0276424682000651,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/update_tasks_1000.csv b/regression/benchmarks/update_tasks_1000.csv deleted file mode 100644 index 832386cc1..000000000 --- a/regression/benchmarks/update_tasks_1000.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash 
-0.1570440814946778,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.1466961717000231,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.1645322358002886,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.1534246206167154,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -0.3910466975998133,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/update_waypoint_graph_10.csv b/regression/benchmarks/update_waypoint_graph_10.csv deleted file mode 100644 index dca45caf9..000000000 --- a/regression/benchmarks/update_waypoint_graph_10.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -0.0051487428951077,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0031310768914408,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0035910105914808,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -0.0034982840297743,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -0.0092652412997267,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac diff --git a/regression/benchmarks/update_waypoint_graph_1000.csv b/regression/benchmarks/update_waypoint_graph_1000.csv deleted file mode 100644 index 8f7a39ca6..000000000 --- a/regression/benchmarks/update_waypoint_graph_1000.csv +++ /dev/null @@ -1,6 +0,0 @@ -run_time,date_time,commit_hash -2.170334866119083,01_30_2023_14_44_26,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -2.1527948976843616,01_30_2023_15_37_12,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -2.062339087412693,01_30_2023_16_19_59,a7b3b159b2f0c50c4614ebe3b21feee5985729bf -2.0358887692331336,01_31_2023_01_39_40,3354f792963b368a476c07ef11e1fcd66ca5ea5f -6.185019124099199,02_01_2023_01_13_49,9b496799f2c3d1e6f86b654523e0ff37d8693cac From 62ccccefe85000cdf1fd5830afa811a82b4eadae Mon Sep 17 00:00:00 2001 From: Ishika Roy Date: Thu, 23 Oct 2025 08:22:35 -0700 Subject: [PATCH 3/6] add regression changes --- 
regression/benchmark_scripts/benchmark.py | 161 +++++++--- .../benchmark_scripts/run_test_and_record.py | 167 +++++++++++ regression/benchmark_scripts/utils.py | 119 +++++--- regression/config.sh | 12 +- regression/create-html-reports.sh | 246 ++++++++++++++++ regression/cronjob.sh | 93 +++--- regression/get_datasets.sh | 275 ++++++++++++++++++ regression/lp_regression_test.sh | 2 +- regression/mip_regression_test.sh | 2 +- regression/routing_regression_test.sh | 2 +- regression/run_regression.sh | 20 +- regression/setup-benchmark-dir.sh | 29 -- regression/test-container.sh | 3 + regression/update_asv_database.py | 83 +++--- regression/write-cuopt-meta-data.sh | 38 --- regression/write-meta-data.sh | 33 +++ 16 files changed, 1031 insertions(+), 254 deletions(-) create mode 100644 regression/benchmark_scripts/run_test_and_record.py create mode 100755 regression/create-html-reports.sh create mode 100644 regression/get_datasets.sh delete mode 100755 regression/setup-benchmark-dir.sh delete mode 100755 regression/write-cuopt-meta-data.sh create mode 100755 regression/write-meta-data.sh diff --git a/regression/benchmark_scripts/benchmark.py b/regression/benchmark_scripts/benchmark.py index 56ef78fb2..411a33900 100644 --- a/regression/benchmark_scripts/benchmark.py +++ b/regression/benchmark_scripts/benchmark.py @@ -8,11 +8,12 @@ # without an express license agreement from NVIDIA CORPORATION or # its affiliates is strictly prohibited. 
- -from utils import get_configuration, LPMetrics, RoutingMetrics -from cuopt import linear_programming -from cuopt import routing -from cuopt import utilities +import os +from multiprocessing import Process +#from utils import get_configuration, LPMetrics, RoutingMetrics +#from cuopt import linear_programming +#from cuopt import routing +#from cuopt import utilities import rmm import time import pandas as pd @@ -22,6 +23,8 @@ import os import argparse +log.getLogger().setLevel(log.INFO) + def create_regression_markdown(data, regression_path, test_type_string): regression_md_file = regression_path + "/" + test_type_string + "_regressions.md" @@ -88,8 +91,8 @@ def get_bks_change( def record_result( test_name, metrics, required_metrics, csv_path, test_type_string ): - - file_path = csv_path + "/" + test_name + ".csv" + + file_path = csv_path + "/" + test_type_string + "_" + test_name + ".csv" bks_metrics = get_bks_change(metrics, required_metrics) # Add default metrics to data @@ -117,19 +120,25 @@ def run_benchmark( required_metrics, csv_path, git_commit, - test_status_file + test_status_file, + d_type ): + import rmm mr = rmm.mr.get_current_device_resource() + from utils import LPMetrics, RoutingMetrics + from cuopt import linear_programming + from cuopt import routing + start_time = time.time() - if test_name.startswith("LP_") or test_name.startswith("MIP_"): + if d_type=="lp" or d_type=="mip": metrics = LPMetrics()._asdict() solver_settings.set_parameter("infeasibility_detection", False) - solver_settings.set_parameter("time_limit", 180) + solver_settings.set_parameter("time_limit", 300) solution = linear_programming.Solve(data_model, solver_settings) else: metrics = RoutingMetrics()._asdict() - solution = routing.Solve(data_model, solver_settings) + solution = routing.Solve(data_model) end_time = time.time() metrics["gpu_memory_usage"] = int(mr.allocation_counts.peak_bytes/(1024*1024)) @@ -138,14 +147,14 @@ def run_benchmark( success_status = False - if 
test_name.startswith("LP_") or test_name.startswith("MIP_"): + if d_type=="lp" or d_type=="mip": ## Optimal solution - if solution.get_termination_reason() == 1: - test_type_string = "lp" if test_name.startswith("LP_") else "mip" + acceptable_termination = ["Optimal", "TimeLimit", "FeasibleFound"] + if solution.get_termination_reason() in acceptable_termination: success_status = True metrics["solver_time"] = solution.get_solve_time() metrics["primal_objective_value"] = solution.get_primal_objective() - if test_type_string == "lp": + if d_type == "lp": lp_stats = solution.get_lp_stats() metrics["nb_iterations"] = lp_stats["nb_iterations"] else: @@ -154,7 +163,7 @@ def run_benchmark( metrics["max_constraint_violation"] = milp_stats["max_constraint_violation"] metrics["max_int_violation"] = milp_stats["max_int_violation"] metrics["max_variable_bound_violation"] = milp_stats["max_variable_bound_violation"] - record_result(test_name, metrics, required_metrics, csv_path, test_type_string) + record_result(test_name, metrics, required_metrics, csv_path, d_type) else: if solution.get_status() == 0: success_status = True @@ -170,40 +179,49 @@ def run_benchmark( if "travel_time" in required_metrics: metrics["travel_time"] = objectives[routing.Objective.TRAVEL_TIME] - record_result(test_name, metrics, required_metrics, csv_path, "routing") + record_result(test_name, metrics, required_metrics, csv_path, d_type) return "SUCCESS" if success_status is True else "FAILED" - def reinitialize_rmm(): - + pool_size = 2**30 rmm.reinitialize(pool_allocator=True, initial_pool_size=pool_size) base_mr = rmm.mr.get_current_device_resource() stats_mr = rmm.mr.StatisticsResourceAdaptor(base_mr) - rmm.mr.set_current_device_resource(stats_mr) + rmm.mr.set_current_device_resource(stats_mr) return base_mr, stats_mr -def run(config_file_path, csv_path, git_commit, log_path, test_status_file): +def worker(gpu_id, dataset_file_path, csv_path, git_commit, log_path, test_status_file, n_gpus, 
d_type="routing"): + import os + os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id - config_files = glob.glob(config_file_path + "/*_config.json") - - for config in config_files: + import glob + from utils import get_configuration + data_files = [] + if d_type == "lp" or d_type == "mip": + data_files = glob.glob(dataset_file_path + "/*.mps") + else: + data_files = glob.glob(dataset_file_path + "/*_config.json") + idx = int(gpu_id) + n_files = len(data_files) + while idx < n_files: mr, stats_mr = reinitialize_rmm() - test_name = str(config) + from rmm._cuda.gpu import CUDARuntimeError, getDevice, setDevice + + data_file = data_files[idx] + test_name = str(data_file) status = "FAILED" try: - test_name, data_model, solver_settings, requested_metrics = get_configuration(config, config_file_path) - + test_name, data_model, solver_settings, requested_metrics = get_configuration(data_file, dataset_file_path, d_type) log.basicConfig(level=log.INFO, filename=log_path+"/"+test_name+"_log.txt", filemode="a+", format="%(asctime)-15s %(levelname)-8s %(message)s") - log.info(f"------------- Test Start : {test_name} -------------------") - + log.info(f"------------- Test Start : {test_name} gpu id : {gpu_id} -------------------") status = run_benchmark( test_name, data_model, @@ -211,7 +229,8 @@ def run(config_file_path, csv_path, git_commit, log_path, test_status_file): requested_metrics, csv_path, git_commit, - test_status_file + test_status_file, + d_type ) except Exception as e: @@ -225,7 +244,78 @@ def run(config_file_path, csv_path, git_commit, log_path, test_status_file): del mr del stats_mr - log.info(f"------------- Test End : {test_name} -------------------") + log.info(f"------------- Test End : {test_name} gpu id : {gpu_id} -------------------") + idx = idx + n_gpus + +def run(dataset_file_path, csv_path, git_commit, log_path, test_status_file, n_gpus, d_type): + + """def worker(gpu_id, n_gpus): + import os + #log.info(f"------------- GPU id : {gpu_id} -------------------") 
+ os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id + import rmm + pool = rmm.mr.PoolMemoryResource( + rmm.mr.CudaMemoryResource() + ) + + rmm.mr.set_current_device_resource(pool) + idx = int(gpu_id) + n_files = len(config_files) + + def reinitialize_rmm(): + + pool = rmm.mr.PoolMemoryResource( + rmm.mr.CudaMemoryResource() + ) + + rmm.mr.set_current_device_resource(pool) + #rmm.reinitialize(pool_allocator=True, initial_pool_size=pool_size) + + #base_mr = rmm.mr.get_current_device_resource() + #stats_mr = rmm.mr.StatisticsResourceAdaptor(base_mr) + #rmm.mr.set_current_device_resource(stats_mr) + + return "", "" + + while idx < n_files: + config = config_files[idx] + + test_name = str(config) + status = "FAILED" + try: + + test_name, data_model, solver_settings, requested_metrics = get_configuration(config, config_file_path) + + log.basicConfig(level=log.INFO, filename=log_path+"/"+test_name+"_log.txt", filemode="a+", + format="%(asctime)-15s %(levelname)-8s %(message)s") + log.info(f"------------- Test Start : {test_name} -------------------") + log.info(f"------------- GPU id : {gpu_id} -------------------") + #status = run_benchmark( + # test_name, + # data_model, + # solver_settings, + # requested_metrics, + # csv_path, + # git_commit, + # test_status_file + #) + + except Exception as e: + log.error(str(e)) + + with open(test_status_file, "a") as f: + f.write("\n") + f.write(test_name +": " + status)""" + + procs = [] + for gpu_id in range(int(n_gpus)): + p = Process(target=worker, args=(str(gpu_id), dataset_file_path, csv_path, git_commit, log_path, test_status_file, int(n_gpus), d_type)) + p.start() + procs.append(p) + + for p in procs: + p.join() + print("All processes finished.") if __name__ == "__main__": @@ -246,6 +336,11 @@ def run(config_file_path, csv_path, git_commit, log_path, test_status_file): parser.add_argument( "-s", "--test-status-file", type=str, help="All test status will be stored in this file" ) - + parser.add_argument( + "-n", "--num-gpus", 
type=str, help="Number of GPUs available" + ) + parser.add_argument( + "-t", "--type", type=str, help="Type of benchmark" + ) args = parser.parse_args() - run(args.config_path, args.csv_path, args.git_commit, args.log_path, args.test_status_file) + run(args.config_path, args.csv_path, args.git_commit, args.log_path, args.test_status_file, args.num_gpus, args.type) diff --git a/regression/benchmark_scripts/run_test_and_record.py b/regression/benchmark_scripts/run_test_and_record.py new file mode 100644 index 000000000..67cd4306d --- /dev/null +++ b/regression/benchmark_scripts/run_test_and_record.py @@ -0,0 +1,167 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. + + +from utils import get_configuration, LPMetrics, RoutingMetrics +from cuopt import linear_programming +from cuopt import routing +from cuopt import utilities +import rmm +import time +import pandas as pd +import glob +import logging as log +from datetime import datetime +import os +import argparse + +def create_regression_markdown(data, regression_path, test_type_string): + regression_md_file = regression_path + "/" + test_type_string + "_regressions.md" + + md_data = "*No regressions*" + # This to reduce size of slack message + limit_no_of_regression_list = 5 + + if len(data) > 0: + status = "*!! 
Regressions found !!*" + end_msg = "\n*Continues ...*" if len(data) > limit_no_of_regression_list else "" + table = data[:limit_no_of_regression_list].to_string(index=False) + md_data = status + f'\n```\n{table}\n```' + end_msg + + with open(regression_md_file, "w") as fp: + fp.write(md_data) + +def record_regressions(test_name, data, req_metrics, regression_path, test_type_string): + + regression_file = regression_path + "/" + test_type_string + "_regressions.csv" + + regression_df = pd.DataFrame({"Test Name":[], "Metric Name":[], "Value":[], "Avg Value":[], "Regression(%)":[]}) + for name in req_metrics: + if name.startswith("bks_change_"): + pchange = data[name].iloc[-1].item() + metric_name = name.replace("bks_change_", "") + limit = req_metrics[metric_name]["bks"].get("threshold", 5) + prev_val_mean = pchange + latest_val = pchange + else: + limit = req_metrics[name].get("threshold", 5) + prev_val_mean = data[name][:-1][-30:].mean().item() if len(data) > 1 else data[name].iloc[-1].item() + latest_val = data[name].iloc[-1].item() + + if prev_val_mean == 0: + pchange = latest_val + else: + pchange = ((latest_val - prev_val_mean)/prev_val_mean) * 100 + + if abs(pchange) >= limit: + regression_df.loc[len(regression_df)] = [test_name, name, latest_val, prev_val_mean, pchange] + + regression_df.to_csv(regression_file) + create_regression_markdown(regression_df, regression_path, test_type_string) + +def get_bks_change( + metrics, required_metrics +): + bks_metrics = {} + for metric, value in required_metrics.items(): + if "bks" in value.keys(): + bks = value["bks"]["value"] + if bks == None: + continue + current = metrics[metric] + if bks == 0: + bks_metrics["bks_change_" + metric] = abs(current) * 100 + elif current == 0: + bks_metrics["bks_change_" + metric] = abs(bks) * 100 + else: + bks_metrics["bks_change_" + metric] = abs(((current - bks)/bks) * 100) + + return bks_metrics + +def record_result( + test_name, metrics, required_metrics, csv_path, test_type_string 
+): + + file_path = csv_path + "/" + test_name + ".csv" + + bks_metrics = get_bks_change(metrics, required_metrics) + # Add default metrics to data + required_metrics.update(bks_metrics) + metrics.update(bks_metrics) + + req_metrics = list(required_metrics.keys()) + ["date_time", "git_commit"] + + current_data = pd.DataFrame({key : [metrics[key]] for key in sorted(req_metrics)}) + if os.path.isfile(file_path): + previous_data = pd.read_csv(file_path, index_col=0) + updated_data = pd.concat([previous_data, current_data], ignore_index=True) + else: + updated_data = current_data + + record_regressions(test_name, updated_data, required_metrics, csv_path, test_type_string) + + updated_data.to_csv(file_path) + + +def run_benchmark( + test_name, + data_model, + solver_settings, + required_metrics, + csv_path, + git_commit, + test_status_file +): + mr = rmm.mr.get_current_device_resource() + + start_time = time.time() + if test_name.startswith("LP_") or test_name.startswith("MIP_"): + metrics = LPMetrics()._asdict() + solver_settings.set_infeasibility_detection(False) + solver_settings.set_time_limit(600) + solution = linear_programming.Solve(data_model, solver_settings) + else: + metrics = RoutingMetrics()._asdict() + solution = routing.Solve(data_model, solver_settings) + end_time = time.time() + + metrics["gpu_memory_usage"] = int(mr.allocation_counts.peak_bytes/(1024*1024)) + metrics["date_time"] = datetime.now().strftime("%m_%d_%Y_%H_%M_%S") + metrics["git_commit"] = git_commit + + success_status = False + + if test_name.startswith("LP_") or test_name.startswith("MIP_"): + ## Optimal solution + if solution.get_termination_reason() == 1: + test_type_string = "lp" if test_name.startswith("LP_") else "mip" + success_status = True + metrics["solver_time"] = solution.get_solve_time() + metrics["primal_objective_value"] = solution.get_primal_objective() + + record_result(test_name, metrics, required_metrics, csv_path, test_type_string) + else: + if solution.get_status() == 
0: + success_status = True + metrics["solver_time"] = end_time - start_time + metrics["total_objective_value"] = solution.get_total_objective() + metrics["vehicle_count"] = solution.get_vehicle_count() + + objectives = solution.get_objective_values() + if "prize" in required_metrics: + metrics["prize"] = objectives[routing.Objective.PRIZE] + if "cost" in required_metrics: + metrics["cost"] = objectives[routing.Objective.COST] + if "travel_time" in required_metrics: + metrics["travel_time"] = objectives[routing.Objective.TRAVEL_TIME] + + record_result(test_name, metrics, required_metrics, csv_path, "routing") + + return "SUCCESS" if success_status is True else "FAILED" diff --git a/regression/benchmark_scripts/utils.py b/regression/benchmark_scripts/utils.py index eeb48b990..c4160f7d0 100644 --- a/regression/benchmark_scripts/utils.py +++ b/regression/benchmark_scripts/utils.py @@ -8,9 +8,10 @@ # without an express license agreement from NVIDIA CORPORATION or # its affiliates is strictly prohibited. 
-#from cuopt_server.utils.utils import build_routing_datamodel_from_json, build_lp_datamodel_from_json +from cuopt_server.utils.utils import build_routing_datamodel_from_json from cuopt import routing - +from cuopt.linear_programming.solver_settings import SolverSettings +import cuopt_mps_parser import os import json from typing import NamedTuple @@ -35,12 +36,6 @@ import json import os -from cuopt_server.utils.job_queue import SolverLPJob -from cuopt_server.utils.linear_programming.data_definition import LPData -from cuopt_server.utils.linear_programming.solver import ( - create_data_model as lp_create_data_model, - create_solver as lp_create_solver, -) from cuopt_server.utils.routing.data_definition import OptimizedRoutingData from cuopt_server.utils.routing.solver import ( create_data_model as routing_create_data_model, @@ -85,41 +80,25 @@ def build_routing_datamodel_from_json(data): return data_model, solver_settings -def build_lp_datamodel_from_json(data): +def build_datamodel_from_mps(data): """ - data: A valid dictionary or a json file-path with - valid format as per open-api spec. 
+ data: A file in mps format """ - if isinstance(data, dict): - data = LPData.parse_obj(data) - elif os.path.isfile(data): - with open(data, "r") as f: - data = json.loads(f.read()) - # Remove this once we support variable names - data.pop("variable_names") - data = LPData.parse_obj(data) + if os.path.isfile(data): + data_model = cuopt_mps_parser.ParseMps(data) else: raise ValueError( f"Invalid type : {type(data)} has been provided as input, " - "requires json input" + "requires mps input" ) - - stub_id = 9999 - stub_warnings = [] - job = SolverLPJob(stub_id, data, None, stub_warnings) - # transform data into digestible format - job._transform(job.LP_data) - data = job.get_data() - - _, data_model = lp_create_data_model(data) - _, solver_settings = lp_create_solver(data, None) + solver_settings = SolverSettings() return data_model, solver_settings class RoutingMetrics(NamedTuple): - + total_objective_value:float = -1 vehicle_count:int = -1 cost:float = -1 @@ -139,27 +118,71 @@ class LPMetrics(NamedTuple): date_time: str = "" -def get_configuration(config_file, data_file_path): +def get_metrics(d_type): + if d_type == "mip": + return { + "primal_objective_value": { + "threshold": 1, + "unit": "primal_objective_value" + }, + "solver_time": { + "threshold": 1, + "unit": "seconds" + }, + "mip_gap": { + "threshold": 1, + "unit": "mip_gap" + }, + "max_constraint_violation": { + "threshold": 1, + "unit": "max" + }, + "max_int_violation": { + "threshold": 1, + "unit": "max" + }, + "max_variable_bound_violation": { + "threshold": 1, + "unit": "max" + } + } + elif dtype == "lp": + return { + "primal_objective_value": { + "threshold": 1, + "unit": "primal_objective_value", + "bks": { + "value": -282.9604743, + "threshold": 1 + } + }, + "solver_time": { + "threshold": 1, + "unit": "seconds" + }, + "nb_iterations": { + "threshold": 1, + "unit": "num_iterations" + } + } + +def get_configuration(data_file, data_file_path, d_type): data = {} - if os.path.isfile(config_file): - 
with open(config_file) as f: - data = json.load(f) - else: - raise ValueError(f"Invalid type : {type(data)} has been provided as input, requires json input") - + test_name = None + requested_metrics = {} - test_name = data["test_name"] - - if data["file_name"].startswith("LP_") or data["file_name"].startswith("MIP_"): - data_model, solver_settings = build_lp_datamodel_from_json(data_file_path+"/"+data["file_name"]) + if d_type == "lp" or d_type == "mip": + with open(data_file_path+"/"+d_type+"_config.json") as f: + data = json.load(f) + test_name = data_file.split('/')[-1].split('.')[0] + data_model, solver_settings = build_datamodel_from_mps(data_file) + requested_metrics = data["metrics"] else: + with open(data_file) as f: + data = json.load(f) + test_name = data["test_name"] data_model, solver_settings = build_routing_datamodel_from_json(data_file_path+"/"+data["file_name"]) - - - requested_metrics = data["metrics"] + requested_metrics = data["metrics"] return test_name, data_model, solver_settings, requested_metrics - - - diff --git a/regression/config.sh b/regression/config.sh index 3b073555a..d7f2ab4b3 100644 --- a/regression/config.sh +++ b/regression/config.sh @@ -1,4 +1,3 @@ -# SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary # # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual @@ -14,16 +13,20 @@ CUOPT_SCRIPTS_DIR=$THIS_DIR OUTPUT_DIR=$SCRATCH_DIR/benchmark_runs/ ACCOUNT=datascience_rapids_testing -PARTITION="batch_dgx2h_m2" -GPUS_PER_NODE=1 +PARTITION="batch" +GPUS_PER_NODE=8 # Path to the squashs file containing the container image IMAGE="nvidia/cuopt:25.10.0a-cuda12.9-py3.12" #SQSH_IMAGE=$SCRATCH_DIR/container_state/cuopt.sqsh +ALL_CONFIGS_PATH=$SCRATCH_DIR/configs/ ROUTING_CONFIGS_PATH=$SCRATCH_DIR/routing_configs/ LP_CONFIGS_PATH=$SCRATCH_DIR/lp_configs/ MIP_CONFIGS_PATH=$SCRATCH_DIR/mip_configs/ +ROUTING_DATASETS_PATH=$SCRATCH_DIR/routing_datasets/ +LP_DATASETS_PATH=$SCRATCH_DIR/lp_datasets/ +MIP_DATASETS_PATH=$SCRATCH_DIR/mip_datasets/ STATUS_FILE=$OUTPUT_DIR/status.txt WORKER_RMM_POOL_SIZE=${WORKER_RMM_POOL_SIZE:-24G} @@ -35,7 +38,7 @@ RESULT_DIR_NAME=cuopt-regression SSH_CREDS=/home/iroy/.ssh/ # Assume CUOPT_SLACK_APP_ID is defined! -CUOPT_SLACK_APP_ID="T04SYRAP3/B04BKLJ7R0F/8EPiEMTDcXFeB5FzQVEJp8t2" +CUOPT_SLACK_APP_ID="XYZ" WEBHOOK_URL=${WEBHOOK_URL:-https://hooks.slack.com/services/${CUOPT_SLACK_APP_ID}} S3_FILE_PREFIX=s3://reopt-testing-public/regression_tests S3_URL_PREFIX=https://reopt-testing-public.s3.amazonaws.com/regression_tests @@ -65,3 +68,4 @@ DATE=${DATE:-$(date --utc "+%Y-%m-%d_%H:%M:%S")_UTC} # therefore cannot be overridden by a project. TESTING_RESULTS_DIR=${RESULTS_DIR}/tests BENCHMARK_RESULTS_DIR=${RESULTS_DIR}/benchmarks + diff --git a/regression/create-html-reports.sh b/regression/create-html-reports.sh new file mode 100755 index 000000000..18632f38d --- /dev/null +++ b/regression/create-html-reports.sh @@ -0,0 +1,246 @@ +#!/bin/bash +# Copyright (c) 2021, NVIDIA CORPORATION. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Must ensure PROJECT_DIR is exported first then load env +export PROJECT_DIR=${PROJECT_DIR:-$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)} +source ${PROJECT_DIR}/config.sh +source ${PROJECT_DIR}/functions.sh + +# FIXME: this assumes all reports are from running pytests +ALL_REPORTS=$(find ${RESULTS_DIR}/benchmarks/results/ -name "*status.txt") + +# Create the html describing the build and test run +REPORT_METADATA_HTML="" +PROJECT_VERSION="unknown" +PROJECT_BUILD="" +PROJECT_CHANNEL="unknown" +PROJECT_REPO_URL="unknown" +PROJECT_REPO_BRANCH="unknown" +if [ -f $METADATA_FILE ]; then + source $METADATA_FILE +fi +# Assume if PROJECT_BUILD is set then a conda version string should be +# created, else a git version string. +if [[ "$PROJECT_BUILD" != "" ]]; then + REPORT_METADATA_HTML=" + + + +
conda version$PROJECT_VERSION
build$PROJECT_BUILD
channel$PROJECT_CHANNEL
+
" +else + REPORT_METADATA_HTML=" + + + +
commit hash$PROJECT_VERSION
repo$PROJECT_REPO_URL
branch$PROJECT_REPO_BRANCH
+
" +fi + + +################################################################################ +# create the html reports for each individual run (each +# pytest-results*.txt file) +if [ "$ALL_REPORTS" != "" ]; then + for report in $ALL_REPORTS; do + # Get the individual report name, and use the .txt file path + # to form the html report being generated (same location as + # the .txt file). This will be an abs path since it is a file + # on disk being written. + report_name=$(basename -s .txt $report) + html_report_abs_path=$(dirname $report)/${report_name}.html + echo " + + + ${report_name} + + +

${report_name}


" > $html_report_abs_path + echo "$REPORT_METADATA_HTML" >> $html_report_abs_path + echo " + + + +" >> $html_report_abs_path + awk '{ if($2 == "FAILED") { + color = "red" + } else { + color = "green" + } + printf "\n", $1, color, $2, $3, $3 + }' $report >> $html_report_abs_path + echo "
test filestatuslogs
%s%s%s
+ + + " >> $html_report_abs_path + done +fi + +################################################################################ +# Create a .html file for each *_log.txt file, which is just the contents +# of the log with a line number and anchor id for each line that can +# be used for sharing links to lines. +ALL_LOGS=$(find -L ${BENCHMARK_RESULTS_DIR} -type f -name "*_log.txt" -print) + +for f in $ALL_LOGS; do + base_no_extension=$(basename ${f: 0:-4}) + html=${f: 0:-4}.html + echo " + + + $base_no_extension + + + +

${base_no_extension}


+" > $html + awk '{ print ""NR":
"$0"

"}' $f >> $html + echo " + +" >> $html +done + +################################################################################ +# create the top-level report +STATUS='FAILED' +STATUS_IMG='https://img.icons8.com/cotton/80/000000/cancel--v1.png' +if [ "$ALL_REPORTS" != "" ]; then + if ! (grep -w FAILED $ALL_REPORTS > /dev/null); then + STATUS='PASSED' + STATUS_IMG='https://img.icons8.com/bubbles/100/000000/approval.png' + fi +fi +BUILD_LOG_HTML="(build log not available or build not run)" +BUILD_STATUS="" +if [ -f $BUILD_LOG_FILE ]; then + if [ -f ${BUILD_LOG_FILE: 0:-4}.html ]; then + BUILD_LOG_HTML="log (plain text)" + else + BUILD_LOG_HTML="log" + fi + tail -3 $BUILD_LOG_FILE | grep -w "done." + if (tail -3 $BUILD_LOG_FILE | grep -qw "done."); then + BUILD_STATUS="PASSED" + else + BUILD_STATUS="FAILED" + fi +fi + +report=${RESULTS_DIR}/report.html +echo " + + + test report + + +" > $report +echo "$REPORT_METADATA_HTML" >> $report +echo "\"${STATUS}\"/ Overall status: $STATUS
" >> $report +echo "Build: ${BUILD_STATUS} ${BUILD_LOG_HTML}
" >> $report +if [ "$ALL_REPORTS" != "" ]; then + echo "
Test Status
" >>$report + echo " + + + + " >> $report + for f in $ALL_REPORTS; do + report_name=$(basename -s .txt $f) + # report_path should be of the form "tests/foo.html" + prefix_to_remove="$RESULTS_DIR/" + report_rel_path=${f/$prefix_to_remove} + report_path=$(dirname $report_rel_path)/${report_name}.html + + if (grep -w FAILED $f > /dev/null); then + status="FAILED" + color="red" + else + status="PASSED" + color="green" + fi + echo "" >> $report + done + echo "
TestStatus
${report_name}${status}
" >> $report +else + echo "Tests were not run." >> $report +fi +prefix_to_remove="$RESULTS_DIR/" +plot_rel_path=${f/$prefix_to_remove} +plot_path=$(dirname $plot_rel_path)/asv/html/index.html +prefix_to_remove="$RESULTS_DIR/benchmarks/results/" +log_rel_path=${f/$prefix_to_remove} +log_path=$(dirname $log_rel_path)/index.html +echo "

\"Plots\"
Plots : Rgression test results


" >>$report +echo "

\"Plots\"
Logs and Details : All the data for this run


" >>$report +echo " + +" >> $report + +################################################################################ +# (optional) generate the ASV html +if hasArg --run-asv; then + asv_config_file=$(find ${BENCHMARK_RESULTS_DIR}/results/asv -name "asv.conf.json") + if [ "$asv_config_file" != "" ]; then + asv update --config $asv_config_file + asv publish --config $asv_config_file + fi +fi + +################################################################################ +# Create an index.html for each dir (ALL_DIRS plus ".", but EXCLUDE +# the asv html) This is needed since S3 (and probably others) will not +# show the contents of a hosted directory by default, but will instead +# return the index.html if present. +# The index.html will just contain links to the individual files and +# subdirs present in each dir, just as if browsing in a file explorer. +ALL_DIRS=$(find -L ${RESULTS_DIR} -path ${BENCHMARK_RESULTS_DIR}/results/asv/html -prune -o -type d -printf "%P\n") + +for d in "." $ALL_DIRS; do + index=${RESULTS_DIR}/${d}/index.html + echo " + + + $d + + +

${d}


+" > $index + for f in ${RESULTS_DIR}/$d/*; do + b=$(basename $f) + # Do not include index.html in index.html (it's a link to itself) + if [[ "$b" == "index.html" ]]; then + continue + fi + if [ -d "$f" ]; then + echo "$b
" >> $index + # special case: if the file is a *_log.txt and has a corresponding .html + elif [[ "${f: -8}" == "_log.txt" ]] && [[ -f "${f: 0:-4}.html" ]]; then + markup="${b: 0:-4}.html" + plaintext=$b + echo "$markup (plain text)
" >> $index + elif [[ "${f: -9}" == "_log.html" ]] && [[ -f "${f: 0:-5}.txt" ]]; then + continue + else + echo "$b
" >> $index + fi + done + echo " + +" >> $index +done diff --git a/regression/cronjob.sh b/regression/cronjob.sh index b69b50686..affc44b11 100755 --- a/regression/cronjob.sh +++ b/regression/cronjob.sh @@ -53,24 +53,23 @@ setupResultsDir set +e ################################################################################ -#logger "Testing cuOpt in container..." -#srun \ -# --account $ACCOUNT \ -# --partition $PARTITION \ -# --nv-meta ml-model.dlss,dcgm_opt_out.yes \ -# --job-name=test-container.testing \ -# --nodes 1 \ -# --gpus-per-node 1 \ -# --time=120 \ -# --export=ALL \ -# --exclusive -K \ -# --container-mounts=${CUOPT_SCRIPTS_DIR}:${CUOPT_SCRIPTS_DIR},${OUTPUT_DIR}:${OUTPUT_DIR},${SSH_CREDS}:/root/.ssh \ -# --container-image=$IMAGE \ -# --output=$BUILD_LOG_FILE \ -# bash ${PROJECT_DIR}/test-container.sh +logger "Testing cuOpt in container..." +srun \ + --account $ACCOUNT \ + --partition $PARTITION \ + --job-name=test-container.testing \ + --nodes 1 \ + --gpus-per-node 1 \ + --time=120 \ + --export=ALL \ + --container-mounts=${CUOPT_SCRIPTS_DIR}:${CUOPT_SCRIPTS_DIR},${OUTPUT_DIR}:${OUTPUT_DIR} \ + --container-image=$IMAGE \ + --output=$BUILD_LOG_FILE \ + bash ${PROJECT_DIR}/test-container.sh TESTING_FAILED=$? logger "done testing container, return code was $TESTING_FAILED" + if [[ $TESTING_FAILED == 0 ]]; then ############################################################################ @@ -79,27 +78,25 @@ if [[ $TESTING_FAILED == 0 ]]; then logger "Running benchmarks..." 
logger "GPUs per node : $GPUS_PER_NODE" # SNMG tests - run in parallel - #srun \ - # --account $ACCOUNT \ - # --partition $PARTITION \ - # --nv-meta ml-model.dlss,dcgm_opt_out.yes \ - # --job-name=run-nightly-benchmarks \ - # --nodes 1 \ - # --gpus-per-node $GPUS_PER_NODE \ - # --time=4:00:00 \ - # --export=ALL \ - # --exclusive -K\ - # --container-mounts ${ROUTING_CONFIGS_PATH}:${ROUTING_CONFIGS_PATH},${CUOPT_SCRIPTS_DIR}:${CUOPT_SCRIPTS_DIR},${OUTPUT_DIR}:${OUTPUT_DIR} \ - # --container-image=$IMAGE \ - # --output=${BENCHMARK_RESULTS_DIR}/benchmark_routing_log.txt \ - # bash ${CUOPT_SCRIPTS_DIR}/routing_regression_test.sh & - #PID_1=$! - #logger "Process ID $PID_1 in background" + srun \ + --account $ACCOUNT \ + --partition $PARTITION \ + --job-name=run-nightly-benchmarks \ + --nodes 1 \ + --gpus-per-node $GPUS_PER_NODE \ + --time=4:00:00 \ + --export=ALL \ + --exclusive -K\ + --container-mounts ${ROUTING_CONFIGS_PATH}:${ROUTING_CONFIGS_PATH},${CUOPT_SCRIPTS_DIR}:${CUOPT_SCRIPTS_DIR},${OUTPUT_DIR}:${OUTPUT_DIR} \ + --container-image=$IMAGE \ + --output=${BENCHMARK_RESULTS_DIR}/benchmark_routing_log.txt \ + bash ${CUOPT_SCRIPTS_DIR}/routing_regression_test.sh & + PID_1=$! 
+ logger "Process ID $PID_1 in background" srun \ --account $ACCOUNT \ --partition $PARTITION \ - --nv-meta ml-model.dlss,dcgm_opt_out.yes \ --job-name=run-nightly-benchmarks \ --nodes 1 \ --gpus-per-node $GPUS_PER_NODE \ @@ -115,19 +112,19 @@ if [[ $TESTING_FAILED == 0 ]]; then srun \ --account $ACCOUNT \ --partition $PARTITION \ - --nv-meta ml-model.dlss,dcgm_opt_out.yes \ --job-name=run-nightly-benchmarks \ --nodes 1 \ --gpus-per-node $GPUS_PER_NODE \ --time=4:00:00 \ --export=ALL \ --exclusive -K\ - --container-mounts ${MIP_CONFIGS_PATH}:${MIP_CONFIGS_PATH},${CUOPT_SCRIPTS_DIR}:${CUOPT_SCRIPTS_DIR},${OUTPUT_DIR}:${OUTPUT_DIR} \ + --container-mounts ${MIP_DATASETS_PATH}:${MIP_DATASETS_PATH},${CUOPT_SCRIPTS_DIR}:${CUOPT_SCRIPTS_DIR},${OUTPUT_DIR}:${OUTPUT_DIR} \ --container-image=$IMAGE \ --output=${BENCHMARK_RESULTS_DIR}/benchmark_mip_log.txt \ bash ${CUOPT_SCRIPTS_DIR}/mip_regression_test.sh & PID_3=$! + wait $PID_3 wait $PID_0 $PID_1 $PID_2 $PID_3 fi @@ -135,7 +132,6 @@ else # if [[ $TESTING_FAILED == 0 ]] logger "Container testing Failed!" fi -: <<'END' ################################################################################ # Send report based on contents of $RESULTS_DIR # These steps do not require a worker node. 
@@ -149,18 +145,16 @@ if [ -f $METADATA_FILE ]; then source $METADATA_FILE fi -activateCondaEnv - -if [[ $BUILD_FAILED == 0 ]]; then - if [[ $RUN_BENCHMARKS == 1 ]]; then - # Push regression tests to repo - cd ${WORKSPACE}/${RESULT_DIR_NAME}; git add data/*; git commit -m "Update for commit : ${PROJECT_VERSION}"; git push; cd - - # bash ${CUOPT_SCRIPTS_DIR}/save_benchmarks.sh $PROJECT_VERSION - fi -fi +#if [[ $BUILD_FAILED == 0 ]]; then +# if [[ $RUN_BENCHMARKS == 1 ]]; then +# # Push regression tests to repo +# cd ${WORKSPACE}/${RESULT_DIR_NAME}; git add data/*; git commit -m "Update for commit : ${PROJECT_VERSION}"; git push; cd - +# # bash ${CUOPT_SCRIPTS_DIR}/save_benchmarks.sh $PROJECT_VERSION +# fi +#fi # Copy all config files to one folder -cp $ROUTING_CONFIGS_PATH/*config.json $LP_CONFIGS_PATH/*config.json $MIP_CONFIGS_PATH/*config.json $ALL_CONFIGS_PATH/ +cp $ROUTING_CONFIGS_PATH/*config.json $LP_CONFIGS_PATH/*config.json $MIP_DATASETS_PATH/*config.json $ALL_CONFIGS_PATH/ RUN_ASV_OPTION="" if hasArg --skip-asv; then @@ -172,7 +166,7 @@ else if [[ "$PROJECT_BUILD" == "" ]]; then # Update/create the ASV database logger "Updating ASV database" - python $PROJECT_DIR/update_asv_database.py --commitHash=$PROJECT_VERSION --repo-url=$PROJECT_REPO_URL --branch=$PROJECT_REPO_BRANCH --commitTime=$PROJECT_REPO_TIME --results-dir=$RESULTS_DIR --machine-name> + python $PROJECT_DIR/update_asv_database.py --commitHash=$PROJECT_VERSION --repo-url=$PROJECT_REPO_URL --branch=$PROJECT_REPO_BRANCH --commitTime=$PROJECT_REPO_TIME --results-dir=$RESULTS_DIR --machine-name=$MACHINE --gpu-type=$GPU_TYPE --configs=$ALL_CONFIGS_PATH RUN_ASV_OPTION=--run-asv logger "Updated ASV database" else @@ -180,18 +174,13 @@ else fi fi -if hasArg --spreadsheet; then - logger "Generating spreadsheet" - export SPREADSHEET_URL=$(python $PROJECT_DIR/gsheet-report.py --results-dir=$RESULTS_DIR |grep "spreadsheet url is"|cut -d ' ' -f4) - #python $PROJECT_DIR/gsheet-report.py 
--results-dir=$RESULTS_DIR -fi - # The cuopt pull has missing .git folder which causes subsequent runs, lets delete and pull it fresh everytime. rm -rf $RESULTS_DIR/benchmarks/results/asv/cuopt/ rm -rf $RESULTS_DIR/tests ${SCRIPTS_DIR}/create-html-reports.sh $RUN_ASV_OPTION +: <<'END' if hasArg --skip-sending-report; then logger "Skipping sending report." else diff --git a/regression/get_datasets.sh b/regression/get_datasets.sh new file mode 100644 index 000000000..5359cda8f --- /dev/null +++ b/regression/get_datasets.sh @@ -0,0 +1,275 @@ +#!/bin/bash +# SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
+ +# Abort script on first error +set -e + +DELAY=30 + +# Must ensure PROJECT_DIR is exported first then load rapids-mg-tools env +export PROJECT_DIR=${PROJECT_DIR:-$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)} +source ${PROJECT_DIR}/config.sh + +INSTANCES=( +"30n20b8.mps" +"cryptanalysiskb128n5obj14.mps" +"graph20-20-1rand.mps" +"n2seq36q.mps" +"neos-4338804-snowy.mps" +"neos-957323.mps" +"rail01.mps" +"splice1k1.mps" +"50v-10.mps" +"cryptanalysiskb128n5obj16.mps" +"graphdraw-domain.mps" +"n3div36.mps" +"neos-4387871-tavua.mps" +"neos-960392.mps" +"rail02.mps" +"square41.mps" +"academictimetablesmall.mps" +"csched007.mps" +"h80x6320d.mps" +"n5-3.mps" +"neos-4413714-turia.mps" +"net12.mps" +"rail507.mps" +"square47.mps" +"air05.mps" +"csched008.mps" +"highschool1-aigio.mps" +"neos-1122047.mps" +"neos-4532248-waihi.mps" +"netdiversion.mps" +"ran14x18-disj-8.mps" +"supportcase10.mps" +"app1-1.mps" +"cvs16r128-89.mps" +"hypothyroid-k1.mps" +"neos-1171448.mps" +"neos-4647030-tutaki.mps" +"nexp-150-20-8-5.mps" +"rd-rplusc-21.mps" +"supportcase12.mps" +"app1-2.mps" +"dano3_3.mps" +"ic97_potential.mps" +"neos-1171737.mps" +"neos-4722843-widden.mps" +"ns1116954.mps" +"reblock115.mps" +"supportcase18.mps" +"assign1-5-8.mps" +"dano3_5.mps" +"icir97_tension.mps" +"neos-1354092.mps" +"neos-4738912-atrato.mps" +"ns1208400.mps" +"rmatr100-p10.mps" +"supportcase19.mps" +"atlanta-ip.mps" +"decomp2.mps" +"irish-electricity.mps" +"neos-1445765.mps" +"neos-4763324-toguru.mps" +"ns1644855.mps" +"rmatr200-p5.mps" +"supportcase22.mps" +"b1c1s1.mps" +"drayage-100-23.mps" +"irp.mps" +"neos-1456979.mps" +"neos-4954672-berkel.mps" +"ns1760995.mps" +"rocI-4-11.mps" +"supportcase26.mps" +"bab2.mps" +"drayage-25-23.mps" +"istanbul-no-cutoff.mps" +"neos-1582420.mps" +"neos-5049753-cuanza.mps" +"ns1830653.mps" +"rocII-5-11.mps" +"supportcase33.mps" +"bab6.mps" +"dws008-01.mps" +"k1mushroom.mps" +"neos17.mps" +"neos-5052403-cygnet.mps" +"ns1952667.mps" +"rococoB10-011000.mps" +"supportcase40.mps" 
+"beasleyC3.mps" +"eil33-2.mps" +"lectsched-5-obj.mps" +"neos-2075418-temuka.mps" +"neos-5093327-huahum.mps" +"nu25-pr12.mps" +"rococoC10-001000.mps" +"supportcase42.mps" +"binkar10_1.mps" +"eilA101-2.mps" +"leo1.mps" +"neos-2657525-crna.mps" +"neos-5104907-jarama.mps" +"nursesched-medium-hint03.mps" +"roi2alpha3n4.mps" +"supportcase6.mps" +"blp-ar98.mps" +"enlight_hard.mps" +"leo2.mps" +"neos-2746589-doon.mps" +"neos-5107597-kakapo.mps" +"nursesched-sprint02.mps" +"roi5alpha10n8.mps" +"supportcase7.mps" +"blp-ic98.mps" +"ex10.mps" +"lotsize.mps" +"neos-2978193-inde.mps" +"neos-5114902-kasavu.mps" +"nw04.mps" +"roll3000.mps" +"swath1.mps" +"bnatt400.mps" +"ex9.mps" +"mad.mps" +"neos-2987310-joes.mps" +"neos-5188808-nattai.mps" +"opm2-z10-s4.mps" +"s100.mps" +"swath3.mps" +"bnatt500.mps" +"exp-1-500-5-5.mps" +"map10.mps" +"neos-3004026-krka.mps" +"neos-5195221-niemur.mps" +"p200x1188c.mps" +"s250r10.mps" +"tbfp-network.mps" +"bppc4-08.mps" +"fast0507.mps" +"map16715-04.mps" +"neos-3024952-loue.mps" +"neos5.mps" +"peg-solitaire-a3.mps" +"satellites2-40.mps" +"thor50dday.mps" +"brazil3.mps" +"fastxgemm-n2r6s0t2.mps" +"markshare2.mps" +"neos-3046615-murg.mps" +"neos-631710.mps" +"pg5_34.mps" +"satellites2-60-fs.mps" +"timtab1.mps" +"buildingenergy.mps" +"fhnw-binpack4-48.mps" +"markshare_4_0.mps" +"neos-3083819-nubu.mps" +"neos-662469.mps" +"pg.mps" +"savsched1.mps" +"tr12-30.mps" +"cbs-cta.mps" +"fhnw-binpack4-4.mps" +"mas74.mps" +"neos-3216931-puriri.mps" +"neos-787933.mps" +"physiciansched3-3.mps" +"sct2.mps" +"traininstance2.mps" +"chromaticindex1024-7.mps" +"fiball.mps" +"mas76.mps" +"neos-3381206-awhea.mps" +"neos-827175.mps" +"physiciansched6-2.mps" +"seymour1.mps" +"traininstance6.mps" +"chromaticindex512-7.mps" +"gen-ip002.mps" +"mc11.mps" +"neos-3402294-bobin.mps" +"neos-848589.mps" +"piperout-08.mps" +"seymour.mps" +"trento1.mps" +"cmflsp50-24-8-8.mps" +"gen-ip054.mps" +"mcsched.mps" +"neos-3402454-bohle.mps" +"neos859080.mps" +"piperout-27.mps" 
+"sing326.mps" +"triptim1.mps" +"CMS750_4.mps" +"germanrr.mps" +"mik-250-20-75-4.mps" +"neos-3555904-turama.mps" +"neos-860300.mps" +"pk1.mps" +"sing44.mps" +"uccase12.mps" +"co-100.mps" +"gfd-schedulen180f7d50m30k18.mps" +"milo-v12-6-r2-40-1.mps" +"neos-3627168-kasai.mps" +"neos-873061.mps" +"proteindesign121hz512p9.mps" +"snp-02-004-104.mps" +"uccase9.mps" +"cod105.mps" +"glass4.mps" +"momentum1.mps" +"neos-3656078-kumeu.mps" +"neos8.mps" +"proteindesign122trx11p8.mps" +"sorrell3.mps" +"uct-subprob.mps" +"comp07-2idx.mps" +"glass-sc.mps" +"mushroom-best.mps" +"neos-3754480-nidda.mps" +"neos-911970.mps" +"qap10.mps" +"sp150x300d.mps" +"unitcal_7.mps" +"comp21-2idx.mps" +"gmu-35-40.mps" +"mzzv11.mps" +"neos-3988577-wolgan.mps" +"neos-933966.mps" +"radiationm18-12-05.mps" +"sp97ar.mps" +"var-smallemery-m6j6.mps" +"cost266-UUE.mps" +"gmu-35-50.mps" +"mzzv42z.mps" +"neos-4300652-rahue.mps" +"neos-950242.mps" +"radiationm40-10-02.mps" +"sp98ar.mps" +"wachplan.mps" +) + +BASE_URL="https://miplib.zib.de/WebData/instances" + +for INSTANCE in "${INSTANCES[@]}"; do + URL="${BASE_URL}/${INSTANCE}.gz" + OUTFILE="${MIP_DATASETS_PATH}/${INSTANCE}.gz" + + wget -4 --tries=3 --continue --progress=dot:mega --retry-connrefused "${URL}" -O "${OUTFILE}" || { + echo "Failed to download: ${URL}" + continue + } + gunzip -f "${OUTFILE}" +done diff --git a/regression/lp_regression_test.sh b/regression/lp_regression_test.sh index 58c413c6e..f710dce3b 100644 --- a/regression/lp_regression_test.sh +++ b/regression/lp_regression_test.sh @@ -35,7 +35,7 @@ mkdir -p ${RESULTS_DIR}/benchmarks/results/csvs/ #rm -rf ${WORKSPACE}/${RESULT_DIR_NAME}/data/regressions.csv logger "Running lp tests ........" 
-python ${CUOPT_SCRIPTS_DIR}/benchmark_scripts/benchmark.py -c ${LP_CONFIGS_PATH} -r ${RESULTS_DIR}/benchmarks/results/csvs/ -g ${GIT_COMMIT} -l ${LOG_PATH} -s ${RESULTS_DIR}/benchmarks/results/lp_tests_status.txt +python ${CUOPT_SCRIPTS_DIR}/benchmark_scripts/benchmark.py -c ${LP_CONFIGS_PATH} -r ${RESULTS_DIR}/benchmarks/results/csvs/ -g ${GIT_COMMIT} -l ${LOG_PATH} -s ${RESULTS_DIR}/benchmarks/results/lp_tests_status.txt -n ${GPUS_PER_NODE} logger "Completed lp tests ........" diff --git a/regression/mip_regression_test.sh b/regression/mip_regression_test.sh index 09a14cd27..8018014d1 100644 --- a/regression/mip_regression_test.sh +++ b/regression/mip_regression_test.sh @@ -35,7 +35,7 @@ mkdir -p ${RESULTS_DIR}/benchmarks/results/csvs/ #rm -rf ${WORKSPACE}/${RESULT_DIR_NAME}/data/regressions.csv logger "Running mip tests ........" -python ${CUOPT_SCRIPTS_DIR}/benchmark_scripts/benchmark.py -c ${MIP_CONFIGS_PATH} -r ${RESULTS_DIR}/benchmarks/results/csvs/ -g ${GIT_COMMIT} -l ${LOG_PATH} -s ${RESULTS_DIR}/benchmarks/results/mip_tests_status.txt +python ${CUOPT_SCRIPTS_DIR}/benchmark_scripts/benchmark.py -c ${MIP_DATASETS_PATH} -r ${RESULTS_DIR}/benchmarks/results/csvs/ -g ${GIT_COMMIT} -l ${LOG_PATH} -s ${RESULTS_DIR}/benchmarks/results/mip_tests_status.txt -n ${GPUS_PER_NODE} -t mip logger "Completed mip tests ........" diff --git a/regression/routing_regression_test.sh b/regression/routing_regression_test.sh index 8bc335625..57029c5a5 100644 --- a/regression/routing_regression_test.sh +++ b/regression/routing_regression_test.sh @@ -36,7 +36,7 @@ mkdir -p ${RESULTS_DIR}/benchmarks/results/csvs/ logger "Running routing tests ........" 
-python ${CUOPT_SCRIPTS_DIR}/benchmark_scripts/benchmark.py -c ${ROUTING_CONFIGS_PATH} -r ${RESULTS_DIR}/benchmarks/results/csvs/ -g ${GIT_COMMIT} -l ${LOG_PATH} -s ${RESULTS_DIR}/benchmarks/results/routing_tests_status.txt +python ${CUOPT_SCRIPTS_DIR}/benchmark_scripts/benchmark.py -c ${ROUTING_CONFIGS_PATH} -r ${RESULTS_DIR}/benchmarks/results/csvs/ -g ${GIT_COMMIT} -l ${LOG_PATH} -s ${RESULTS_DIR}/benchmarks/results/routing_tests_status.txt -n ${GPUS_PER_NODE} logger "Completed routing tests ........" diff --git a/regression/run_regression.sh b/regression/run_regression.sh index 05f8917b8..63a2b4c64 100644 --- a/regression/run_regression.sh +++ b/regression/run_regression.sh @@ -9,19 +9,15 @@ # its affiliates is strictly prohibited. # Get latest set of datasets -#rm -rf $SCRATCH_DIR/routing_configs/* -#rm -rf $SCRATCH_DIR/lp_configs/* -#rm -rf $SCRATCH_DIR/mip_configs/* +rm -rf $SCRATCH_DIR/routing_configs/* +rm -rf $SCRATCH_DIR/lp_configs/* +rm -rf $SCRATCH_DIR/mip_configs/* -#aws s3 cp s3://cuopt-datasets/regression_datasets/ $SCRATCH_DIR/routing_configs/ --recursive -#aws s3 cp s3://cuopt-datasets/lp_datasets/ $SCRATCH_DIR/lp_configs/ --recursive -#aws s3 cp s3://cuopt-datasets/mip_datasets/ $SCRATCH_DIR/mip_configs/ --recursive +aws s3 cp s3://cuopt-datasets/regression_datasets/ $SCRATCH_DIR/routing_configs/ --recursive +aws s3 cp s3://cuopt-datasets/lp_datasets/ $SCRATCH_DIR/lp_configs/ --recursive +aws s3 cp s3://cuopt-datasets/mip_datasets/ $SCRATCH_DIR/mip_configs/ --recursive -# Git clone multi gpu tools - -##rm -rf $SCRATCH_DIR/multi-gpu-tools - -##git clone ssh://git@gitlab-master.nvidia.com:12051/ramakrishnap/multi-gpu-tools.git $SCRATCH_DIR/multi-gpu-tools +bash $SCRATCH_DIR/cuopt/regression/get_datasets.sh # Run build and test -bash $SCRATCH_DIR/cuopt/regression/cronjob.sh --build-cuopt-env --benchmark --skip-spreadsheet +bash $SCRATCH_DIR/cuopt/regression/cronjob.sh --benchmark --skip-spreadsheet diff --git a/regression/setup-benchmark-dir.sh 
b/regression/setup-benchmark-dir.sh deleted file mode 100755 index c51e46211..000000000 --- a/regression/setup-benchmark-dir.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -# Copyright (c) 2021, NVIDIA CORPORATION. - -# Abort script on first error -set -e - -# Must ensure PROJECT_DIR is exported first then load rapids-mg-tools env -export PROJECT_DIR=${PROJECT_DIR:-$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)} -if [ -n "$RAPIDS_MG_TOOLS_DIR" ]; then - source ${RAPIDS_MG_TOOLS_DIR}/script-env.sh -elif [ -n "$(which script-env.sh)" ]; then - source $(which script-env.sh) -else - echo "Error: \$RAPIDS_MG_TOOLS_DIR/script-env.sh could not be read nor was script-env.sh in PATH." - exit 1 -fi - -################################################################################ - - - -if [ ! -d ${WORKSPACE}/${REPO_DIR_NAME} ]; then - cloneRepo "$CUGRAPH_REPO_URL" $REPO_DIR_NAME $WORKSPACE -fi - - -rm -rf ${BENCHMARK_DIR} -mkdir -p ${BENCHMARK_DIR} -cp -r ${WORKSPACE}/${REPO_DIR_NAME}/benchmarks/python_e2e ${BENCHMARK_DIR} \ No newline at end of file diff --git a/regression/test-container.sh b/regression/test-container.sh index 85732ec2e..b2eb90422 100644 --- a/regression/test-container.sh +++ b/regression/test-container.sh @@ -16,6 +16,7 @@ set -e # Must ensure PROJECT_DIR is exported first then load rapids-mg-tools env export PROJECT_DIR=${PROJECT_DIR:-$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)} +source ${PROJECT_DIR}/config.sh ################################################################################ @@ -23,6 +24,8 @@ export PROJECT_DIR=${PROJECT_DIR:-$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)} logger "Testing container image $IMAGE" python -c "import cuopt; print(cuopt)" +trap "${SCRIPTS_DIR}/write-meta-data.sh" EXIT + # Other scripts look for this to be the last line to determine if this # script completed successfully. This is only possible because of the # "set -e" above. 
diff --git a/regression/update_asv_database.py b/regression/update_asv_database.py index f505ed9ee..df9883f1e 100644 --- a/regression/update_asv_database.py +++ b/regression/update_asv_database.py @@ -1,15 +1,12 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +# SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary # -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
from pathlib import Path import platform @@ -26,7 +23,8 @@ def update_asv_db(commitHash=None, repo_url=None, results_dir=None, machine_name=None, - gpu_type=None): + gpu_type=None, + configs=None): """ Read the benchmark_result* files in results_dir/benchmarks and update an existing asv benchmark database or create one if one @@ -36,39 +34,49 @@ def update_asv_db(commitHash=None, """ # commitHash = commitHash + str(int(time.time())) - benchmark_dir_path = Path(results_dir)/"benchmarks" - asv_dir_path = benchmark_dir_path/"asv" + benchmark_dir_path = Path(results_dir)/"benchmarks"/"results"/"csvs" + asv_dir_path = Path(results_dir)/"benchmarks"/"results"/"asv" # List all benchmark_result files - benchmark_result_list = benchmark_dir_path.glob("results*.csv") + benchmark_result_list = benchmark_dir_path.glob("*.csv") bResultList = [] + # Skip these columns from benchmarking + skip_columns = ["date_time", "git_commit"] + print("AAA") # Create result objects for each benchmark result and store it in a list for file_name in benchmark_result_list: + # skip if it's regression file + if "regressions.csv" in str(file_name): + continue with open(file_name, 'r') as openfile: - data = pd.read_csv(openfile, index_col="test") - if "service_endpoint" in str(file_name) or "service_method" in str(file_name): - name = "Service_Endpoint" if "service_endpoint" in str(file_name) else "Service_Method" - for index, rows in data.iterrows(): - bResult = BenchmarkResult(funcName=name+"."+index+"_runtime", result=rows["run_time"], unit="Seconds") - bResultList.append(bResult) + data = pd.read_csv(openfile, index_col=0).iloc[-1] + test_name = str(file_name).split("/")[-1].split(".")[-2] + config_file = None + if test_name.startswith("lp"): + config_file = configs + "/" + "lp_config.json" + elif test_name.startswith("mip"): + config_file = configs + "/" + "mip_config.json" else: - for index, rows in data.iterrows(): - bResult = BenchmarkResult(funcName=index+"_solver_runtime", 
result=rows["solver_run_time"], unit="Seconds") - bResultList.append(bResult) - bResult = BenchmarkResult(funcName=index+"_etl_runtime", result=rows["etl_time"], unit="Seconds") - bResultList.append(bResult) - bResult = BenchmarkResult(funcName=index+"_memory", result=rows["memory"], unit="MB") - bResultList.append(bResult) - bResult = BenchmarkResult(funcName=index+"_travel_cost", result=rows["travel_cost"], unit="Distance") + config_file = configs + "/" + test_name + "_config.json" + metrics = {} + with open(config_file, 'r') as fp: + metrics = json.load(fp)["metrics"] + for col_name in data.index: + if col_name not in skip_columns: + bResult = BenchmarkResult( + funcName=test_name+"."+col_name, + result=data[col_name].item(), + unit="percentage" if "bks" in col_name else metrics[col_name]["unit"] + ) bResultList.append(bResult) if len(bResultList) == 0: - print("Could not find files matching 'benchmark_result*' in " + print("Could not find files matching 'csv' in " f"{benchmark_dir_path}, not creating/updating ASV database " f"in {asv_dir_path}.") return - + print("BBB") uname = platform.uname() # Maybe also write those metadata to metadata.sh ? 
osType = "%s %s" % (uname.system, uname.release) @@ -92,17 +100,19 @@ def update_asv_db(commitHash=None, 'ram' : "%d" % psutil.virtual_memory().total } bInfo = BenchmarkInfo(**bInfo_dict) - + print("CCC") # extract the branch name branch = bInfo_dict['branch'] db = ASVDb(dbDir=str(asv_dir_path), repo=repo_url, branches=[branch]) - + print("DDD") for res in bResultList: + print(bInfo) + print(res) db.addResult(bInfo, res) - + print("EEE") if __name__ == "__main__": import argparse @@ -121,6 +131,8 @@ def update_asv_db(commitHash=None, help="Slurm cluster name") ap.add_argument("--gpu-type", type=str, required=True, help="the official product name of the GPU") + ap.add_argument("--configs", type=str, required=True, + help="the config file for all the tests") args = ap.parse_args() update_asv_db(commitHash=args.commitHash, @@ -129,4 +141,5 @@ def update_asv_db(commitHash=None, repo_url=args.repo_url, results_dir=args.results_dir, machine_name=args.machine_name, - gpu_type=args.gpu_type) + gpu_type=args.gpu_type, + configs=args.configs) diff --git a/regression/write-cuopt-meta-data.sh b/regression/write-cuopt-meta-data.sh deleted file mode 100755 index 2fcaf6c3f..000000000 --- a/regression/write-cuopt-meta-data.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash -# Copyright (c) 2021, NVIDIA CORPORATION. - -# Abort script on first error -set -e - -# Must ensure PROJECT_DIR is exported first then load rapids-mg-tools env -export PROJECT_DIR=${PROJECT_DIR:-$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)} -if [ -n "$RAPIDS_MG_TOOLS_DIR" ]; then - source ${RAPIDS_MG_TOOLS_DIR}/script-env.sh -elif [ -n "$(which script-env.sh)" ]; then - source $(which script-env.sh) -else - echo "Error: \$RAPIDS_MG_TOOLS_DIR/script-env.sh could not be read nor was script-env.sh in PATH." 
- exit 1 -fi - -################################################################################ - -# Extract the build meta-data from either the conda environment or the -# cugraph source dir and write out a file which can be read by other -# scripts. If the cugraph conda packages are present, those take -# precedence, otherwise meta-data will be extracted from the sources. - -#module load cuda/11.0.3 -activateCondaEnv - -nvidia-smi - - -# auto-detect based on if the libcugraph conda pacakge is installed -# (a from-source build does not have a libcugraph package registered -# in the conda env since it is installed directly via the build). -if (conda list | grep -q libcuopt); then - ${SCRIPTS_DIR}/write-meta-data.sh --from-conda -else - ${SCRIPTS_DIR}/write-meta-data.sh --from-source -fi diff --git a/regression/write-meta-data.sh b/regression/write-meta-data.sh new file mode 100755 index 000000000..26c5102ea --- /dev/null +++ b/regression/write-meta-data.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: LicenseRef-NvidiaProprietary +# +# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual +# property and proprietary rights in and to this material, related +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or +# its affiliates is strictly prohibited. 
+ +# Abort script on first error +set -e + +DELAY=30 + +# Must ensure PROJECT_DIR is exported first then load rapids-mg-tools env +export PROJECT_DIR=${PROJECT_DIR:-$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)} +source ${PROJECT_DIR}/config.sh +source ${PROJECT_DIR}/functions.sh + +PROJECT_VERSION=$(> $METADATA_FILE +echo "PROJECT_VERSION=\"$PROJECT_VERSION\"" >> $METADATA_FILE +echo "PROJECT_BUILD=\"$PROJECT_BUILD\"" >> $METADATA_FILE +echo "PROJECT_CHANNEL=\"$PROJECT_CHANNEL\"" >> $METADATA_FILE +echo "PROJECT_REPO_URL=\"$PROJECT_REPO_URL\"" >> $METADATA_FILE +echo "PROJECT_REPO_BRANCH=\"$PROJECT_REPO_BRANCH\"" >> $METADATA_FILE +echo "PROJECT_REPO_TIME=\"$PROJECT_REPO_TIME\"" >> $METADATA_FILE From 3b6be0770fae55681b35575762dbc12e7d036961 Mon Sep 17 00:00:00 2001 From: Ishika Roy Date: Wed, 29 Oct 2025 22:39:44 -0700 Subject: [PATCH 4/6] update configs and test --- regression/benchmark_scripts/benchmark.py | 2 +- regression/benchmark_scripts/transform.py | 115 --- regression/config.sh | 8 +- regression/cronjob.sh | 7 +- regression/get_datasets.py | 943 ++++++++++++++++++++++ regression/get_datasets.sh | 275 ------- regression/gsheet-report.py | 178 ---- regression/lp_config.json | 13 + regression/lp_regression_test.sh | 2 +- regression/mip_config.json | 29 + regression/report.sh | 67 -- regression/run_regression.sh | 7 +- regression/update_asv_database.py | 11 +- 13 files changed, 999 insertions(+), 658 deletions(-) delete mode 100644 regression/benchmark_scripts/transform.py create mode 100644 regression/get_datasets.py delete mode 100644 regression/get_datasets.sh delete mode 100755 regression/gsheet-report.py create mode 100644 regression/lp_config.json create mode 100644 regression/mip_config.json delete mode 100755 regression/report.sh diff --git a/regression/benchmark_scripts/benchmark.py b/regression/benchmark_scripts/benchmark.py index 411a33900..9cc8c75fa 100644 --- a/regression/benchmark_scripts/benchmark.py +++ b/regression/benchmark_scripts/benchmark.py 
@@ -134,7 +134,7 @@ def run_benchmark( if d_type=="lp" or d_type=="mip": metrics = LPMetrics()._asdict() solver_settings.set_parameter("infeasibility_detection", False) - solver_settings.set_parameter("time_limit", 300) + solver_settings.set_parameter("time_limit", 60) solution = linear_programming.Solve(data_model, solver_settings) else: metrics = RoutingMetrics()._asdict() diff --git a/regression/benchmark_scripts/transform.py b/regression/benchmark_scripts/transform.py deleted file mode 100644 index 29384e5e2..000000000 --- a/regression/benchmark_scripts/transform.py +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/python - -# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-NvidiaProprietary -# -# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual -# property and proprietary rights in and to this material, related -# documentation and any modifications thereto. Any use, reproduction, -# disclosure or distribution of this material and related documentation -# without an express license agreement from NVIDIA CORPORATION or -# its affiliates is strictly prohibited. 
- -import argparse -from pathlib import Path -import json -import cuopt_mps_parser - -def _mps_parse(LP_problem_data, tolerances, time_limit, iteration_limit): - - if isinstance(LP_problem_data, cuopt_mps_parser.parser_wrapper.DataModel): - model = LP_problem_data - else: - model = cuopt_mps_parser.ParseMps(LP_problem_data) - - problem_data = cuopt_mps_parser.toDict(model, json=True) - #variable_names = problem_data.pop("variable_names") - - problem_data["solver_config"] = {} - if tolerances is not None: - problem_data["solver_config"]["tolerances"] = tolerances - if time_limit is not None: - problem_data["solver_config"]["time_limit"] = time_limit - if iteration_limit is not None: - problem_data["solver_config"]["iteration_limit"] = iteration_limit - return problem_data - - -def create_config_and_data(input_directory, file_name, output_directory, prefix, time_limit=None, tolerances=None, iteration_limit=None): - - file_path = input_directory/file_name - data = _mps_parse(file_path.as_posix(), tolerances, time_limit, iteration_limit) - - base_file_name = file_name.split(".")[0] - - config_file_name = prefix +"_" +base_file_name+"_config.json" - data_file_name = prefix +"_" +base_file_name+"_data.json" - - config_data = { - "test_name": prefix +"_" +base_file_name, - "file_name": data_file_name, - "metrics": { - "primal_objective_value": { - "threshold": 1, - "unit": "primal_objective_value", - }, - "solver_time": { - "threshold": 1, - "unit": "seconds" - } - }, - "details": base_file_name + " test" - } - - with open(output_directory/config_file_name, "w") as fp: - json.dump(config_data, fp, indent=4, sort_keys=True) - - with open(output_directory/data_file_name, "w") as fp: - json.dump(data, fp, indent=4, sort_keys=True) - - -if __name__ == "__main__": - - parser = argparse.ArgumentParser( - description="Solve a cuOpt problem using a managed service client." 
- ) - parser.add_argument( - "folder", - type=str, - help="Folder path" - ) - parser.add_argument( - "-o", - "--output", - type=str, - default="", - help="Output folder path" - ) - parser.add_argument( - "-tl", - "--time-limit", - default=None, - type=int, - help="LP timit in milliseconds" - ) - parser.add_argument( - "-p", - "--prefix", - type=str, - default="", - help="Prefix for config and data" - ) - - args = parser.parse_args() - input_directory = Path(args.folder) - output_directory = Path(args.output) - - # List all files with .mps extension - mps_files = [f.name for f in input_directory.glob('*.mps')] - list_of_files = [ - "50v-10", "lotsize", "swath1", "nursesched-medium-hint03", "academictimetablesmall", "dano3_3", - "neos-4338804-snowy", "istanbul-no-cutoff", "s100", "traininstance2" - ] - for mps_file in mps_files: - if mps_file.split(".")[0] in list_of_files: - create_config_and_data(input_directory, mps_file, output_directory, args.prefix, args.time_limit) diff --git a/regression/config.sh b/regression/config.sh index d7f2ab4b3..cb0e0be61 100644 --- a/regression/config.sh +++ b/regression/config.sh @@ -22,8 +22,6 @@ IMAGE="nvidia/cuopt:25.10.0a-cuda12.9-py3.12" ALL_CONFIGS_PATH=$SCRATCH_DIR/configs/ ROUTING_CONFIGS_PATH=$SCRATCH_DIR/routing_configs/ -LP_CONFIGS_PATH=$SCRATCH_DIR/lp_configs/ -MIP_CONFIGS_PATH=$SCRATCH_DIR/mip_configs/ ROUTING_DATASETS_PATH=$SCRATCH_DIR/routing_datasets/ LP_DATASETS_PATH=$SCRATCH_DIR/lp_datasets/ MIP_DATASETS_PATH=$SCRATCH_DIR/mip_datasets/ @@ -38,10 +36,10 @@ RESULT_DIR_NAME=cuopt-regression SSH_CREDS=/home/iroy/.ssh/ # Assume CUOPT_SLACK_APP_ID is defined! 
-CUOPT_SLACK_APP_ID="XYZ" +CUOPT_SLACK_APP_ID="MY_SLACK_APP_ID" WEBHOOK_URL=${WEBHOOK_URL:-https://hooks.slack.com/services/${CUOPT_SLACK_APP_ID}} -S3_FILE_PREFIX=s3://reopt-testing-public/regression_tests -S3_URL_PREFIX=https://reopt-testing-public.s3.amazonaws.com/regression_tests +S3_FILE_PREFIX="MY_S3_FILE_PREFIX" +S3_URL_PREFIX="MY_S3_URL_PREFIX" # Most are defined using the bash := or :- syntax, which means they # will be set only if they were previously unset. The project config diff --git a/regression/cronjob.sh b/regression/cronjob.sh index affc44b11..dbaf9b36f 100755 --- a/regression/cronjob.sh +++ b/regression/cronjob.sh @@ -103,7 +103,7 @@ if [[ $TESTING_FAILED == 0 ]]; then --time=4:00:00 \ --export=ALL \ --exclusive -K\ - --container-mounts ${LP_CONFIGS_PATH}:${LP_CONFIGS_PATH},${CUOPT_SCRIPTS_DIR}:${CUOPT_SCRIPTS_DIR},${OUTPUT_DIR}:${OUTPUT_DIR} \ + --container-mounts ${LP_DATASETS_PATH}:${LP_DATASETS_PATH},${CUOPT_SCRIPTS_DIR}:${CUOPT_SCRIPTS_DIR},${OUTPUT_DIR}:${OUTPUT_DIR} \ --container-image=$IMAGE \ --output=${BENCHMARK_RESULTS_DIR}/benchmark_lp_log.txt \ bash ${CUOPT_SCRIPTS_DIR}/lp_regression_test.sh & @@ -124,8 +124,7 @@ if [[ $TESTING_FAILED == 0 ]]; then bash ${CUOPT_SCRIPTS_DIR}/mip_regression_test.sh & PID_3=$! - wait $PID_3 - wait $PID_0 $PID_1 $PID_2 $PID_3 + wait $PID_1 $PID_2 $PID_3 fi else # if [[ $TESTING_FAILED == 0 ]] @@ -154,7 +153,7 @@ fi #fi # Copy all config files to one folder -cp $ROUTING_CONFIGS_PATH/*config.json $LP_CONFIGS_PATH/*config.json $MIP_DATASETS_PATH/*config.json $ALL_CONFIGS_PATH/ +cp $ROUTING_CONFIGS_PATH/*config.json $PROJECT_DIR/*config.json $ALL_CONFIGS_PATH/ RUN_ASV_OPTION="" if hasArg --skip-asv; then diff --git a/regression/get_datasets.py b/regression/get_datasets.py new file mode 100644 index 000000000..f6136d695 --- /dev/null +++ b/regression/get_datasets.py @@ -0,0 +1,943 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# noqa +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +import urllib.request +import urllib.parse +import ssl +import subprocess + + +# From: https://plato.asu.edu/bench.html +# Folder containg instances: +# - https://miplib2010.zib.de/miplib2010.php +# - https://www.netlib.org/lp/data/ +# - https://old.sztaki.hu/~meszaros/public_ftp/lptestset/ (and it's subfolders) +# - https://plato.asu.edu/ftp/lptestset/ (and it's subfolders) +# - https://miplib.zib.de/tag_benchmark.html +# - https://miplib.zib.de/tag_collection.html + +LPFeasibleMittelmannSet = [ + "L1_sixm250obs", + "Linf_520c", + "a2864", + "bdry2", + "cont1", + "cont11", + "datt256_lp", + "dlr1", + "ex10", + "fhnw-binschedule1", + "fome13", + "graph40-40", + "irish-electricity", + "neos", + "neos3", + "neos-3025225", + "neos-5052403-cygnet", + "neos-5251015", + "ns1687037", + "ns1688926", + "nug08-3rd", + "pds-100", + "physiciansched3-3", + "qap15", + "rail02", + "rail4284", + "rmine15", + "s82", + "s100", + "s250r10", + "savsched1", + "scpm1", + "shs1023", + "square41", + "stat96v2", + "stormG2_1000", + "stp3d", + "supportcase10", + "tpl-tub-ws1617", + "woodlands09", + "Dual2_5000", + "Primal2_1000", + "thk_48", + "thk_63", + "L1_sixm1000obs", + "L2CTA3D", + "degme", + "dlr2", + "set-cover-model" +] + +MiplibInstances = [ +"30n20b8.mps", +"cryptanalysiskb128n5obj14.mps", +"graph20-20-1rand.mps", +"n2seq36q.mps", +"neos-4338804-snowy.mps", 
+"neos-957323.mps", +"rail01.mps", +"splice1k1.mps", +"50v-10.mps", +"cryptanalysiskb128n5obj16.mps", +"graphdraw-domain.mps", +"n3div36.mps", +"neos-4387871-tavua.mps", +"neos-960392.mps", +"rail02.mps", +"square41.mps", +"academictimetablesmall.mps", +"csched007.mps", +"h80x6320d.mps", +"n5-3.mps", +"neos-4413714-turia.mps", +"net12.mps", +"rail507.mps", +"square47.mps", +"air05.mps", +"csched008.mps", +"highschool1-aigio.mps", +"neos-1122047.mps", +"neos-4532248-waihi.mps", +"netdiversion.mps", +"ran14x18-disj-8.mps", +"supportcase10.mps", +"app1-1.mps", +"cvs16r128-89.mps", +"hypothyroid-k1.mps", +"neos-1171448.mps", +"neos-4647030-tutaki.mps", +"nexp-150-20-8-5.mps", +"rd-rplusc-21.mps", +"supportcase12.mps", +"app1-2.mps", +"dano3_3.mps", +"ic97_potential.mps", +"neos-1171737.mps", +"neos-4722843-widden.mps", +"ns1116954.mps", +"reblock115.mps", +"supportcase18.mps", +"assign1-5-8.mps", +"dano3_5.mps", +"icir97_tension.mps", +"neos-1354092.mps", +"neos-4738912-atrato.mps", +"ns1208400.mps", +"rmatr100-p10.mps", +"supportcase19.mps", +"atlanta-ip.mps", +"decomp2.mps", +"irish-electricity.mps", +"neos-1445765.mps", +"neos-4763324-toguru.mps", +"ns1644855.mps", +"rmatr200-p5.mps", +"supportcase22.mps", +"b1c1s1.mps", +"drayage-100-23.mps", +"irp.mps", +"neos-1456979.mps", +"neos-4954672-berkel.mps", +"ns1760995.mps", +"rocI-4-11.mps", +"supportcase26.mps", +"bab2.mps", +"drayage-25-23.mps", +"istanbul-no-cutoff.mps", +"neos-1582420.mps", +"neos-5049753-cuanza.mps", +"ns1830653.mps", +"rocII-5-11.mps", +"supportcase33.mps", +"bab6.mps", +"dws008-01.mps", +"k1mushroom.mps", +"neos17.mps", +"neos-5052403-cygnet.mps", +"ns1952667.mps", +"rococoB10-011000.mps", +"supportcase40.mps", +"beasleyC3.mps", +"eil33-2.mps", +"lectsched-5-obj.mps", +"neos-2075418-temuka.mps", +"neos-5093327-huahum.mps", +"nu25-pr12.mps", +"rococoC10-001000.mps", +"supportcase42.mps", +"binkar10_1.mps", +"eilA101-2.mps", +"leo1.mps", +"neos-2657525-crna.mps", +"neos-5104907-jarama.mps", 
+"neos-5104907-jarama.mps", +"nursesched-medium-hint03.mps", +"roi2alpha3n4.mps", +"supportcase6.mps", +"blp-ar98.mps", +"enlight_hard.mps", +"leo2.mps", +"neos-2746589-doon.mps", +"neos-5107597-kakapo.mps", +"nursesched-sprint02.mps", +"roi5alpha10n8.mps", +"supportcase7.mps", +"blp-ic98.mps", +"ex10.mps", +"lotsize.mps", +"neos-2978193-inde.mps", +"neos-5114902-kasavu.mps", +"nw04.mps", +"roll3000.mps", +"swath1.mps", +"bnatt400.mps", +"ex9.mps", +"mad.mps", +"neos-2987310-joes.mps", +"neos-5188808-nattai.mps", +"opm2-z10-s4.mps", +"s100.mps", +"swath3.mps", +"bnatt500.mps", +"exp-1-500-5-5.mps", +"map10.mps", +"neos-3004026-krka.mps", +"neos-5195221-niemur.mps", +"p200x1188c.mps", +"s250r10.mps", +"tbfp-network.mps", +"bppc4-08.mps", +"fast0507.mps", +"map16715-04.mps", +"neos-3024952-loue.mps", +"neos5.mps", +"peg-solitaire-a3.mps", +"satellites2-40.mps", +"thor50dday.mps", +"brazil3.mps", +"fastxgemm-n2r6s0t2.mps", +"markshare2.mps", +"neos-3046615-murg.mps", +"neos-631710.mps", +"pg5_34.mps", +"satellites2-60-fs.mps", +"timtab1.mps", +"buildingenergy.mps", +"fhnw-binpack4-48.mps", +"markshare_4_0.mps", +"neos-3083819-nubu.mps", +"neos-662469.mps", +"pg.mps", +"savsched1.mps", +"tr12-30.mps", +"cbs-cta.mps", +"fhnw-binpack4-4.mps", +"mas74.mps", +"neos-3216931-puriri.mps", +"neos-787933.mps", +"physiciansched3-3.mps", +"sct2.mps", +"traininstance2.mps", +"chromaticindex1024-7.mps", +"fiball.mps", +"mas76.mps", +"neos-3381206-awhea.mps", +"neos-827175.mps", +"physiciansched6-2.mps", +"seymour1.mps", +"traininstance6.mps", +"chromaticindex512-7.mps", +"gen-ip002.mps", +"mc11.mps", +"neos-3402294-bobin.mps", +"neos-848589.mps", +"piperout-08.mps", +"seymour.mps", +"trento1.mps", +"cmflsp50-24-8-8.mps", +"gen-ip054.mps", +"mcsched.mps", +"neos-3402454-bohle.mps", +"neos859080.mps", +"piperout-27.mps", +"sing326.mps", +"triptim1.mps", +"CMS750_4.mps", +"germanrr.mps", +"mik-250-20-75-4.mps", +"neos-3555904-turama.mps", +"neos-860300.mps", +"pk1.mps", +"sing44.mps", 
+"uccase12.mps", +"co-100.mps", +"gfd-schedulen180f7d50m30k18.mps", +"milo-v12-6-r2-40-1.mps", +"neos-3627168-kasai.mps", +"neos-873061.mps", +"proteindesign121hz512p9.mps", +"snp-02-004-104.mps", +"uccase9.mps", +"cod105.mps", +"glass4.mps", +"momentum1.mps", +"neos-3656078-kumeu.mps", +"neos8.mps", +"proteindesign122trx11p8.mps", +"sorrell3.mps", +"uct-subprob.mps", +"comp07-2idx.mps", +"glass-sc.mps", +"mushroom-best.mps", +"neos-3754480-nidda.mps", +"neos-911970.mps", +"qap10.mps", +"sp150x300d.mps", +"unitcal_7.mps", +"comp21-2idx.mps", +"gmu-35-40.mps", +"mzzv11.mps", +"neos-3988577-wolgan.mps", +"neos-933966.mps", +"radiationm18-12-05.mps", +"sp97ar.mps", +"var-smallemery-m6j6.mps", +"cost266-UUE.mps", +"gmu-35-50.mps", +"mzzv42z.mps", +"neos-4300652-rahue.mps", +"neos-950242.mps", +"radiationm40-10-02.mps", +"sp98ar.mps", +"wachplan.mps", +] + +MittelmannInstances = { + "emps": "https://old.sztaki.hu/~meszaros/public_ftp/lptestset/emps.c", + "problems" : { + "irish-electricity" : [ + "https://plato.asu.edu/ftp/lptestset/irish-electricity.mps.bz2", + "mps" + ], + "physiciansched3-3" : [ + "https://plato.asu.edu/ftp/lptestset/physiciansched3-3.mps.bz2", + "mps" + ], + "16_n14" : [ + "https://plato.asu.edu/ftp/lptestset/network/16_n14.mps.bz2", + "mps" + ], + "Dual2_5000" : [ + "https://plato.asu.edu/ftp/lptestset/Dual2_5000.mps.bz2", + "mps" + ], + "L1_six1000" : [ + "https://plato.asu.edu/ftp/lptestset/L1_sixm1000obs.bz2", + "netlib" + ], + "L1_sixm" : ["", "mps"], + "L1_sixm1000obs" : [ + "https://plato.asu.edu/ftp/lptestset/L1_sixm1000obs.bz2", + "netlib" + ], + "L1_sixm250" : ["", "netlib"], + "L1_sixm250obs" : [ + "https://plato.asu.edu/ftp/lptestset/L1_sixm250obs.bz2", + "netlib" + ], + "L2CTA3D" : [ + "https://plato.asu.edu/ftp/lptestset/L2CTA3D.mps.bz2", + "mps" + ], + "Linf_520c" : [ + "https://plato.asu.edu/ftp/lptestset/Linf_520c.bz2", + "netlib" + ], + "Primal2_1000" : [ + "https://plato.asu.edu/ftp/lptestset/Primal2_1000.mps.bz2", + "mps" + ], + 
"a2864" : [ + "https://plato.asu.edu/ftp/lptestset/a2864.mps.bz2", + "mps" + ], + "bdry2" : [ + "https://plato.asu.edu/ftp/lptestset/bdry2.bz2", + "netlib" + ], + "braun" : ["", "mps"], + "cont1" : [ + "https://plato.asu.edu/ftp/lptestset/misc/cont1.bz2", + "netlib" + ], + "cont11" : [ + "https://plato.asu.edu/ftp/lptestset/misc/cont11.bz2", + "netlib" + ], + "datt256" : [ + "https://plato.asu.edu/ftp/lptestset/datt256_lp.mps.bz2", + "mps" + ], + "datt256_lp" : [ + "https://plato.asu.edu/ftp/lptestset/datt256_lp.mps.bz2", + "mps" + ], + "degme" : [ + "https://old.sztaki.hu/~meszaros/public_ftp/lptestset/New/degme.gz", + "netlib" + ], + "dlr1" : [ + "https://plato.asu.edu/ftp/lptestset/dlr1.mps.bz2", + "mps" + ], + "dlr2" : [ + "https://plato.asu.edu/ftp/lptestset/dlr2.mps.bz2", + "mps" + ], + "energy1" : ["", "mps"], # Kept secret by Mittlemman + "energy2" : ["", "mps"], + "ex10" : [ + "https://plato.asu.edu/ftp/lptestset/ex10.mps.bz2", + "mps" + ], + "fhnw-binschedule1" : [ + "https://plato.asu.edu/ftp/lptestset/fhnw-binschedule1.mps.bz2", + "mps" + ], + "fome13" : [ + "https://plato.asu.edu/ftp/lptestset/fome/fome13.bz2", + "netlib" + ], + "gamora" : ["", "mps"], # Kept secret by Mittlemman + "goto14_256_1" : ["", "mps"], + "goto14_256_2" : ["", "mps"], + "goto14_256_3" : ["", "mps"], + "goto14_256_4" : ["", "mps"], + "goto14_256_5" : ["", "mps"], + "goto16_64_1" : ["", "mps"], + "goto16_64_2" : ["", "mps"], + "goto16_64_3" : ["", "mps"], + "goto16_64_4" : ["", "mps"], + "goto16_64_5" : ["", "mps"], + "goto32_512_1" : ["", "mps"], + "goto32_512_2" : ["", "mps"], + "goto32_512_3" : ["", "mps"], + "goto32_512_4" : ["", "mps"], + "goto32_512_5" : ["", "mps"], + "graph40-40" : [ + "https://plato.asu.edu/ftp/lptestset/graph40-40.mps.bz2", + "mps" + ], + "graph40-40_lp" : [ + "https://plato.asu.edu/ftp/lptestset/graph40-40.mps.bz2", + "mps" + ], + "groot" : ["", "mps"], # Kept secret by Mittlemman + "heimdall" : ["", "mps"], # Kept secret by Mittlemman + "hulk" : ["", 
"mps"], # Kept secret by Mittlemman + "i_n13" : [ + "https://plato.asu.edu/ftp/lptestset/network/i_n13.mps.bz2", + "mps" + ], + "irish-e" : ["", "mps"], + "karted" : [ + "https://old.sztaki.hu/~meszaros/public_ftp/lptestset/New/karted.gz", + "netlib" + ], + "lo10" : [ + "https://plato.asu.edu/ftp/lptestset/network/lo10.mps.bz2", + "mps" + ], + "loki" : ["", "mps"], # Kept secret by Mittlemman + "long15" : [ + "https://plato.asu.edu/ftp/lptestset/network/long15.mps.bz2", + "mps" + ], + "nebula" : ["", "mps"], # Kept secret by Mittlemman + "neos" : [ + "https://plato.asu.edu/ftp/lptestset/misc/neos.bz2", + "netlib" + ], + "neos-3025225" : [ + "https://plato.asu.edu/ftp/lptestset/neos-3025225.mps.bz2", + "mps" + ], + "neos-3025225_lp" : [ + "https://plato.asu.edu/ftp/lptestset/neos-3025225.mps.bz2", + "mps" + ], + "neos-5251015" : [ + "https://plato.asu.edu/ftp/lptestset/neos-5251015.mps.bz2", + "mps" + ], + "neos-5251015_lp" : [ + "https://plato.asu.edu/ftp/lptestset/neos-5251015.mps.bz2", + "mps" + ], + "neos3" : [ + "https://plato.asu.edu/ftp/lptestset/misc/neos3.bz2", + "netlib" + ], + "neos-5052403-cygnet" : [ + "https://plato.asu.edu/ftp/lptestset/neos-5052403-cygnet.mps.bz2", + "mps" + ], + "neos5251015_lp" : [ + "https://plato.asu.edu/ftp/lptestset/neos-5251015.mps.bz2", + "mps" + ], + "neos5251915" : [ + "https://plato.asu.edu/ftp/lptestset/neos-5251015.mps.bz2", + "mps" + ], + "netlarge1" : [ + "https://plato.asu.edu/ftp/lptestset/network/netlarge1.mps.bz2", + "mps" + ], + "netlarge2" : [ + "https://plato.asu.edu/ftp/lptestset/network/netlarge2.mps.bz2", + "mps" + ], + "netlarge3" : [ + "https://plato.asu.edu/ftp/lptestset/network/netlarge3.mps.bz2", + "mps" + ], + "netlarge6" : [ + "https://plato.asu.edu/ftp/lptestset/network/netlarge6.mps.bz2", + "mps" + ], + "ns1687037" : [ + "https://plato.asu.edu/ftp/lptestset/misc/ns1687037.bz2", + "netlib" + ], + "ns1688926" : [ + "https://plato.asu.edu/ftp/lptestset/misc/ns1688926.bz2", + "netlib" + ], + "nug08-3rd" 
: [ + "https://plato.asu.edu/ftp/lptestset/nug/nug08-3rd.bz2", + "netlib" + ], + "pds-100" : [ + "https://plato.asu.edu/ftp/lptestset/pds/pds-100.bz2", + "netlib" + ], + "psched3-3" : ["", "mps"], + "qap15" : [ + "https://plato.asu.edu/ftp/lptestset/qap15.mps.bz2", + "mps" + ], + "rail02" : [ + "https://miplib2010.zib.de/download/rail02.mps.gz", + "mps" + ], + "rail4284" : [ + "https://plato.asu.edu/ftp/lptestset/rail/rail4284.bz2", + "netlib" + ], + "rmine15" : [ + "https://plato.asu.edu/ftp/lptestset/rmine15.mps.bz2", + "mps" + ], + "s100" : [ + "https://plato.asu.edu/ftp/lptestset/s100.mps.bz2", + "mps" + ], + "s250r10" : [ + "https://plato.asu.edu/ftp/lptestset/s250r10.mps.bz2", + "mps" + ], + "s82" : [ + "https://plato.asu.edu/ftp/lptestset/s82.mps.bz2", + "mps" + ], + "savsched1" : [ + "https://plato.asu.edu/ftp/lptestset/savsched1.mps.bz2", + "mps" + ], + "scpm1" : [ + "https://plato.asu.edu/ftp/lptestset/scpm1.mps.bz2", + "mps" + ], + "set-cover-model" : [ + "https://plato.asu.edu/ftp/lptestset/set-cover-model.mps.bz2", + "mps" + ], + "shs1023" : [ + "https://miplib2010.zib.de/download/shs1023.mps.gz", + "mps" + ], + "square15" : [ + "https://plato.asu.edu/ftp/lptestset/network/square15.mps.bz2", + "mps" + ], + "square41" : [ + "https://plato.asu.edu/ftp/lptestset/square41.mps.bz2", + "mps" + ], + "stat96v2" : [ + "https://old.sztaki.hu/~meszaros/public_ftp/lptestset/misc/stat96v2.gz", + "netlib" + ], + "stormG2_1000" : [ + "https://plato.asu.edu/ftp/lptestset/misc/stormG2_1000.bz2", + "netlib" + ], + "storm_1000" : ["", "mps"], + "stp3d" : [ + "https://miplib.zib.de/WebData/instances/stp3d.mps.gz", + "mps" + ], + "supportcase10" : [ + "https://plato.asu.edu/ftp/lptestset/supportcase10.mps.bz2", + "mps" + ], + "support19" : [ + "https://plato.asu.edu/ftp/lptestset/supportcase19.mps.bz2", + "mps" + ], + "supportcase19" : [ + "https://plato.asu.edu/ftp/lptestset/supportcase19.mps.bz2", + "mps" + ], + "test03" : ["", "mps"], # Kept secret by Mittlemman + 
"test13" : ["", "mps"], # Kept secret by Mittlemman + "test23" : ["", "mps"], # Kept secret by Mittlemman + "test33" : ["", "mps"], # Kept secret by Mittlemman + "test43" : ["", "mps"], # Kept secret by Mittlemman + "test53" : ["", "mps"], # Kept secret by Mittlemman + "test63" : ["", "mps"], # Kept secret by Mittlemman + "test83" : ["", "mps"], # Kept secret by Mittlemman + "test93" : ["", "mps"], # Kept secret by Mittlemman + "mars" : ["", "mps"], # Kept secret by Mittlemman + "thk_48" : ["https://plato.asu.edu/ftp/lptestset/thk_48.mps.bz2", "mps"], + "thk_63" : [ + "https://plato.asu.edu/ftp/lptestset/thk_63.mps.bz2", + "mps" + ], + "thor" : ["", "mps"], # Kept secret by Mittlemman + "tpl-tub-ws" : ["", "mps"], + "tpl-tub-ws1617" : [ + "https://plato.asu.edu/ftp/lptestset/tpl-tub-ws1617.mps.bz2", + "mps" + ], + "wide15" : [ + "https://plato.asu.edu/ftp/lptestset/network/wide15.mps.bz2", + "mps" + ], + "woodlands09" : [ + "https://plato.asu.edu/ftp/lptestset/woodlands09.mps.bz2", + "mps" + ] + }, + "benchmarks" : { + "simplex" : [ + "L1_sixm", + "L1_sixm250obs", + "Linf_520c", + "a2864", + "bdry2", + "braun", + "cont1", + "cont11", + "datt256", + "dlr1", + "energy1", + "energy2", + "ex10", + "fhnw-binschedule1", + "fome13", + "gamora", + "graph40-40", + "groot", + "heimdall", + "hulk", + "irish-e", + "loki", + "nebula", + "neos", + "neos-3025225_lp", + "neos-5251015_lp", + "neos3", + "neos3025225", + "neos5052403", + "neos5251015_lp", + "ns1687037", + "ns1688926", + "nug08-3rd", + "pds-100", + "psched3-3", + "qap15", + "rail02", + "rail4284", + "rmine15", + "s100", + "s250r10", + "s82", + "savsched1", + "scpm1", + "shs1023", + "square41", + "stat96v2", + "stormG2_1000", + "storm_1000", + "stp3d", + "support10", + "test03", + "test13", + "test23", + "test33", + "test43", + "test53", + "thor", + "tpl-tub-ws", + "tpl-tub-ws16", + "woodlands09" + ], + "barrier" : [ + "Dual2_5000", + "L1_six1000", + "L1_sixm1000obs", + "L1_sixm250", + "L1_sixm250obs", + "L2CTA3D", + 
"Linf_520c", + "Primal2_1000", + "a2864", + "bdry2", + "cont1", + "cont11", + "datt256", + "degme", + "dlr1", + "dlr2", + "ex10", + "fhnw-binschedule1", + "fome13", + "graph40-40", + "irish-e", + "karted", + "neos", + "neos-3025225_lp", + "neos-5251015_lp", + "neos3", + "neos3025225", + "neos5052403", + "neos5251915", + "ns1687037", + "ns1688926", + "nug08-3rd", + "pds-100", + "psched3-3", + "qap15", + "rail02", + "rail4284", + "rmine15", + "s100", + "s250r10", + "s82", + "savsched1", + "scpm1", + "set-cover-model", + "shs1023", + "square41", + "stat96v2", + "stormG2_1000", + "storm_1000", + "stp3d", + "support10", + "support19", + "supportcase19", + "thk_63", + "tpl-tub-ws", + "tpl-tub-ws16", + "woodlands09" + ], + "large" : [ + "16_n14", + "goto14_256_1", + "goto14_256_2", + "goto14_256_3", + "goto14_256_4", + "goto14_256_5", + "goto16_64_1", + "goto16_64_2", + "goto16_64_3", + "goto16_64_4", + "goto16_64_5", + "goto32_512_1", + "goto32_512_2", + "goto32_512_3", + "goto32_512_4", + "goto32_512_5", + "i_n13", + "lo10", + "long15", + "netlarge1", + "netlarge2", + "netlarge3", + "netlarge6", + "square15", + "wide15" + ], + # <=100s in bench: https://plato.asu.edu/ftp/lpbar.html + "L0" : [ + "ex10", + "datt256", + "graph40-40", + "neos5251915", + "nug08-3rd", + "qap15", + "savsched1", + "scpm1", + "a2864", + "support10", + "rmine15", + "fome13", + "L2CTA3D", + "neos5052403", + "karted", + "stp3d", + "woodlands09", + "rail4284", + "L1_sixm250", + "tpl-tub-ws" + ], + #>100 <1000 + "L1" : [ + "s250r10", + "pds-100", + "set-cover-model", + "neos3025225", + "rail02", + "square41", + "degme", + "Linf_520c", + "cont1", + "neos", + "stat96v2", + "support19", + "shs1023", + "storm_1000" + ], + # >1000 + "L2" : [ + "thk_63", + "Primal2_1000", + "L1_six1000", + "Dual2_5000", + "s100", + "fhnw-binschedule1", + "cont11", + "psched3-3" + ], + #t -> >15000 + "L3" : [ + "dlr2", + "bdry2", + "dlr1", + "irish-e", + "ns1687037", + "ns1688926", + "s82" + ], + } +} + +def download(url, 
dst): + print("HERE") + if os.path.exists(dst): + return + print(f"Downloading {url} into {dst}...") + # Bypass SSL verification for plato.asu.edu URLs + if "plato.asu.edu" in url: + context = ssl.create_default_context() + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + response = urllib.request.urlopen(url, context=context) + else: + response = urllib.request.urlopen(url) + data = response.read() + with open(dst, "wb") as fp: + fp.write(data) + +def extract(file, dir, type): + basefile = os.path.basename(file) + outfile = "" + unzippedfile = "" + if basefile.endswith(".bz2"): + outfile = basefile.replace(".bz2", ".mps") + unzippedfile = basefile.replace(".bz2", "") + subprocess.run(f"cd {dir} && bzip2 -d {basefile}", shell=True) + elif basefile.endswith(".gz"): + outfile = basefile.replace(".gz", ".mps") + unzippedfile = basefile.replace(".gz", "") + subprocess.run(f"cd {dir} && gunzip -c {basefile} > {unzippedfile}", + shell=True) + subprocess.run(f"cd {dir} && rm -rf {basefile}", + shell=True) + else: + raise Exception(f"Unknown file extension found for extraction {file}") + # download emps and compile + # Disable emps for now + if type == "netlib": + url = MittelmannInstances["emps"] + file = os.path.join(dir, "emps.c") + download(url, file) + subprocess.run(f"cd {dir} && gcc -Wno-implicit-int emps.c -o emps", + shell=True) + # determine output file and run emps + subprocess.run(f"cd {dir} && ./emps {unzippedfile} > {outfile}", + shell=True) + subprocess.run(f"cd {dir} && rm -rf {unzippedfile}", + shell=True) + # cleanup emps and emps.c + subprocess.run(f"rm -rf {dir}/emps*", + shell=True) + +def download_lp_dataset(name, dir): + if name not in MittelmannInstances["problems"]: + raise Exception(f"Unknown dataset {name} passed") + if os.path.exists(dir): + if os.path.exists(os.path.join(dir, f"{name}.mps")): + print(f"Dir for dataset {name} exists and contains {name}.mps. 
Skipping...") + return + url, type = MittelmannInstances["problems"][name] + if url == "": + print(f"Dataset {name} doesn't have a URL. Skipping...") + return + file = os.path.join(dir, os.path.basename(url)) + download(url, file) + extract(file, dir, type) + + +def download_mip_dataset(name, dir): + base_url = "https://miplib.zib.de/WebData/instances" + url = f"{base_url}/{name}.gz" + outfile = f"{dir}/{name}.gz" + download(url, outfile) + extract(outfile, dir, "") + +datasets_path = sys.argv[1] +dataset_type = sys.argv[2] + +if dataset_type == "lp": + for name in LPFeasibleMittelmannSet: + download_lp_dataset(name, datasets_path) +elif dataset_type == "mip": + for name in MiplibInstances: + download_mip_dataset(name, datasets_path) diff --git a/regression/get_datasets.sh b/regression/get_datasets.sh deleted file mode 100644 index 5359cda8f..000000000 --- a/regression/get_datasets.sh +++ /dev/null @@ -1,275 +0,0 @@ -#!/bin/bash -# SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-NvidiaProprietary -# -# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual -# property and proprietary rights in and to this material, related -# documentation and any modifications thereto. Any use, reproduction, -# disclosure or distribution of this material and related documentation -# without an express license agreement from NVIDIA CORPORATION or -# its affiliates is strictly prohibited. 
- -# Abort script on first error -set -e - -DELAY=30 - -# Must ensure PROJECT_DIR is exported first then load rapids-mg-tools env -export PROJECT_DIR=${PROJECT_DIR:-$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)} -source ${PROJECT_DIR}/config.sh - -INSTANCES=( -"30n20b8.mps" -"cryptanalysiskb128n5obj14.mps" -"graph20-20-1rand.mps" -"n2seq36q.mps" -"neos-4338804-snowy.mps" -"neos-957323.mps" -"rail01.mps" -"splice1k1.mps" -"50v-10.mps" -"cryptanalysiskb128n5obj16.mps" -"graphdraw-domain.mps" -"n3div36.mps" -"neos-4387871-tavua.mps" -"neos-960392.mps" -"rail02.mps" -"square41.mps" -"academictimetablesmall.mps" -"csched007.mps" -"h80x6320d.mps" -"n5-3.mps" -"neos-4413714-turia.mps" -"net12.mps" -"rail507.mps" -"square47.mps" -"air05.mps" -"csched008.mps" -"highschool1-aigio.mps" -"neos-1122047.mps" -"neos-4532248-waihi.mps" -"netdiversion.mps" -"ran14x18-disj-8.mps" -"supportcase10.mps" -"app1-1.mps" -"cvs16r128-89.mps" -"hypothyroid-k1.mps" -"neos-1171448.mps" -"neos-4647030-tutaki.mps" -"nexp-150-20-8-5.mps" -"rd-rplusc-21.mps" -"supportcase12.mps" -"app1-2.mps" -"dano3_3.mps" -"ic97_potential.mps" -"neos-1171737.mps" -"neos-4722843-widden.mps" -"ns1116954.mps" -"reblock115.mps" -"supportcase18.mps" -"assign1-5-8.mps" -"dano3_5.mps" -"icir97_tension.mps" -"neos-1354092.mps" -"neos-4738912-atrato.mps" -"ns1208400.mps" -"rmatr100-p10.mps" -"supportcase19.mps" -"atlanta-ip.mps" -"decomp2.mps" -"irish-electricity.mps" -"neos-1445765.mps" -"neos-4763324-toguru.mps" -"ns1644855.mps" -"rmatr200-p5.mps" -"supportcase22.mps" -"b1c1s1.mps" -"drayage-100-23.mps" -"irp.mps" -"neos-1456979.mps" -"neos-4954672-berkel.mps" -"ns1760995.mps" -"rocI-4-11.mps" -"supportcase26.mps" -"bab2.mps" -"drayage-25-23.mps" -"istanbul-no-cutoff.mps" -"neos-1582420.mps" -"neos-5049753-cuanza.mps" -"ns1830653.mps" -"rocII-5-11.mps" -"supportcase33.mps" -"bab6.mps" -"dws008-01.mps" -"k1mushroom.mps" -"neos17.mps" -"neos-5052403-cygnet.mps" -"ns1952667.mps" -"rococoB10-011000.mps" -"supportcase40.mps" 
-"beasleyC3.mps" -"eil33-2.mps" -"lectsched-5-obj.mps" -"neos-2075418-temuka.mps" -"neos-5093327-huahum.mps" -"nu25-pr12.mps" -"rococoC10-001000.mps" -"supportcase42.mps" -"binkar10_1.mps" -"eilA101-2.mps" -"leo1.mps" -"neos-2657525-crna.mps" -"neos-5104907-jarama.mps" -"nursesched-medium-hint03.mps" -"roi2alpha3n4.mps" -"supportcase6.mps" -"blp-ar98.mps" -"enlight_hard.mps" -"leo2.mps" -"neos-2746589-doon.mps" -"neos-5107597-kakapo.mps" -"nursesched-sprint02.mps" -"roi5alpha10n8.mps" -"supportcase7.mps" -"blp-ic98.mps" -"ex10.mps" -"lotsize.mps" -"neos-2978193-inde.mps" -"neos-5114902-kasavu.mps" -"nw04.mps" -"roll3000.mps" -"swath1.mps" -"bnatt400.mps" -"ex9.mps" -"mad.mps" -"neos-2987310-joes.mps" -"neos-5188808-nattai.mps" -"opm2-z10-s4.mps" -"s100.mps" -"swath3.mps" -"bnatt500.mps" -"exp-1-500-5-5.mps" -"map10.mps" -"neos-3004026-krka.mps" -"neos-5195221-niemur.mps" -"p200x1188c.mps" -"s250r10.mps" -"tbfp-network.mps" -"bppc4-08.mps" -"fast0507.mps" -"map16715-04.mps" -"neos-3024952-loue.mps" -"neos5.mps" -"peg-solitaire-a3.mps" -"satellites2-40.mps" -"thor50dday.mps" -"brazil3.mps" -"fastxgemm-n2r6s0t2.mps" -"markshare2.mps" -"neos-3046615-murg.mps" -"neos-631710.mps" -"pg5_34.mps" -"satellites2-60-fs.mps" -"timtab1.mps" -"buildingenergy.mps" -"fhnw-binpack4-48.mps" -"markshare_4_0.mps" -"neos-3083819-nubu.mps" -"neos-662469.mps" -"pg.mps" -"savsched1.mps" -"tr12-30.mps" -"cbs-cta.mps" -"fhnw-binpack4-4.mps" -"mas74.mps" -"neos-3216931-puriri.mps" -"neos-787933.mps" -"physiciansched3-3.mps" -"sct2.mps" -"traininstance2.mps" -"chromaticindex1024-7.mps" -"fiball.mps" -"mas76.mps" -"neos-3381206-awhea.mps" -"neos-827175.mps" -"physiciansched6-2.mps" -"seymour1.mps" -"traininstance6.mps" -"chromaticindex512-7.mps" -"gen-ip002.mps" -"mc11.mps" -"neos-3402294-bobin.mps" -"neos-848589.mps" -"piperout-08.mps" -"seymour.mps" -"trento1.mps" -"cmflsp50-24-8-8.mps" -"gen-ip054.mps" -"mcsched.mps" -"neos-3402454-bohle.mps" -"neos859080.mps" -"piperout-27.mps" 
-"sing326.mps" -"triptim1.mps" -"CMS750_4.mps" -"germanrr.mps" -"mik-250-20-75-4.mps" -"neos-3555904-turama.mps" -"neos-860300.mps" -"pk1.mps" -"sing44.mps" -"uccase12.mps" -"co-100.mps" -"gfd-schedulen180f7d50m30k18.mps" -"milo-v12-6-r2-40-1.mps" -"neos-3627168-kasai.mps" -"neos-873061.mps" -"proteindesign121hz512p9.mps" -"snp-02-004-104.mps" -"uccase9.mps" -"cod105.mps" -"glass4.mps" -"momentum1.mps" -"neos-3656078-kumeu.mps" -"neos8.mps" -"proteindesign122trx11p8.mps" -"sorrell3.mps" -"uct-subprob.mps" -"comp07-2idx.mps" -"glass-sc.mps" -"mushroom-best.mps" -"neos-3754480-nidda.mps" -"neos-911970.mps" -"qap10.mps" -"sp150x300d.mps" -"unitcal_7.mps" -"comp21-2idx.mps" -"gmu-35-40.mps" -"mzzv11.mps" -"neos-3988577-wolgan.mps" -"neos-933966.mps" -"radiationm18-12-05.mps" -"sp97ar.mps" -"var-smallemery-m6j6.mps" -"cost266-UUE.mps" -"gmu-35-50.mps" -"mzzv42z.mps" -"neos-4300652-rahue.mps" -"neos-950242.mps" -"radiationm40-10-02.mps" -"sp98ar.mps" -"wachplan.mps" -) - -BASE_URL="https://miplib.zib.de/WebData/instances" - -for INSTANCE in "${INSTANCES[@]}"; do - URL="${BASE_URL}/${INSTANCE}.gz" - OUTFILE="${MIP_DATASETS_PATH}/${INSTANCE}.gz" - - wget -4 --tries=3 --continue --progress=dot:mega --retry-connrefused "${URL}" -O "${OUTFILE}" || { - echo "Failed to download: ${URL}" - continue - } - gunzip -f "${OUTFILE}" -done diff --git a/regression/gsheet-report.py b/regression/gsheet-report.py deleted file mode 100755 index 63077f650..000000000 --- a/regression/gsheet-report.py +++ /dev/null @@ -1,178 +0,0 @@ -# -# Copyright (c) 2021, NVIDIA CORPORATION. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from pathlib import Path -import json -import gspread -from oauth2client.service_account import ServiceAccountCredentials -import time -import os -from datetime import datetime - -class Gsheet_Report: - def __init__(self, results_dir): - self.benchmark_dir_path = Path(results_dir)/"benchmarks" - self.benchmark_result_list = list(self.benchmark_dir_path.glob("benchmark_result*")) - # FIXME: This is a default list of the current MNMG algos benchmarkee, this is subject to change - self.map_algo_sheet = {'bfs':"BFS", "sssp":"SSSP", "louvain":"Louvain", "pagerank":"Pagerank", "wcc":"WCC", "katz":"Katz"} - self.algos = None - self.sheet_names = None - self.spreadsheet = None - self.gc = None - - def _setup_authentication(self): - # Setup authentication and open the spreasheet - # Before running cronjob, run a script setting the credential path - if os.environ.get("GOOGLE_SHEETS_CREDENTIALS_PATH", None): - credentials_path = os.environ["GOOGLE_SHEETS_CREDENTIALS_PATH"] - self.gc = gspread.service_account(filename=credentials_path) - else: - raise Exception("Invalid credentials path") - - def _import_sample_worksheet(self): - # import a sample benchmark result table and copy it to the new benchmark result spreadsheet - date_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S') - spreadsheet_name = f"MNMG-benchmark-results {date_time}" - self.spreadsheet = self.gc.create(spreadsheet_name) - - sample_spreadsheet_name = "sample" - - def from_sample_spreadsheet(gc, sample_spreadsheet_name, spreadsheet): - sample_spreadsheet = self.gc.open(sample_spreadsheet_name) - 
sample_worksheet = sample_spreadsheet.worksheet('sample') - sample_worksheet.copy_to(spreadsheet.id) - - from_sample_spreadsheet(self.gc, sample_spreadsheet_name, self.spreadsheet) - - # The new create spreadsheet has a default worksheet - # delete that worksheet - self.spreadsheet.del_worksheet(self.spreadsheet.get_worksheet(0)) - # Rename the only worksheet to sheet to sample - # This worksheet containing an empty result table will be copied to as - # many sheet as there algos in the benchmark result dir - self.spreadsheet.get_worksheet(0).update_title('sample') - - # Send the new create spreadsheet to my google drive - self.spreadsheet.share('jnke2016@gmail.com', perm_type='user', role='writer') - - def _extract_sheet_names(self): - # From the benchmark result dir, get the list of algo's name that were run - # Those will be used to as worksheet's name - - # if the benchmark result list is empty, no benchmark were run - if len(self.benchmark_result_list) == 0: - return - - algos = map(lambda x: str(x).split('.')[1].split('.')[0], list(self.benchmark_result_list)) - # remove duplicates - self.algos = list(set(algos)) - self.sheet_names = map(lambda x:self.map_algo_sheet[x], self.algos) - return True - - - def _create_worksheets(self, sheet_names=None): - # Create as many worksheet as there are algos in the benchmark results dir - if not isinstance(sheet_names, list) and sheet_names is not None: - sheet_names = [sheet_names] - - if sheet_names is not None: - valid_algos_benchmarked = set(self.algos) & set(sheet_names) - # Do not create the spreadsheet of an algo which wasn't benchmarked - if len(valid_algos_benchmarked) < len(sheet_names): - raise Exception(f"Invalid algo(s) specified: \n" - "The list of algos benchmarked are "f"{self.algos}") - - worksheet = self.spreadsheet.worksheet('sample') - # If no sheet names provided, create worksheets for all MNMG algos in - if sheet_names is None: - sheet_names = self.sheet_names - - for sheet_name in sheet_names: - if 
sheet_name in self.map_algo_sheet.keys(): - self.spreadsheet.duplicate_sheet(source_sheet_id=worksheet.id, new_sheet_name=self.map_algo_sheet[sheet_name]) - else: - self.spreadsheet.duplicate_sheet(source_sheet_id=worksheet.id, new_sheet_name=sheet_name) - - def _write_gsheet(self, algos=None): - # Write the results from the json to the corresponding cell in the worksheet - def extract_cell(spreadsheet, sheet_name, scale, ngpus): - worksheet = spreadsheet.worksheet(sheet_name) - # The row containing the number of GPUs is 'ngpus_row'+1 - ngpus_row = worksheet.find("Number of GPUs").row - # Find the number of GPUs cell in that row - ngpus_algo_col = worksheet.find(str(ngpus), in_row=ngpus_row+1).col - # Get the column containing the scale - scale_col = worksheet.find("Scale").col - # Find the scale's row within 'scale_col' - scale_algo_row = worksheet.find(str(scale), in_column=scale_col).row - return worksheet, scale_algo_row , ngpus_algo_col - - if algos is not None: - if not isinstance(algos, list): - algos = [algos] - # ensure the algos specified were benchmarked - valid_algos_benchmarked = set(self.algos) & set(algos) - if len(valid_algos_benchmarked) == 0: - raise Exception("Invalid algo(s) specified:\n" - f"{algos}" " not a subset of " f"{self.algos}") - benchmark_result_list=[] - # Get a list of the json files that will be scan to update the spreadsheet - for file_name in self.benchmark_result_list: - algo_file = str(file_name).split('.')[1].split('.')[0] - # Only create/update spreadsheet of the algos specified - if algo_file in algos: - benchmark_result_list.append(file_name) - self.benchmark_result_list = benchmark_result_list - for file_name in self.benchmark_result_list: - time.sleep(5) - with open(file_name, 'r') as openfile: - bResult_dic = json.load(openfile) - sheet_name = bResult_dic["funcName"].split('.')[1] - scale = bResult_dic["argNameValuePairs"][0][1] - ngpus = bResult_dic["argNameValuePairs"][1][1] - result = bResult_dic["result"] - 
worksheet, row, col = extract_cell(self.spreadsheet, self.map_algo_sheet[sheet_name], scale, ngpus) - worksheet.update_cell(row, col, result) - - # delete sample worksheet - self.spreadsheet.del_worksheet(self.spreadsheet.worksheet("sample")) - - def _get_spreadsheet_url(self): - url_prefix = "https://docs.google.com/spreadsheets/d/" - spreadsheet_url = f"{url_prefix}{self.spreadsheet.id}" - print("spreadsheet url is", spreadsheet_url) - - - def update_spreadsheet(self, algos=None): - self._setup_authentication() - self._import_sample_worksheet() - self._get_spreadsheet_url() - benchmark_json = self._extract_sheet_names() - # Only proceed if there are benchmark results - if benchmark_json : - self._create_worksheets(algos) - self._write_gsheet(algos) - - -if __name__ == "__main__": - import argparse - ap = argparse.ArgumentParser() - ap.add_argument("--results-dir", type=str, required=True, - help="directory to store the results in json files") - args = ap.parse_args() - - gsheet_report = Gsheet_Report(results_dir=args.results_dir) - gsheet_report.update_spreadsheet() - diff --git a/regression/lp_config.json b/regression/lp_config.json new file mode 100644 index 000000000..d0a921701 --- /dev/null +++ b/regression/lp_config.json @@ -0,0 +1,13 @@ +{ + "details": "LP test", + "metrics": { + "primal_objective_value": { + "threshold": 1, + "unit": "primal_objective_value", + }, + "solver_time": { + "threshold": 1, + "unit": "seconds" + } + } +} diff --git a/regression/lp_regression_test.sh b/regression/lp_regression_test.sh index f710dce3b..3ebaa4f21 100644 --- a/regression/lp_regression_test.sh +++ b/regression/lp_regression_test.sh @@ -35,7 +35,7 @@ mkdir -p ${RESULTS_DIR}/benchmarks/results/csvs/ #rm -rf ${WORKSPACE}/${RESULT_DIR_NAME}/data/regressions.csv logger "Running lp tests ........" 
-python ${CUOPT_SCRIPTS_DIR}/benchmark_scripts/benchmark.py -c ${LP_CONFIGS_PATH} -r ${RESULTS_DIR}/benchmarks/results/csvs/ -g ${GIT_COMMIT} -l ${LOG_PATH} -s ${RESULTS_DIR}/benchmarks/results/lp_tests_status.txt -n ${GPUS_PER_NODE} +python ${CUOPT_SCRIPTS_DIR}/benchmark_scripts/benchmark.py -c ${LP_DATASETS_PATH} -r ${RESULTS_DIR}/benchmarks/results/csvs/ -g ${GIT_COMMIT} -l ${LOG_PATH} -s ${RESULTS_DIR}/benchmarks/results/lp_tests_status.txt -n ${GPUS_PER_NODE} logger "Completed lp tests ........" diff --git a/regression/mip_config.json b/regression/mip_config.json new file mode 100644 index 000000000..5e4398de6 --- /dev/null +++ b/regression/mip_config.json @@ -0,0 +1,29 @@ +{ + "details": "MIP test", + "metrics": { + "primal_objective_value": { + "threshold": 1, + "unit": "primal_objective_value" + }, + "solver_time": { + "threshold": 1, + "unit": "seconds" + }, + "mip_gap": { + "threshold": 1, + "unit": "mip_gap" + }, + "max_constraint_violation": { + "threshold": 1, + "unit": "max" + }, + "max_int_violation": { + "threshold": 1, + "unit": "max" + }, + "max_variable_bound_violation": { + "threshold": 1, + "unit": "max" + } + } +} diff --git a/regression/report.sh b/regression/report.sh deleted file mode 100755 index 288a88c6c..000000000 --- a/regression/report.sh +++ /dev/null @@ -1,67 +0,0 @@ -#!/bin/bash -# Copyright (c) 2021, NVIDIA CORPORATION. - -# Creates a conda environment to be used for cuopt benchmarking. - -# Abort script on first error -set -e -# Must ensure PROJECT_DIR is exported first then load rapids-mg-tools env -export PROJECT_DIR=${PROJECT_DIR:-$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)} -if [ -n "$RAPIDS_MG_TOOLS_DIR" ]; then - source ${RAPIDS_MG_TOOLS_DIR}/script-env.sh -elif [ -n "$(which script-env.sh)" ]; then - source $(which script-env.sh) -else - echo "Error: \$RAPIDS_MG_TOOLS_DIR/script-env.sh could not be read nor was script-env.sh in PATH." 
- exit 1 -fi - -activateCondaEnv - -################################################################################ -# Send report based on contents of $RESULTS_DIR -# These steps do not require a worker node. - -# When running both testing and benchmark and if some benchmarks fail, -# the entire nightly will fail. The benchmark logs reported on Slack -# contains information about the failures. -logger "Generating report" - -if [ -f $METADATA_FILE ]; then - source $METADATA_FILE -fi - -RUN_ASV_OPTION="" -if hasArg --skip-asv; then - logger "Skipping running ASV" -else - # Only create/update the asv database if there is both a commit Hash and a branch otherwise - # asv will return an error. If there is $PROJECT_BUILD, that implies there is Neither the - # git commit hash nor the branch which are required to create/update the asv db - if [[ "$PROJECT_BUILD" == "" ]]; then - # Update/create the ASV database - logger "Updating ASV database" - python $PROJECT_DIR/update_asv_database.py --commitHash=$PROJECT_VERSION --repo-url=$PROJECT_REPO_URL --branch=$PROJECT_REPO_BRANCH --commitTime=$PROJECT_REPO_TIME --results-dir=$RESULTS_DIR --machine-name=$MACHINE --gpu-type=$GPU_TYPE - RUN_ASV_OPTION=--run-asv - else - logger "Detected a conda install, cannot run ASV since a commit hash/time is needed." - fi -fi - -if hasArg --spreadsheet; then - logger "Generating spreadsheet" - export SPREADSHEET_URL=$(python $PROJECT_DIR/gsheet-report.py --results-dir=$RESULTS_DIR |grep "spreadsheet url is"|cut -d ' ' -f4) - #python $PROJECT_DIR/gsheet-report.py --results-dir=$RESULTS_DIR - -fi - -${SCRIPTS_DIR}/create-html-reports.sh $RUN_ASV_OPTION - -if hasArg --skip-sending-report; then - logger "Skipping sending report." -else - logger "Uploading to S3, posting to Slack" - ${PROJECT_DIR}/send-slack-report.sh -fi - -logger "cronjob.sh done." 
diff --git a/regression/run_regression.sh b/regression/run_regression.sh index 63a2b4c64..a24ad5b20 100644 --- a/regression/run_regression.sh +++ b/regression/run_regression.sh @@ -10,14 +10,11 @@ # Get latest set of datasets rm -rf $SCRATCH_DIR/routing_configs/* -rm -rf $SCRATCH_DIR/lp_configs/* -rm -rf $SCRATCH_DIR/mip_configs/* aws s3 cp s3://cuopt-datasets/regression_datasets/ $SCRATCH_DIR/routing_configs/ --recursive -aws s3 cp s3://cuopt-datasets/lp_datasets/ $SCRATCH_DIR/lp_configs/ --recursive -aws s3 cp s3://cuopt-datasets/mip_datasets/ $SCRATCH_DIR/mip_configs/ --recursive -bash $SCRATCH_DIR/cuopt/regression/get_datasets.sh +python $SCRATCH_DIR/cuopt/regression/get_datasets.py $SCRATCH_DIR/lp_datasets lp +python $SCRATCH_DIR/cuopt/regression/get_datasets.py $SCRATCH_DIR/mip_datasets mip # Run build and test bash $SCRATCH_DIR/cuopt/regression/cronjob.sh --benchmark --skip-spreadsheet diff --git a/regression/update_asv_database.py b/regression/update_asv_database.py index df9883f1e..13aa91ee2 100644 --- a/regression/update_asv_database.py +++ b/regression/update_asv_database.py @@ -43,7 +43,7 @@ def update_asv_db(commitHash=None, bResultList = [] # Skip these columns from benchmarking skip_columns = ["date_time", "git_commit"] - print("AAA") + # Create result objects for each benchmark result and store it in a list for file_name in benchmark_result_list: # skip if it's regression file @@ -76,7 +76,7 @@ def update_asv_db(commitHash=None, f"{benchmark_dir_path}, not creating/updating ASV database " f"in {asv_dir_path}.") return - print("BBB") + uname = platform.uname() # Maybe also write those metadata to metadata.sh ? 
osType = "%s %s" % (uname.system, uname.release) @@ -100,19 +100,16 @@ def update_asv_db(commitHash=None, 'ram' : "%d" % psutil.virtual_memory().total } bInfo = BenchmarkInfo(**bInfo_dict) - print("CCC") + # extract the branch name branch = bInfo_dict['branch'] db = ASVDb(dbDir=str(asv_dir_path), repo=repo_url, branches=[branch]) - print("DDD") + for res in bResultList: - print(bInfo) - print(res) db.addResult(bInfo, res) - print("EEE") if __name__ == "__main__": import argparse From c0a5902b0e7b96add1e0b094471114c68ffd4129 Mon Sep 17 00:00:00 2001 From: Ishika Roy Date: Mon, 15 Dec 2025 18:49:55 -0800 Subject: [PATCH 5/6] update regression --- regression/benchmark_scripts/benchmark.py | 81 ++++------------------- regression/config.sh | 8 +-- regression/create-html-reports.sh | 1 - regression/cronjob.sh | 6 +- regression/get_datasets.py | 5 +- regression/lp_config.json | 2 +- regression/lp_regression_test.sh | 2 +- regression/routing_regression_test.sh | 2 +- regression/run_regression.sh | 2 + 9 files changed, 28 insertions(+), 81 deletions(-) diff --git a/regression/benchmark_scripts/benchmark.py b/regression/benchmark_scripts/benchmark.py index 9cc8c75fa..9adfa0287 100644 --- a/regression/benchmark_scripts/benchmark.py +++ b/regression/benchmark_scripts/benchmark.py @@ -91,14 +91,16 @@ def get_bks_change( def record_result( test_name, metrics, required_metrics, csv_path, test_type_string ): - - file_path = csv_path + "/" + test_type_string + "_" + test_name + ".csv" - + file_path = csv_path + "/" + if test_type_string=="lp" or test_type_string=="mip": + file_path += test_type_string + "_" + test_name + ".csv" + else: + file_path += test_name + ".csv" bks_metrics = get_bks_change(metrics, required_metrics) + # Add default metrics to data required_metrics.update(bks_metrics) metrics.update(bks_metrics) - req_metrics = list(required_metrics.keys()) + ["date_time", "git_commit"] current_data = pd.DataFrame({key : [metrics[key]] for key in sorted(req_metrics)}) @@ 
-107,9 +109,7 @@ def record_result( updated_data = pd.concat([previous_data, current_data], ignore_index=True) else: updated_data = current_data - record_regressions(test_name, updated_data, required_metrics, csv_path, test_type_string) - updated_data.to_csv(file_path) @@ -178,9 +178,7 @@ def run_benchmark( metrics["cost"] = objectives[routing.Objective.COST] if "travel_time" in required_metrics: metrics["travel_time"] = objectives[routing.Objective.TRAVEL_TIME] - record_result(test_name, metrics, required_metrics, csv_path, d_type) - return "SUCCESS" if success_status is True else "FAILED" def reinitialize_rmm(): @@ -207,7 +205,7 @@ def worker(gpu_id, dataset_file_path, csv_path, git_commit, log_path, test_statu else: data_files = glob.glob(dataset_file_path + "/*_config.json") idx = int(gpu_id) - n_files = len(data_files) + n_files = 3 #len(data_files) while idx < n_files: mr, stats_mr = reinitialize_rmm() @@ -247,66 +245,10 @@ def worker(gpu_id, dataset_file_path, csv_path, git_commit, log_path, test_statu log.info(f"------------- Test End : {test_name} gpu id : {gpu_id} -------------------") idx = idx + n_gpus -def run(dataset_file_path, csv_path, git_commit, log_path, test_status_file, n_gpus, d_type): - - """def worker(gpu_id, n_gpus): - import os - #log.info(f"------------- GPU id : {gpu_id} -------------------") - os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id - import rmm - pool = rmm.mr.PoolMemoryResource( - rmm.mr.CudaMemoryResource() - ) - - rmm.mr.set_current_device_resource(pool) - idx = int(gpu_id) - n_files = len(config_files) - - def reinitialize_rmm(): - - pool = rmm.mr.PoolMemoryResource( - rmm.mr.CudaMemoryResource() - ) - - rmm.mr.set_current_device_resource(pool) - #rmm.reinitialize(pool_allocator=True, initial_pool_size=pool_size) - - #base_mr = rmm.mr.get_current_device_resource() - #stats_mr = rmm.mr.StatisticsResourceAdaptor(base_mr) - #rmm.mr.set_current_device_resource(stats_mr) - - return "", "" - - while idx < n_files: - config = 
config_files[idx] - - test_name = str(config) - status = "FAILED" - try: - - test_name, data_model, solver_settings, requested_metrics = get_configuration(config, config_file_path) - - log.basicConfig(level=log.INFO, filename=log_path+"/"+test_name+"_log.txt", filemode="a+", - format="%(asctime)-15s %(levelname)-8s %(message)s") - log.info(f"------------- Test Start : {test_name} -------------------") - log.info(f"------------- GPU id : {gpu_id} -------------------") - #status = run_benchmark( - # test_name, - # data_model, - # solver_settings, - # requested_metrics, - # csv_path, - # git_commit, - # test_status_file - #) - - except Exception as e: - log.error(str(e)) - - with open(test_status_file, "a") as f: - f.write("\n") - f.write(test_name +": " + status)""" +def run(dataset_file_path, csv_path, git_commit, log_path, test_status_file, n_gpus, d_type): + # Restricting n_gpus to one to avoid resource sharing + n_gpus = 1 procs = [] for gpu_id in range(int(n_gpus)): p = Process(target=worker, args=(str(gpu_id), dataset_file_path, csv_path, git_commit, log_path, test_status_file, int(n_gpus), d_type)) @@ -317,6 +259,7 @@ def reinitialize_rmm(): p.join() print("All processes finished.") + if __name__ == "__main__": parser = argparse.ArgumentParser() @@ -340,7 +283,7 @@ def reinitialize_rmm(): "-n", "--num-gpus", type=str, help="Number of GPUs available" ) parser.add_argument( - "-t", "--type", type=str, help="Type of benchmark" + "-t", "--type", type=str, default="", help="Type of benchmark" ) args = parser.parse_args() run(args.config_path, args.csv_path, args.git_commit, args.log_path, args.test_status_file, args.num_gpus, args.type) diff --git a/regression/config.sh b/regression/config.sh index cb0e0be61..88efc8483 100644 --- a/regression/config.sh +++ b/regression/config.sh @@ -17,7 +17,7 @@ PARTITION="batch" GPUS_PER_NODE=8 # Path to the squashs file containing the container image -IMAGE="nvidia/cuopt:25.10.0a-cuda12.9-py3.12" 
+IMAGE="nvidia/cuopt:26.2.0a-cuda12.9-py3.13" #SQSH_IMAGE=$SCRATCH_DIR/container_state/cuopt.sqsh ALL_CONFIGS_PATH=$SCRATCH_DIR/configs/ @@ -36,10 +36,10 @@ RESULT_DIR_NAME=cuopt-regression SSH_CREDS=/home/iroy/.ssh/ # Assume CUOPT_SLACK_APP_ID is defined! -CUOPT_SLACK_APP_ID="MY_SLACK_APP_ID" +CUOPT_SLACK_APP_ID="T04SYRAP3/B04BKLJ7R0F/8EPiEMTDcXFeB5FzQVEJp8t2" WEBHOOK_URL=${WEBHOOK_URL:-https://hooks.slack.com/services/${CUOPT_SLACK_APP_ID}} -S3_FILE_PREFIX="MY_S3_FILE_PREFIX" -S3_URL_PREFIX="MY_S3_URL_PREFIX" +S3_FILE_PREFIX=s3://reopt-testing-public/regression_tests +S3_URL_PREFIX=https://reopt-testing-public.s3.amazonaws.com/regression_tests # Most are defined using the bash := or :- syntax, which means they # will be set only if they were previously unset. The project config diff --git a/regression/create-html-reports.sh b/regression/create-html-reports.sh index 18632f38d..07f4372c0 100755 --- a/regression/create-html-reports.sh +++ b/regression/create-html-reports.sh @@ -48,7 +48,6 @@ else
" fi - ################################################################################ # create the html reports for each individual run (each # pytest-results*.txt file) diff --git a/regression/cronjob.sh b/regression/cronjob.sh index dbaf9b36f..1ec89b2fa 100755 --- a/regression/cronjob.sh +++ b/regression/cronjob.sh @@ -86,7 +86,6 @@ if [[ $TESTING_FAILED == 0 ]]; then --gpus-per-node $GPUS_PER_NODE \ --time=4:00:00 \ --export=ALL \ - --exclusive -K\ --container-mounts ${ROUTING_CONFIGS_PATH}:${ROUTING_CONFIGS_PATH},${CUOPT_SCRIPTS_DIR}:${CUOPT_SCRIPTS_DIR},${OUTPUT_DIR}:${OUTPUT_DIR} \ --container-image=$IMAGE \ --output=${BENCHMARK_RESULTS_DIR}/benchmark_routing_log.txt \ @@ -124,7 +123,8 @@ if [[ $TESTING_FAILED == 0 ]]; then bash ${CUOPT_SCRIPTS_DIR}/mip_regression_test.sh & PID_3=$! - wait $PID_1 $PID_2 $PID_3 + #wait $PID_1 $PID_2 $PID_3 + wait $PID_1 fi else # if [[ $TESTING_FAILED == 0 ]] @@ -153,7 +153,7 @@ fi #fi # Copy all config files to one folder -cp $ROUTING_CONFIGS_PATH/*config.json $PROJECT_DIR/*config.json $ALL_CONFIGS_PATH/ +cp $ROUTING_CONFIGS_PATH/*config.json $LP_DATASETS_PATH/*config.json $MIP_DATASETS_PATH/*config.json $ALL_CONFIGS_PATH/ RUN_ASV_OPTION="" if hasArg --skip-asv; then diff --git a/regression/get_datasets.py b/regression/get_datasets.py index f6136d695..bdb5f7f1d 100644 --- a/regression/get_datasets.py +++ b/regression/get_datasets.py @@ -859,7 +859,6 @@ } def download(url, dst): - print("HERE") if os.path.exists(dst): return print(f"Downloading {url} into {dst}...") @@ -929,6 +928,10 @@ def download_mip_dataset(name, dir): base_url = "https://miplib.zib.de/WebData/instances" url = f"{base_url}/{name}.gz" outfile = f"{dir}/{name}.gz" + if os.path.exists(dir): + if os.path.exists(os.path.join(dir, f"{name}")): + print(f"Dir for dataset {name} exists and contains {name}.mps. 
Skipping...") + return download(url, outfile) extract(outfile, dir, "") diff --git a/regression/lp_config.json b/regression/lp_config.json index d0a921701..e2f8a9e93 100644 --- a/regression/lp_config.json +++ b/regression/lp_config.json @@ -3,7 +3,7 @@ "metrics": { "primal_objective_value": { "threshold": 1, - "unit": "primal_objective_value", + "unit": "primal_objective_value" }, "solver_time": { "threshold": 1, diff --git a/regression/lp_regression_test.sh b/regression/lp_regression_test.sh index 3ebaa4f21..806e79d15 100644 --- a/regression/lp_regression_test.sh +++ b/regression/lp_regression_test.sh @@ -35,7 +35,7 @@ mkdir -p ${RESULTS_DIR}/benchmarks/results/csvs/ #rm -rf ${WORKSPACE}/${RESULT_DIR_NAME}/data/regressions.csv logger "Running lp tests ........" -python ${CUOPT_SCRIPTS_DIR}/benchmark_scripts/benchmark.py -c ${LP_DATASETS_PATH} -r ${RESULTS_DIR}/benchmarks/results/csvs/ -g ${GIT_COMMIT} -l ${LOG_PATH} -s ${RESULTS_DIR}/benchmarks/results/lp_tests_status.txt -n ${GPUS_PER_NODE} +python ${CUOPT_SCRIPTS_DIR}/benchmark_scripts/benchmark.py -c ${LP_DATASETS_PATH} -r ${RESULTS_DIR}/benchmarks/results/csvs/ -g ${GIT_COMMIT} -l ${LOG_PATH} -s ${RESULTS_DIR}/benchmarks/results/lp_tests_status.txt -n ${GPUS_PER_NODE} -t lp logger "Completed lp tests ........" diff --git a/regression/routing_regression_test.sh b/regression/routing_regression_test.sh index 57029c5a5..2ce4a02b4 100644 --- a/regression/routing_regression_test.sh +++ b/regression/routing_regression_test.sh @@ -36,7 +36,7 @@ mkdir -p ${RESULTS_DIR}/benchmarks/results/csvs/ logger "Running routing tests ........" 
-python ${CUOPT_SCRIPTS_DIR}/benchmark_scripts/benchmark.py -c ${ROUTING_CONFIGS_PATH} -r ${RESULTS_DIR}/benchmarks/results/csvs/ -g ${GIT_COMMIT} -l ${LOG_PATH} -s ${RESULTS_DIR}/benchmarks/results/routing_tests_status.txt -n ${GPUS_PER_NODE} +python ${CUOPT_SCRIPTS_DIR}/benchmark_scripts/benchmark.py -c ${ROUTING_CONFIGS_PATH} -r ${RESULTS_DIR}/benchmarks/results/csvs/ -g ${GIT_COMMIT} -l ${LOG_PATH} -s ${RESULTS_DIR}/benchmarks/results/routing_tests_status.txt -n ${GPUS_PER_NODE} -t routing logger "Completed routing tests ........" diff --git a/regression/run_regression.sh b/regression/run_regression.sh index a24ad5b20..c397552a0 100644 --- a/regression/run_regression.sh +++ b/regression/run_regression.sh @@ -15,6 +15,8 @@ aws s3 cp s3://cuopt-datasets/regression_datasets/ $SCRATCH_DIR/routing_configs/ python $SCRATCH_DIR/cuopt/regression/get_datasets.py $SCRATCH_DIR/lp_datasets lp python $SCRATCH_DIR/cuopt/regression/get_datasets.py $SCRATCH_DIR/mip_datasets mip +cp $SCRATCH_DIR/cuopt/regression/lp_config.json $SCRATCH_DIR/lp_datasets/ +cp $SCRATCH_DIR/cuopt/regression/mip_config.json $SCRATCH_DIR/mip_datasets/ # Run build and test bash $SCRATCH_DIR/cuopt/regression/cronjob.sh --benchmark --skip-spreadsheet From 01fb88fc7423b1aff2ef6bfc90448a492c55cfee Mon Sep 17 00:00:00 2001 From: Ishika Roy Date: Mon, 22 Dec 2025 09:52:04 -0800 Subject: [PATCH 6/6] regression updates --- regression/benchmark_scripts/benchmark.py | 7 +++---- regression/config.sh | 6 +++--- regression/cronjob.sh | 6 ++---- 3 files changed, 8 insertions(+), 11 deletions(-) diff --git a/regression/benchmark_scripts/benchmark.py b/regression/benchmark_scripts/benchmark.py index 9adfa0287..c636d0f23 100644 --- a/regression/benchmark_scripts/benchmark.py +++ b/regression/benchmark_scripts/benchmark.py @@ -205,7 +205,7 @@ def worker(gpu_id, dataset_file_path, csv_path, git_commit, log_path, test_statu else: data_files = glob.glob(dataset_file_path + "/*_config.json") idx = int(gpu_id) - n_files = 3 
#len(data_files) + n_files = 1 #len(data_files) while idx < n_files: mr, stats_mr = reinitialize_rmm() @@ -215,10 +215,10 @@ def worker(gpu_id, dataset_file_path, csv_path, git_commit, log_path, test_statu test_name = str(data_file) status = "FAILED" try: - test_name, data_model, solver_settings, requested_metrics = get_configuration(data_file, dataset_file_path, d_type) log.basicConfig(level=log.INFO, filename=log_path+"/"+test_name+"_log.txt", filemode="a+", format="%(asctime)-15s %(levelname)-8s %(message)s") + log.getLogger().setLevel(log.INFO) log.info(f"------------- Test Start : {test_name} gpu id : {gpu_id} -------------------") status = run_benchmark( test_name, @@ -230,7 +230,6 @@ def worker(gpu_id, dataset_file_path, csv_path, git_commit, log_path, test_statu test_status_file, d_type ) - except Exception as e: log.error(str(e)) @@ -248,7 +247,7 @@ def worker(gpu_id, dataset_file_path, csv_path, git_commit, log_path, test_statu def run(dataset_file_path, csv_path, git_commit, log_path, test_status_file, n_gpus, d_type): # Restricting n_gpus to one to avoid resource sharing - n_gpus = 1 + #n_gpus = 1 procs = [] for gpu_id in range(int(n_gpus)): p = Process(target=worker, args=(str(gpu_id), dataset_file_path, csv_path, git_commit, log_path, test_status_file, int(n_gpus), d_type)) diff --git a/regression/config.sh b/regression/config.sh index 88efc8483..2a5b57f51 100644 --- a/regression/config.sh +++ b/regression/config.sh @@ -36,10 +36,10 @@ RESULT_DIR_NAME=cuopt-regression SSH_CREDS=/home/iroy/.ssh/ # Assume CUOPT_SLACK_APP_ID is defined! 
-CUOPT_SLACK_APP_ID="T04SYRAP3/B04BKLJ7R0F/8EPiEMTDcXFeB5FzQVEJp8t2" +CUOPT_SLACK_APP_ID="MY_SLACK_APP_ID" WEBHOOK_URL=${WEBHOOK_URL:-https://hooks.slack.com/services/${CUOPT_SLACK_APP_ID}} -S3_FILE_PREFIX=s3://reopt-testing-public/regression_tests -S3_URL_PREFIX=https://reopt-testing-public.s3.amazonaws.com/regression_tests +S3_FILE_PREFIX="MY_S3_FILE_PREFIX" +S3_URL_PREFIX="MY_S3_URL_PREFIX" # Most are defined using the bash := or :- syntax, which means they # will be set only if they were previously unset. The project config diff --git a/regression/cronjob.sh b/regression/cronjob.sh index 1ec89b2fa..76aec8535 100755 --- a/regression/cronjob.sh +++ b/regression/cronjob.sh @@ -123,8 +123,7 @@ if [[ $TESTING_FAILED == 0 ]]; then bash ${CUOPT_SCRIPTS_DIR}/mip_regression_test.sh & PID_3=$! - #wait $PID_1 $PID_2 $PID_3 - wait $PID_1 + wait $PID_1 $PID_2 $PID_3 fi else # if [[ $TESTING_FAILED == 0 ]] @@ -179,13 +178,12 @@ rm -rf $RESULTS_DIR/tests ${SCRIPTS_DIR}/create-html-reports.sh $RUN_ASV_OPTION -: <<'END' if hasArg --skip-sending-report; then logger "Skipping sending report." else logger "Uploading to S3, posting to Slack" ${PROJECT_DIR}/send-slack-report.sh fi -END + logger "cronjob.sh done."