Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions .idea/SPRINT.iml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 4 additions & 0 deletions .idea/encodings.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

7 changes: 7 additions & 0 deletions .idea/misc.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

8 changes: 8 additions & 0 deletions .idea/modules.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

33 changes: 33 additions & 0 deletions .idea/workspace.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

19 changes: 16 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ Incorporating the **C**onstellation **I**nvestigation **R**epository with **C**o

# General Setup

1. Clone the repo: `git clone git@github.com:MIT-STARLab/SPRINT.git`
1. Clone the repo: `git clone git@github.mit.edu:star-lab/SPRINT.git`
1. Init the appropriate submodules:
1. `cd SPRINT/source`
1. `git submodule init circinus_global_planner circinus_orbit_link_public circinus_orbit_propagation circinus_orbit_viz circinus_sim circinus_tools`
Expand Down Expand Up @@ -40,7 +40,20 @@ Incorporating the **C**onstellation **I**nvestigation **R**epository with **C**o
1. Navigate to `SPRINT/scripts`
1. Run simulation: <br>
a. `./run_const_sim.sh --use orig_circinus_zhou` to specify a case corresponding to `inputs/cases/orig_circinus_zhou`.<br>
b. `./run_const_sim.sh --help` for a description of the other options.<br>
b. `./run_const_sim.sh --help` for a description of the other options.<br>

## Separated Simulation Demo
This simulation can be run such that each satellite runs on separate hardware, say a Raspberry Pi. The following demo is for a 2-satellite constellation.
1. For the ground station network: <br>
a. Navigate to `SPRINT/scripts`
b. `./run_const_sim.sh --use circinus_zhou_2_sats --ground` to specify a two-satellite case and to run the ground station network part
2. For each satellite: <br>
a. Navigate to `SPRINT/scripts`
b. `./run_const_sim.sh --use circinus_zhou_2_sats --satellite` to specify a two-satellite case and to run the satellite part

The satellites can be initialized before the ground station network; however, satellites are given 100 tries to connect to the ground station network, once every second. If the ground station network isn't initialized in time, the satellite program exits.



# Submodule dependencies
* [circinus_global_planner](https://github.com/MIT-STARLab/circinus_global_planner)
Expand All @@ -53,4 +66,4 @@ Incorporating the **C**onstellation **I**nvestigation **R**epository with **C**o
These should be managed as if they are independent and up to date with their own master, before committing the folder from the the SPRINT main repository (which then tracks the commit of the subrepo).

# History
SPRINT was initiated as CIRCINUS, by [apollokit](https://github.com/apollokit)
SPRINT was initiated as CIRCINUS, by [apollokit](https://github.com/apollokit).
177 changes: 177 additions & 0 deletions inputs/SRP_Zhou_scripts/analyze_multirun_tests.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,177 @@
# this file is for generating plots / outputs from
# the json files in this folder
import json
import os

import matplotlib.pyplot as plt
import numpy as np

# Sweep dimensions: self-replanner on/off crossed with ground-station
# disruption cases ('None' plus one case per disrupted station).
SRP_settings = [True, False]
GS_disruptions = ['None','G0','G1','G2']

# grab all data: one results JSON per (SRP setting, GS disruption) combination,
# keyed by the same string used to name the file on disk
all_data = {}
for SRP_setting in SRP_settings:
    for GS_disruption in GS_disruptions:
        cur_str = 'SRP_Test_SRP_%s_GS_%s' % (SRP_setting, GS_disruption)

        # os.path.join keeps the path portable; the original hard-coded
        # Windows '.\\' separators, which break on non-Windows platforms
        with open(os.path.join('multirun_tests', cur_str + '.json'), "r") as jsonFile:
            all_data[cur_str] = json.load(jsonFile)

print('All Data Loaded')

# Initialize the accumulators for the bar charts below. Each becomes a list
# of two inner lists (index 0 = SRP on, index 1 = SRP off), with one entry
# per ground-station disruption case.
# (Removed a leftover debug print, "test time".)
total_failures = []

# data margin: median, plus distances to the 25th/75th percentiles
# (stored as offsets so they can be fed directly to matplotlib's yerr)
median_data_margin_prcnt = []
prcntl25_ave_d_margin_prcnt = []
prcntl75_ave_d_margin_prcnt = []

# energy margin: same layout as the data-margin stats
median_energy_margin_prcnt = []
prcntl25_ave_e_margin_prcnt = []
prcntl75_ave_e_margin_prcnt = []

# data-volume throughput metrics
exec_over_poss = []
total_exec_dv = []
total_poss_dv = []

# observation latency; "initial" means the first part of the data container downlinked
median_obs_initial_lat_exec = []
prcntl25_obs_initial_lat_exec = []
prcntl75_obs_initial_lat_exec = []

# age of information (AoI)
median_av_aoi_exec = []
prcntl25_av_aoi_exec = []
prcntl75_av_aoi_exec = []

# MAKE DATA STRUCTS FOR BAR CHARTS
# Outer index: 0 = SRP on, 1 = SRP off; inner lists hold one entry per
# ground-station disruption case, in GS_disruptions order.
for ind,SRP_setting in enumerate(SRP_settings):
    total_failures.append([])
    median_data_margin_prcnt.append([])
    prcntl25_ave_d_margin_prcnt.append([])
    prcntl75_ave_d_margin_prcnt.append([])
    median_energy_margin_prcnt.append([])
    prcntl25_ave_e_margin_prcnt.append([])
    prcntl75_ave_e_margin_prcnt.append([])
    exec_over_poss.append([])
    median_obs_initial_lat_exec.append([])
    prcntl25_obs_initial_lat_exec.append([])
    prcntl75_obs_initial_lat_exec.append([])
    median_av_aoi_exec.append([])
    prcntl25_av_aoi_exec.append([])
    prcntl75_av_aoi_exec.append([])
    for GS_disruption in GS_disruptions:
        # BUGFIX: the lookup key must match the format used when all_data was
        # built ('SRP_Test_SRP_%s_GS_%s'); the previous
        # 'SRP_Test_BDT_False_SRP_%s_GS_%s' key is never present in all_data
        # and raised KeyError.
        cur_str = 'SRP_Test_SRP_%s_GS_%s' % (SRP_setting, GS_disruption)
        cur_data = all_data[cur_str]

        # Activity Failures: total across all failure types
        total_failures[ind].append(sum(cur_data['Num Failures by Type'].values()))

        # Data Margin levels (percentiles stored as offsets from the median,
        # ready for matplotlib's asymmetric yerr)
        median_data_margin_prcnt[ind].append(cur_data['d_rsrc_stats']['median_ave_d_margin_prcnt'])
        prcntl25_ave_d_margin_prcnt[ind].append(cur_data['d_rsrc_stats']['median_ave_d_margin_prcnt'] - cur_data['d_rsrc_stats']['prcntl25_ave_d_margin_prcnt'])
        prcntl75_ave_d_margin_prcnt[ind].append(cur_data['d_rsrc_stats']['prcntl75_ave_d_margin_prcnt'] - cur_data['d_rsrc_stats']['median_ave_d_margin_prcnt'])

        # Energy Margin levels (same offset convention)
        median_energy_margin_prcnt[ind].append(cur_data['e_rsrc_stats']['median_ave_e_margin_prcnt'])
        prcntl25_ave_e_margin_prcnt[ind].append(cur_data['e_rsrc_stats']['median_ave_e_margin_prcnt']-cur_data['e_rsrc_stats']['prcntl25_ave_e_margin_prcnt'])
        prcntl75_ave_e_margin_prcnt[ind].append(cur_data['e_rsrc_stats']['prcntl75_ave_e_margin_prcnt']-cur_data['e_rsrc_stats']['median_ave_e_margin_prcnt'])

        # METRICS
        # DV % throughput: executed over possible data volume, as a percentage
        exec_over_poss[ind].append(cur_data['dv_stats']['exec_over_poss']*100)

        # Obs Latency (initial data packet)
        median_obs_initial_lat_exec[ind].append(cur_data['lat_stats']['median_obs_initial_lat_exec'])
        prcntl25_obs_initial_lat_exec[ind].append(cur_data['lat_stats']['median_obs_initial_lat_exec'] - cur_data['lat_stats']['prcntl25_obs_initial_lat_exec'])
        prcntl75_obs_initial_lat_exec[ind].append(cur_data['lat_stats']['prcntl75_obs_initial_lat_exec'] - cur_data['lat_stats']['median_obs_initial_lat_exec'])

        # AoI (with routing)
        median_av_aoi_exec[ind].append(cur_data['obs_aoi_stats_w_routing']['median_av_aoi_exec'])
        prcntl25_av_aoi_exec[ind].append(cur_data['obs_aoi_stats_w_routing']['median_av_aoi_exec'] - cur_data['obs_aoi_stats_w_routing']['prcntl25_av_aoi_exec'])
        prcntl75_av_aoi_exec[ind].append(cur_data['obs_aoi_stats_w_routing']['prcntl75_av_aoi_exec'] - cur_data['obs_aoi_stats_w_routing']['median_av_aoi_exec'])

def autolabel(rects,axis):
    """
    Attach a text label above each bar displaying its height (as an int).

    rects: the bar container returned by axis.bar(...)
    axis:  the matplotlib Axes to draw the labels on

    from: https://matplotlib.org/examples/api/barchart_demo.html
    """
    for rect in rects:
        height = rect.get_height()
        # BUGFIX: center the label over the bar (x + width/2, as in the
        # referenced matplotlib demo); width/4 placed it left of center
        # since ha='center' anchors the text at the given x.
        axis.text(rect.get_x() + rect.get_width()/2., height,
                '%d' % int(height),
                ha='center', va='bottom')

def double_bar_graph(ax,N,data,yLabelStr,titleStr,xLabelStr,xTickLabels,legendStrs,yerr = (None, None), legendFlag = True, colorStrs = ('b','gray'),width=0.35,):
    """
    Draw a grouped (two-series) bar chart on the given Axes.

    ax:          matplotlib Axes to draw on (returned for chaining)
    N:           number of groups along the x axis
    data:        exactly two sequences of bar heights, each of length N
    yLabelStr/titleStr/xLabelStr: axis/title label strings
    xTickLabels: N tick labels for the groups
    legendStrs:  two legend labels (series 0, series 1)
    yerr:        per-series error-bar specs passed through to ax.bar
    legendFlag:  draw the legend when True
    colorStrs:   bar colors for the two series
    width:       bar width; series 1 is offset by one width

    Raises a generic Exception when data does not contain exactly two
    series or when the lengths do not match N.

    NOTE: the yerr/colorStrs defaults were mutable lists; they are now
    tuples (never mutated here, so behavior is unchanged).
    """
    if len(data) != 2:
        raise Exception('Need exactly 2 data sets')

    if N != len(data[0]) or N != len(data[1]) or N != len(xTickLabels):
        raise Exception('number of bar graphs does not match data and/or tick labels supplied')

    ind = np.arange(N)  # the x locations for the groups

    rects1 = ax.bar(ind, data[0], width, color=colorStrs[0], yerr= yerr[0])
    rects2 = ax.bar(ind + width, data[1], width, color=colorStrs[1], yerr= yerr[1])
    ax.set_ylabel(yLabelStr)
    ax.set_title(titleStr)
    # center the group ticks between the two bars of each group
    ax.set_xticks(ind + width / 2)
    ax.set_xlabel(xLabelStr)
    ax.set_xticklabels(tuple(xTickLabels))
    if legendFlag:
        ax.legend((rects1[0], rects2[0]), tuple(legendStrs))
    autolabel(rects1,ax)
    autolabel(rects2,ax)

    return ax

# MAKE PLOTS
N = 4 # number of GS-disruption cases per series: None, G0, G1, G2
width = 0.35 # the width of the bars (unused here: double_bar_graph applies its own default)
xLabelStr = 'Ground Station Failures'
# NOTE(review): 'G2 -24 hrs' looks like it is missing a space ('G2 - 24 hrs') — confirm
xTickLabels = ('None','G0 - 24 hrs', 'G1 - 12 hrs', 'G2 -24 hrs')
legendStrs = ('SRP On', 'SRP Off')

############# one plot for total failures ####################
fig, ax = plt.subplots()
yLabelStr = 'Total Activity Failures (#)'
titleStr = 'Activity Failures with SRP on/off'
double_bar_graph(ax,N,total_failures,yLabelStr,titleStr,xLabelStr,xTickLabels,legendStrs)

###### one plot with two subplots (one for each state margin level) ######
# NOTE(review): only the data-margin chart is currently drawn; the
# energy-margin chart below is disabled (dead triple-quoted block).
fig, ax1 = plt.subplots(nrows=1, ncols=1)
yLabelStr = 'Data Margin (%)'
titleStr = 'Data Margin Levels with SRP on/off'
# yerr: (lower, upper) offset arrays from the median, one pair per series
d_yerr = (np.asarray([prcntl25_ave_d_margin_prcnt[0],prcntl75_ave_d_margin_prcnt[0]]),np.asarray([prcntl25_ave_d_margin_prcnt[1],prcntl75_ave_d_margin_prcnt[1]]))
double_bar_graph(ax1,N,median_data_margin_prcnt,yLabelStr,titleStr,xLabelStr,xTickLabels,legendStrs,yerr=d_yerr)

""" yLabelStr = 'Energy Margin (%)'
titleStr = 'Energy Margin Levels with SRP on/off'
e_yerr = (np.asarray([prcntl25_ave_e_margin_prcnt[0],prcntl75_ave_e_margin_prcnt[0]]),np.asarray([prcntl25_ave_e_margin_prcnt[1],prcntl75_ave_e_margin_prcnt[1]]))
double_bar_graph(ax2,N,median_energy_margin_prcnt,yLabelStr,titleStr,xLabelStr,xTickLabels,legendStrs,yerr=e_yerr) """


###### one plot with a three subplots (one for each metric) ###
# Data Throughput Percentage (top subplot; legend suppressed to avoid repeats)
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1)
#titleStr = 'Metrics with SRP on/off'
yLabelStr = 'Data Throughput - Exec / Poss (%)'
titleStr = 'DV Throughput with SRP on/off'
xLabelStr = ''
double_bar_graph(ax1,N,exec_over_poss,yLabelStr,titleStr,xLabelStr,xTickLabels,legendStrs,legendFlag = False)


xLabelStr = 'Ground Station Failures'
# Median Latency (bottom subplot)
yLabelStr = 'Observation Latency (min)'
titleStr = 'Observation Initial Data Packet Latency with SRP on/off'
lat_yerr = (np.asarray([prcntl25_obs_initial_lat_exec[0],prcntl75_obs_initial_lat_exec[0]]),np.asarray([prcntl25_obs_initial_lat_exec[1],prcntl75_obs_initial_lat_exec[1]]))
double_bar_graph(ax2,N,median_obs_initial_lat_exec,yLabelStr,titleStr,xLabelStr,xTickLabels,legendStrs,yerr=lat_yerr,legendFlag = False)

""" # Median AoI
yLabelStr = 'Age of Information (hours)'
#titleStr = 'Observation Initial Data Packet Latency with SRP on/off'
aoi_yerr = (np.asarray([prcntl25_av_aoi_exec[0],prcntl75_av_aoi_exec[0]]),np.asarray([prcntl25_av_aoi_exec[1],prcntl75_av_aoi_exec[1]]))
double_bar_graph(ax3,N,median_av_aoi_exec,yLabelStr,titleStr,xLabelStr,xTickLabels,legendStrs,yerr=aoi_yerr) """
### SHOW PLOTS ###
plt.show()


49 changes: 49 additions & 0 deletions inputs/SRP_Zhou_scripts/test_runner.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
# this file is intended for setting up and running multiple SPRINT runs, where the config is changed
# in between runs
import json
from subprocess import Popen
# things to modify
# r'..\inputs\reference_model_definitions\sat_refs\zhou_original_sat.json': NVM - only doing Xlnk-always

# setup things to step through
# sim_case_config.json knob: ['scenario_params']['sim_run_perturbations']['schedule_disruptions']
# Each entry maps a ground-station name to a list of [start, end] UTC outage
# windows that are written into the case config before a run.
schedule_disruptions_list = [
    {"G0": [["2016-02-14T04:00:00.000000Z","2016-02-15T04:00:00.000000Z"]]},
    {"G1": [["2016-02-14T04:00:00.000000Z","2016-02-14T16:00:00.000000Z"]]},
    {"G2": [["2016-02-14T04:00:00.000000Z","2016-02-15T04:00:00.000000Z"]]}
]

# lp_general_params_inputs.json knob: ['lp_general_params']['use_self_replanner']
SRP_settings_list = [True, False]

# Absolute, machine-specific paths to the two config files edited between runs
# and to the scripts folder that holds the Windows launcher.
# NOTE(review): SRP_file contains a doubled backslash before 'general_config'
# inside a raw string; Windows tolerates repeated separators — confirm intended.
SD_file = r'C:\Users\User\circinusGit\SPRINT\inputs\cases\orig_circinus_zhou\sim_case_config.json'
SRP_file = r'C:\Users\User\circinusGit\SPRINT\inputs\\general_config\lp_general_params_inputs.json'
scripts_folder = r"C:\Users\User\circinusGit\SPRINT\scripts"
# NOTE: NEED TO BE IN SCRIPTS DIRECTORY TO FIND windows_env_var_setup.bat
# Sweep every (schedule disruption, SRP setting) combination: rewrite the two
# config files, then launch one simulation run per combination.
for SD_setting in schedule_disruptions_list:

    with open(SD_file, "r") as jsonFile:
        data = json.load(jsonFile)

    data['scenario_params']['sim_run_perturbations']['schedule_disruptions'] = SD_setting

    print('Setting schedule disruptions to: %s' % SD_setting)
    with open(SD_file, "w") as jsonFile:
        json.dump(data, jsonFile, indent=4, separators=(',', ': '))

    for SRP_setting in SRP_settings_list:
        with open(SRP_file, "r") as jsonFile:
            data = json.load(jsonFile)

        data['lp_general_params']['use_self_replanner'] = SRP_setting

        print('Setting SRP to: %s' % SRP_setting)
        with open(SRP_file, "w") as jsonFile:
            json.dump(data, jsonFile, indent=4, separators=(',', ': '))

        print('New Settings Set - run batch file')

        # python runner_const_sim.py --inputs_location /c/Users/wcgru/Documents/GitHubClones/SPRINT/scripts/../inputs --case_name orig_circinus_zhou --restore_pickle "" --remote_debug false

        # BUGFIX: run the launcher with cwd set to the scripts folder so it
        # can find windows_env_var_setup.bat (scripts_folder was previously
        # defined but never used; see the NOTE above about needing to be in
        # the scripts directory).
        # Launched once per combination, inside the inner loop — assumed from
        # the original (indentation was lost in this view); confirm intent.
        # communicate() waits for the run to finish; no pipes were requested,
        # so stdout/stderr are None.
        p = Popen(r"C:\Users\User\circinusGit\SPRINT\scripts\windows_launcher.bat",
                  cwd=scripts_folder)
        stdout, stderr = p.communicate()
Loading