This notebook generates and displays all the plots from the TriScale paper.
# Necessary import
from pathlib import Path
import json
import yaml
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.io as pio
pio.renderers.default = "notebook"
import triscale
import triplots
import UseCase_Pantheon.pantheon as pantheon
import UseCase_Glossy.flocklab as flocklab
# --- Figure 1 (a): Pantheon's own summary plot, zoomed ---

# One Pantheon test series and its result/config files
data_path = Path('UseCase_Pantheon/PantheonData/10runs_30s/2019-08-20T15:34:33:+0200')
perf_file = data_path / 'pantheon_perf.json'
meta_file = data_path / 'pantheon_metadata.json'
config_file = Path('UseCase_Pantheon/PantheonData/config.yml')

# Plotly layout for the paper figure. The x-axis range is reversed
# ([100, 32]) so that lower delay appears on the right-hand side;
# the y-axis is zoomed on [80, 122] Mbit/s.
custom_layout = dict(
    title=None,
    width=350,
    height=350,
    showlegend=False,
    margin=dict(l=55, r=0, t=0, b=45),
    xaxis={'range': [100, 32],
           'autorange': False,
           'title': {'font': {'size': 16}}},
    yaxis={'range': [80, 122],
           'title': {'font': {'size': 16}}},
)

plot_path = Path('plots')
plot_filename = "plot_zoom_pantheon.pdf"

# Reproduce Pantheon's mean/ellipse summary plot and save it as PDF
pantheon.plot_pantheon(perf_file,
                       meta_file,
                       config_file,
                       layout=custom_layout,
                       out_name=str(plot_path / plot_filename),
                       show=True);
Figure 1 (a) Data analysis and visualization reproduced from [66]. The dots represent the mean performance of the runs; the ellipses represent the $1\sigma$ variation across runs.
# --- Figure 1 (b): the same data analyzed and plotted by TriScale ---

# Every sub-directory of the 10runs_30s campaign is one test series
result_dir_path = Path('UseCase_Pantheon/PantheonData/10runs_30s')
result_dir_list = [x for x in result_dir_path.iterdir() if x.is_dir()]

plot_path = Path('plots')
meta_data_file = 'pantheon_metadata.json'
config_file = Path('UseCase_Pantheon/PantheonData/config.yml')
out_name = Path('UseCase_Pantheon/PantheonData/metrics.csv')

# Convergence test parameters (applied to each run)
convergence = dict(expected=True,
                   confidence=95,  # in %
                   tolerance=5)    # in %

# Per-run metrics: median throughput and 95th-percentile one-way delay
metric_tput = dict(name='Average Throughput',
                   unit='Mbit/s',
                   measure=50,
                   bounds=[0, 120],   # expected value range
                   tag='throughput')  # do not change the tag
metric_delay = dict(name='95th perc. of One-way delay',
                    unit='ms',
                    measure=95,
                    bounds=[0, 100],  # expected value range
                    tag='delay')      # do not change the tag

# Per-series KPIs: percentile estimates with 75% confidence
KPI_tput = dict(percentile=25,
                confidence=75,
                name='Average Throughput',
                unit='Mbit/s',
                bounds=[0, 120],   # expected value range
                tag='throughput')  # do not change the tag
KPI_delay = dict(percentile=75,
                 confidence=75,
                 name='95th perc. of One-way delay',
                 unit='ms',
                 bounds=[0, 100],  # expected value range
                 tag='delay')      # do not change the tag

metric_list = [metric_tput, metric_delay]
kpi_list = [KPI_tput, KPI_delay]

# Compute the per-run metrics (or load them from metrics.csv if present)
metrics = pantheon.compute_metric(result_dir_list,
                                  meta_data_file,
                                  convergence,
                                  metric_list,
                                  out_name=out_name,
                                  force_computation=False,
                                  plot=False,
                                  verbose=False)

# Same base layout as Figure 1 (a): reversed x-axis, wide y-range
custom_layout = {
    "title": None,
    "width": 350,
    "height": 350,
    "showlegend": False,
    "margin": dict(l=55, r=0, t=0, b=45),
    "xaxis": {'range': [100, 32],
              'autorange': False,
              'title': {'font': {'size': 16}}},
    "yaxis": {'range': [-7, 127],
              'title': {'font': {'size': 16}}},
}

# Zoom on the top-right part of the plot (same window as Figure 1 (a))
zoom = True
if zoom:
    custom_layout["yaxis"]["range"] = [80, 122]
    plot_filename = "plot_zoom_triscale.pdf"
else:
    plot_filename = "plot_summary_triscale.pdf"

plot_path = Path('') / 'plots' / 'Pantheon'

# Plot the KPIs of the second series only (matches the data in Figure 1 (a))
series_label = np.sort(metrics['datetime'].unique())
for series_ix in [series_label[1]]:
    # Restrict the metric data to that one series
    metric_series = metrics.loc[metrics['datetime'] == series_ix]
    # Plot the KPIs and their dominance regions
    pantheon.plot_triscale_kpi(metric_series,
                               meta_file,
                               kpi_list,
                               config_file,
                               layout=custom_layout,
                               out_name=str(plot_path / plot_filename),
                               show=True)
Output retrieved from file. Skipping computation.
Figure 1 (b) Data analysis and visualization produced by TriScale. The dots represent the KPIs of each scheme. Shaded areas represent dominance regions: scheme $A$ performs better than scheme $B$ if the KPI of $B$ lies in the dominance region of $A$.
# --- Figure 3 (a): raw delay data and metric for one FillP run ---

# Construct the path to the different test results
result_dir_path = Path('UseCase_Pantheon/PantheonData/10runs_30s')
result_dir_list = [x for x in result_dir_path.iterdir() if x.is_dir()]
# Meta data file name
meta_data_file = 'pantheon_metadata.json'
# Config file name and path
config_file = Path('UseCase_Pantheon/PantheonData/config.yml')
out_name = Path('UseCase_Pantheon/PantheonData/metrics.csv')
plot_path = Path('plots')

# Metric: 95th percentile of the one-way delay, with convergence test
metric_delay = dict(name='One-way delay',
                    unit='ms',
                    measure=95,
                    bounds=[0, 100],  # expected value range
                    tag='delay')      # do not change the tag
convergence = dict(expected=True,
                   confidence=95,  # in %
                   tolerance=5)    # in %

# Compute/load the metrics
metrics = pantheon.compute_metric(result_dir_list,
                                  meta_data_file,
                                  convergence,
                                  [metric_delay],
                                  out_name=out_name)

# Raw data of one exemplary FillP run
data_file = str(result_dir_list[0] / 'fillp_datalink_delay_run1_flow1.csv')
plot_filename = "plot_example_metric.pdf"

custom_layout = dict(
    title=None,
    width=500,
    height=300,
    margin=dict(l=55, r=5, t=0, b=45),
    legend={'x': .1, 'y': .1},
    xaxis={'title': {'font': {'size': 16},
                     'text': 'Time [ s ]'}},
    yaxis={'title': {'font': {'size': 16},
                     'text': f"{metric_delay['name']} [ {metric_delay['unit']} ]"}},
)

# Run TriScale's metric analysis on the raw data and plot the result
triscale.analysis_metric(data_file,
                         metric_delay,
                         plot=True,
                         plot_out_name=str(plot_path / plot_filename),
                         custom_layout=custom_layout,
                         convergence=convergence);
Output retrieved from file. Skipping computation.
Figure 3 (a) Raw data (one-way delay) and metric data (95th percentile). Example run of FillP.
# --- Figure 3 (b): runs' metric data and the resulting KPI ---

# Construct the path to the different test results
result_dir_path = Path('UseCase_Pantheon/PantheonData/10runs_30s')
result_dir_list = [x for x in result_dir_path.iterdir() if x.is_dir()]
# Meta data file name
meta_data_file = 'pantheon_metadata.json'
# Config file name and path
config_file = Path('UseCase_Pantheon/PantheonData/config.yml')
out_name = Path('UseCase_Pantheon/PantheonData/metrics.csv')
plot_path = Path('plots')

# Inputs
# Metric: 95th percentile of the one-way delay
metric_delay = {'name': 'One-way delay',
                'unit': 'ms',
                'measure': 95,
                'bounds': [0, 100],  # expected value range
                'tag': 'delay'       # do not change the tag
                }
# Convergence test parameters
convergence = {'expected': True,
               'confidence': 95,  # in %
               'tolerance': 5,    # in %
               }
# KPI: 75th percentile of the metric values, estimated with 75% confidence
KPI_delay = {'percentile': 75,
             'confidence': 75,
             'name': 'One-way delay',
             'unit': 'ms',
             'bounds': [0, 100],  # expected value range
             'tag': 'delay'       # do not change the tag
             }

# Compute/load the metrics.
# Fix: the metric list used to be [metric_delay, metric_delay]; the duplicate
# entry made compute_metric process the very same metric twice for no benefit
# (compare the identical call for Figure 3 (a), which passes [metric_delay]).
metrics = pantheon.compute_metric(result_dir_list,
                                  meta_data_file,
                                  convergence,
                                  [metric_delay],
                                  out_name=out_name)

plot_out_name = "plot_example_KPI.pdf"

# Metric values of FillP for the series matching the second result directory
metric_data = np.array(metrics.loc[(metrics['cc'] == 'fillp') &
                                   (metrics['datetime'] == result_dir_list[1].stem)].delay_value)

# Annotation displaying the resulting KPI value on the plot
note = go.layout.Annotation(
    x=0.5,
    y=0,
    xref="paper",
    yref="y",
    text="KPI: 72.38 ms",
    showarrow=False,
    font={'size': 16}
)
custom_layout = {
    "title": None,
    "width": 300,
    "height": 100,
    "margin": dict(l=0, r=0, t=5, b=40),
    "showlegend": False,
    "yaxis": {'title': {'font': {'size': 16}}},
    "xaxis": {'title': {'font': {'size': 16},
                        'text': '95th percentile of one-way delay [ms]'}},
    "annotations": [note]
}
# Compute the KPI and plot the metric data + KPI (horizontal layout)
triscale.analysis_kpi(metric_data,
                      KPI_delay,
                      to_plot=['horizontal'],
                      plot_out_name=str(plot_path/plot_out_name),
                      custom_layout=custom_layout);
Output retrieved from file. Skipping computation.
Figure 3 (b) Runs’ metric data and corresponding KPI value.
# --- Figure 3 (c): series' KPI data and the variability score ---

# KPI definitions (one per performance dimension)
KPI_tput = dict(percentile=25,
                confidence=75,
                name='Average Throughput',
                unit='Mbit/s',
                bounds=[0, 120],   # expected value range
                tag='throughput')  # do not change the tag
KPI_delay = dict(percentile=75,
                 confidence=75,
                 name='One-way delay',
                 unit='ms',
                 bounds=[0, 100],  # expected value range
                 tag='delay')      # do not change the tag
# Variability score definition for the delay dimension
score_delay = dict(percentile=75,
                   confidence=75,
                   name='95th perc. of One-way delay',
                   unit='ms',
                   bounds=[0, 100],  # expected value range
                   tag='delay')      # do not change the tag

# Compute the KPIs
kpis = pantheon.compute_kpi(metrics,
                            [KPI_tput, KPI_delay],
                            'datetime',
                            plot=False,
                            verbose=False)

plot_path = Path('plots')
plot_out_name = "plot_example_var_score.pdf"

# Delay KPI values of FillP, one per series
kpi_data = np.array(kpis.loc[(kpis['cc'] == 'fillp')].delay_value)

# Annotation displaying the resulting variability score on the plot
note = go.layout.Annotation(
    x=0.5,
    y=0,
    xref="paper",
    yref="y",
    text="Var. Score: 1.66 ms",
    showarrow=False,
    font={'size': 16}
)
custom_layout = {
    "title": None,
    "width": 300,
    "height": 100,
    "margin": dict(l=0, r=0, t=5, b=40),
    "showlegend": False,
    "yaxis": {'title': {'font': {'size': 16}}},
    "xaxis": {'title': {'font': {'size': 16},
                        'text': 'One-way delay KPIs [ms]'}},
    "annotations": [note]
}
# Compute the variability score and plot KPI data + score (horizontal layout)
triscale.analysis_variability(kpi_data,
                              score_delay,
                              to_plot=['horizontal'],
                              plot_out_name=str(plot_path/plot_out_name),
                              custom_layout=custom_layout);
Figure 3 (c) Series’ KPI data and corresponding variability score.
# --- Figure 4: autocorrelation of FlockLab wireless link quality ---

# Raw link-quality data collected by the FlockLab maintainers (Aug. 2019)
data_file = Path('UseCase_Glossy/Data_FlockLab/2019-08_FlockLab_sky.csv')
df = flocklab.parse_data_file(str(data_file), active_link_threshold=50)

plot_path = Path('plots')
plot_name = 'plot_flocklab_autocorr.pdf'

# Profile the network; returns a trend plot and an autocorrelation plot
link_quality_bounds = [0, 100]
link_quality_name = 'PRR [%]'
fig_theil, fig_autocorr = triscale.network_profiling(df, link_quality_bounds, link_quality_name)

custom_layout = dict(
    title=None,
    width=500,
    height=250,
    margin=dict(l=40, r=0, t=0, b=40),
    legend={'x': .9, 'y': .9, 'font': {'size': 18}},
    xaxis={'title': {'font': {'size': 20},
                     'text': 'Lag'}},
)

# Restyle, export as PDF, then display the autocorrelation figure
fig_autocorr.update_layout(custom_layout)
fig_autocorr.write_image(str(plot_path / plot_name))
fig_autocorr.show()
Figure 4 Autocorrelation plot for the wireless link quality on FlockLab, based on the raw data collected by the testbed maintainers (data from August 2019). The dataset contains one test every two hours. The first peak at lag 12 (i.e., 24h) reveals the daily seasonal component. The data also show a second main peak at lag 84, which corresponds to one week. Indeed, there is less interference in the weekends than on weekdays, which creates a weekly seasonal component.
# --- Figure 5: LEDBAT throughput for increasing runtimes ---

# Metric: median egress throughput, with convergence test
metric_tput = dict(name='Avg. Throughput',
                   unit='Mbit/s',
                   measure=50,
                   bounds=[0, 120],   # expected value range
                   tag='throughput')  # do not change the tag
convergence = dict(expected=True,
                   confidence=95,  # in %
                   tolerance=5)    # in %

# One result directory per tested runtime (10s to 60s)
result_dir_path = Path('UseCase_Pantheon/PantheonData/10_20_30_40_50_60s')
result_dir_list = [x for x in result_dir_path.iterdir() if x.is_dir()]
meta_data_file = 'pantheon_metadata.json'
plot_path = Path('plots')

# Plot only the first LEDBAT run of each test
sample = {'cc': 'ledbat',
          'run': 1}

custom_layout = dict(
    title=None,
    width=500,
    height=300,
    margin=dict(l=40, r=0, t=0, b=40),
    legend={'x': .1, 'y': .9, 'font': {'size': 18}},
    xaxis={'title': {'font': {'size': 20},
                     'text': 'Time [ s ]'}},
    yaxis={'title': {'font': {'size': 20},
                     'text': metric_tput['name'] + ' [ ' + metric_tput['unit'] + ' ]'}},
)

# Runtime (in seconds) of each entry in result_dir_list
runtimes = [10, 60, 20, 50, 40, 30]
# Process the directories in increasing runtime order (10s, 20s, ..., 60s)
for dir_ix in [0, 2, 5, 4, 3, 1]:
    # Only the very first plot (10s runtime) keeps its legend
    if dir_ix != 0:
        custom_layout.update({"showlegend": False})
    plot_out_name = "plot_ledbat_%s_runtime.pdf" % runtimes[dir_ix]
    # Compute the metric for this runtime and plot the sample run
    metrics_design, figure = pantheon.compute_metric([result_dir_list[dir_ix]],
                                                     meta_data_file,
                                                     convergence,
                                                     [metric_tput],
                                                     plot=True,
                                                     showplot=False,
                                                     layout=custom_layout,
                                                     plot_out_name=str(plot_path/plot_out_name),
                                                     verbose=False,
                                                     sample=sample)
    figure.show()
Figure 5 Egress throughput of LEDBAT in MahiMahi, calibrated to the real path from AWS California to Mexico. A runtime of 30s is clearly not sufficient for LEDBAT's throughput to converge. The scheme does converge eventually, but even with 60s runtime, TriScale's convergence test fails: the impact of the start-up phase is too important. Two possible solutions are (i) to increase the runtime or (ii) to prune the start-up time in the raw data.
# --- Figure 6: autocorrelation test on two exemplary WebRTC series ---

result_dir_path = Path('UseCase_Pantheon/PantheonData/10runs_30s')
result_dir_list = [x for x in result_dir_path.iterdir() if x.is_dir()]
meta_data_file = 'pantheon_metadata.json'
config_file = Path('UseCase_Pantheon/PantheonData/config.yml')

# Convergence test parameters
convergence = dict(expected=True,
                   confidence=95,  # in %
                   tolerance=5)    # in %
# Per-run metrics
metric_tput = dict(name='Average Throughput',
                   unit='MBit/s',
                   measure=50,
                   bounds=[0, 120],   # expected value range
                   tag='throughput')  # do not change the tag
metric_delay = dict(name='95th perc. of One-way delay',
                    unit='ms',
                    measure=95,
                    bounds=[0, 100],  # expected value range
                    tag='delay')      # do not change the tag
metric_list = [metric_tput, metric_delay]

out_name = Path('UseCase_Pantheon/PantheonData/metrics.csv')
plot_path = Path('plots')

# Compute the per-run metrics (or load them from metrics.csv if present)
metrics = pantheon.compute_metric(result_dir_list,
                                  meta_data_file,
                                  convergence,
                                  metric_list,
                                  out_name=out_name,
                                  force_computation=False,
                                  plot=False,
                                  verbose=False)

# Base layout for the two stacked autocorrelation plots
custom_layout = {
    'title': None,
    "width": 400,
    "height": 150,
    "margin": dict(l=25, r=0, t=0, b=0),
    "legend": {'x': .9, 'y': 1.2, 'font': {'size': 16}},
    "xaxis": None
}

# Passed test (upper plot)
plot_out_name = "plot_webrtc_autocorr_passed.pdf"
sample = metrics.loc[(metrics['cc'] == 'webrtc') & (metrics['datetime'] == '2019-08-22T07:59:10:+0200')]
plot_passed = triplots.autocorr_plot(sample.throughput_value.values,
                                     layout=custom_layout,
                                     out_name=str(plot_path/plot_out_name))

# Failed test (lower plot: no legend, labeled x-axis)
plot_out_name = "plot_webrtc_autocorr_failed.pdf"
custom_layout.update({
    'showlegend': False,
    "xaxis": {'title': {'font': {'size': 16},
                        'text': 'Lag'}},
    "margin": dict(l=25, r=0, t=0, b=40)})
sample = metrics.loc[(metrics['cc'] == 'webrtc') & (metrics['datetime'] == '2019-08-21T12:14:13:+0200')]
plot_failed = triplots.autocorr_plot(sample.throughput_value.values,
                                     layout=custom_layout,
                                     out_name=str(plot_path/plot_out_name))
Output retrieved from file. Skipping computation.
Figure 6 Autocorrelation coefficient for two exemplary series of WebRTC. The upper series passes the autocorrelation test, whereas the lower series does not: this is an artifact induced by the small number of samples (ten in this case).
# --- Figure 7: variability scores for throughput and delay ---

result_dir_path = Path('UseCase_Pantheon/PantheonData/10runs_30s')
result_dir_list = [x for x in result_dir_path.iterdir() if x.is_dir()]
meta_data_file = 'pantheon_metadata.json'
config_file = Path('UseCase_Pantheon/PantheonData/config.yml')

# Convergence test parameters
convergence = dict(expected=True,
                   confidence=95,  # in %
                   tolerance=5)    # in %
# Per-run metrics
metric_tput = dict(name='Average Throughput',
                   unit='MBit/s',
                   measure=50,
                   bounds=[0, 120],   # expected value range
                   tag='throughput')  # do not change the tag
metric_delay = dict(name='95th perc. of One-way delay',
                    unit='ms',
                    measure=95,
                    bounds=[0, 100],  # expected value range
                    tag='delay')      # do not change the tag
# Per-series KPIs
KPI_tput = dict(percentile=25,
                confidence=75,
                name='Average Throughput',
                unit='MBit/s',
                bounds=[0, 120],   # expected value range
                tag='throughput')  # do not change the tag
KPI_delay = dict(percentile=75,
                 confidence=75,
                 name='95th perc. of One-way delay',
                 unit='ms',
                 bounds=[0, 100],  # expected value range
                 tag='delay')      # do not change the tag
# Variability scores across series
score_tput = dict(percentile=75,
                  confidence=75,
                  name='Throughput',
                  unit='MBit/s',
                  bounds=[0, 120],   # expected value range
                  tag='throughput')  # do not change the tag
score_delay = dict(percentile=75,
                   confidence=75,
                   name='One-way delay',
                   unit='ms',
                   bounds=[0, 100],  # expected value range
                   tag='delay')      # do not change the tag

out_name = Path('UseCase_Pantheon/PantheonData/metrics.csv')
plot_path = Path('plots')
metric_list = [metric_tput, metric_delay]
kpi_list = [KPI_tput, KPI_delay]
score_list = [score_tput, score_delay]

# Full TriScale pipeline: metrics -> KPIs -> variability scores
metrics = pantheon.compute_metric(result_dir_list,
                                  meta_data_file,
                                  convergence,
                                  metric_list,
                                  out_name=out_name,
                                  force_computation=False,
                                  plot=False,
                                  verbose=False)
KPIs = pantheon.compute_kpi(metrics,
                            kpi_list,
                            series='datetime',
                            plot=False,
                            verbose=False)
scores = pantheon.compute_score(KPIs,
                                score_list,
                                plot=False,
                                verbose=False)

custom_layout = {
    "width": 1000,
    "height": 150,
    "margin": dict(l=30, r=40, t=25, b=45),
}

# Render the score matrix and save it as PDF
plot_out_name = "plot_score_matrix.pdf"
pantheon.plot_triscale_scores_matrix(scores,
                                     score_list,
                                     config_file,
                                     layout=custom_layout,
                                     out_name=str(plot_path/plot_out_name)
                                     )
Output retrieved from file. Skipping computation.
Figure 7 Variability scores computed by TriScale for the performance dimensions throughput and delay. In this example, the variability scores are computed as the 25th to 75th percentile interval estimated with 75% confidence. From the computed variability scores, the user gets a quantification, with a 75% probability, about the range of variation in the KPI values for 50% of the series. The variability scores hence quantify reproducibility: the larger the scores, the less reproducible are the results.