from pathlib import Path
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# Load the four experiment logs produced by the training/evaluation runs.
tests, utility, train, welfare = (
    pd.read_csv(f'{stem}.csv') for stem in ('tests', 'utility', 'train', 'welfare')
)
# Overwrite matching cells of the training log with the test-time values
# (pandas aligns on index/columns; pulls Revenue and Regret from the test data).
train.update(tests)
# Aggregate the raw logs to worst-case values over all misreports.
# IDIOM FIX: the original recovered flat columns from each grouped Series
# via a ``pd.DataFrame(pd.DataFrame(s).to_records())`` round-trip;
# ``reset_index()`` produces the same flat frame in one step.

# Worst case (max) utility per agent over all misreports.
util_data = (
    utility.groupby(['Iter', 'Agent', 'Noise', 'Clip', 'Exp'])['Utility']
    .max()
    .reset_index()
)
# Worst case (max) regret per agent over all misreports.
reg_data = (
    utility.groupby(['Iter', 'Agent', 'Noise', 'Clip', 'Exp'])['Regret']
    .max()
    .reset_index()
)
# Worst case (min) revenue over all misreports.
train_data = (
    train.groupby(['Iter', 'Noise', 'Clip', 'Exp'])['Revenue']
    .min()
    .reset_index()
)
# Worst case (max) total regret over all misreports.
reg_sum_data = (
    train.groupby(['Iter', 'Noise', 'Clip', 'Exp'])['Regret']
    .max()
    .reset_index()
)
# Worst case (min) welfare over all misreports.
wel_data = (
    welfare.groupby(['Iter', 'Noise', 'Clip', 'Exp'])['Welfare']
    .min()
    .reset_index()
)
# Merge the per-experiment training logs into one combined train.csv.
# BUG FIX: the original ran ``pd.concat([train1, ...])`` with a literal
# Ellipsis placeholder, which raises TypeError at runtime.  Presumably the
# intent was to concatenate every ``exp*/train.csv`` (exp1, exp2, ...) --
# TODO confirm the experiment directory layout.
exp_logs = sorted(Path('.').glob('exp*/train.csv'))
train = pd.concat([pd.read_csv(p) for p in exp_logs])
train.to_csv("train.csv")
# Facet each worst-case metric over the noise/clip grid, one line per
# experiment (``hue='Exp'``), plotted against training iteration.
grid_kwargs = dict(col='Noise', row='Clip', hue='Exp')
w = sns.FacetGrid(wel_data, **grid_kwargs).map(plt.plot, 'Iter', 'Welfare')
rev = sns.FacetGrid(train_data, **grid_kwargs).map(plt.plot, 'Iter', 'Revenue')
# Total regret is faceted by noise only.
reg_sum = sns.FacetGrid(reg_sum_data, col='Noise', hue='Exp').map(plt.plot, 'Iter', 'Regret')
# Per-agent regret, shown for agent 0.
reg0 = sns.FacetGrid(reg_data.query('Agent == 0'), **grid_kwargs).map(plt.plot, 'Iter', 'Regret')
# Per-agent utility, faceted by agent instead of clip level.
a = sns.FacetGrid(util_data, col='Noise', row='Agent', hue='Exp').map(plt.plot, 'Iter', 'Utility')
# Findings:
# In low noise settings, we have high regret and slow convergence, since the mechanism learner is not robust.
# In intermediate noise settings, we get better-than-lottery welfare and revenue, as well as lower regret and faster convergence than with the non-robust learner.
# In high noise settings, we approach a lottery, resulting in worse allocative welfare and revenue, but low regret.