In this tutorial we will show how to run a black-box attribute inference attack on a regression model. This will be demonstrated on the diabetes dataset from scikit-learn (https://scikit-learn.org/stable/datasets/toy_dataset.html#diabetes-dataset).
We start by trying to infer the 'sex' feature, which is a binary feature.
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import warnings
warnings.filterwarnings('ignore')
from art.utils import load_diabetes
(x_train, y_train), (x_test, y_test), _, _ = load_diabetes(test_set=0.5)
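Before attacking, it helps to confirm that the 'sex' feature (column index 1, as used below) is indeed binary and to see which two scaled values it takes in this dataset. A quick check along these lines works (the exact values depend on how load_diabetes scales the data):
import numpy as np
# column index 1 holds the 'sex' feature; it should contain exactly two distinct (scaled) values
print('Training data shape: ', x_train.shape)
print('Distinct values of the sex feature: ', np.unique(x_train[:, 1]))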
from sklearn.tree import DecisionTreeRegressor
from art.estimators.regression.scikitlearn import ScikitlearnRegressor
model = DecisionTreeRegressor()
model.fit(x_train, y_train)
art_regressor = ScikitlearnRegressor(model)
print('Base model score: ', model.score(x_test, y_test))
Base model score: -0.053305975661749994
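The score reported by scikit-learn here is the R² coefficient on the test set; a negative value means the decision tree generalizes worse than simply predicting the mean target, but it still serves as the target model for the attack.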
import numpy as np
from art.attacks.inference.attribute_inference import AttributeInferenceBlackBox
attack_train_ratio = 0.5
attack_train_size = int(len(x_train) * attack_train_ratio)
attack_x_train = x_train[:attack_train_size]
attack_y_train = y_train[:attack_train_size]
attack_x_test = x_train[attack_train_size:]
attack_y_test = y_train[attack_train_size:]
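Here the target model's training data is split in half: the first half is used to train the attack model, and the second half is the data whose 'sex' feature the attack will try to infer.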
attack_feature = 1 # sex
# get original model's predictions
attack_x_test_predictions = art_regressor.predict(attack_x_test).reshape(-1, 1)
# only attacked feature
attack_x_test_feature = attack_x_test[:, attack_feature].copy().reshape(-1, 1)
# training data without attacked feature
x_test_for_attack = np.delete(attack_x_test, attack_feature, 1)
bb_attack = AttributeInferenceBlackBox(art_regressor, attack_feature=attack_feature)
# train attack model (it learns to predict the attacked feature from the remaining features plus the target model's predictions)
bb_attack.fit(attack_x_train)
# the possible (scaled) values of the attacked 'sex' feature in this dataset
values = [-0.88085106, 1.]
# get inferred values
inferred_train_bb = bb_attack.infer(x_test_for_attack, pred=attack_x_test_predictions, values=values)
# check accuracy
train_acc = np.sum(inferred_train_bb == np.around(attack_x_test_feature, decimals=8).reshape(1,-1)) / len(inferred_train_bb)
print(train_acc)
0.6126126126126126
This means that for approximately 61% of the attacked training records, the 'sex' feature is inferred correctly using this attack. Now let's check the precision and recall:
def calc_precision_recall(predicted, actual, positive_value=1):
    score = 0  # both predicted and actual are positive
    num_positive_predicted = 0  # predicted positive
    num_positive_actual = 0  # actual positive
    for i in range(len(predicted)):
        if predicted[i] == positive_value:
            num_positive_predicted += 1
        if actual[i] == positive_value:
            num_positive_actual += 1
        if predicted[i] == actual[i]:
            if predicted[i] == positive_value:
                score += 1

    if num_positive_predicted == 0:
        precision = 1
    else:
        precision = score / num_positive_predicted  # the fraction of predicted "Yes" responses that are correct
    if num_positive_actual == 0:
        recall = 1
    else:
        recall = score / num_positive_actual  # the fraction of "Yes" responses that are predicted correctly

    return precision, recall
print(calc_precision_recall(inferred_train_bb, np.around(attack_x_test_feature, decimals=8), positive_value=1.))
(0.5816326530612245, 0.9661016949152542)
To verify the significance of these results, we now run a baseline attack that uses only the remaining features to try to predict the value of the attacked feature, with no use of the model itself.
from art.attacks.inference.attribute_inference import AttributeInferenceBaseline
baseline_attack = AttributeInferenceBaseline(attack_feature=attack_feature)
# train attack model
baseline_attack.fit(attack_x_train)
# infer values
inferred_train_baseline = baseline_attack.infer(x_test_for_attack, values=values)
# check accuracy
baseline_train_acc = np.sum(inferred_train_baseline == np.around(attack_x_test_feature, decimals=8).reshape(1,-1)) / len(inferred_train_baseline)
print(baseline_train_acc)
0.6666666666666666
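For a fuller comparison, the same helper can also be applied to the baseline attack's inferred values (an optional check; its output is not reproduced here):
# precision and recall of the baseline attack, using the same helper as above
print(calc_precision_recall(inferred_train_baseline, np.around(attack_x_test_feature, decimals=8), positive_value=1.))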
In this case, the black-box attack does not outperform the baseline: its accuracy (0.61) is slightly lower than the baseline's (0.67), so access to the model does not appear to leak much additional information about this feature.
Now we will try to infer the 'bmi' feature. Since it is a continuous feature, we evaluate the attack with mean squared error (MSE) instead of accuracy.
attack_feature = 3 # bmi
# only attacked feature
attack_x_test_feature = attack_x_test[:, attack_feature].copy().reshape(-1, 1)
# training data without attacked feature
x_test_for_attack = np.delete(attack_x_test, attack_feature, 1)
bb_attack = AttributeInferenceBlackBox(art_regressor, attack_feature=attack_feature)
# train attack model
bb_attack.fit(attack_x_train)
# get inferred values
inferred_train_bb = bb_attack.infer(x_test_for_attack, pred=attack_x_test_predictions)
# check MSE
train_mse = np.sum((attack_x_test_feature.flatten() - inferred_train_bb.flatten()) ** 2) / len(inferred_train_bb)
print(train_mse)
54.80737471036833
baseline_attack = AttributeInferenceBaseline(attack_feature=attack_feature)
# train attack model
baseline_attack.fit(attack_x_train)
# infer values
inferred_train_baseline = baseline_attack.infer(x_test_for_attack)
# check MSE
baseline_train_mse = np.sum((attack_x_test_feature.flatten() - inferred_train_baseline.flatten()) ** 2) / len(inferred_train_baseline)
print(baseline_train_mse)
67.66769489356126
The black-box attack achieves a lower MSE than the baseline (a lower MSE means more accurate inference of the attacked feature), so for this feature the attack benefits from access to the model.
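As an additional, optional sanity check (not part of the run above), both MSE values can be put in context by comparing them against a trivial predictor that always outputs the mean of the attacked feature, computed on the attack's training split:
# reference MSE of a trivial predictor that always outputs the training-split mean of the attacked feature
naive_prediction = np.full(len(attack_x_test_feature), attack_x_train[:, attack_feature].mean())
naive_mse = np.mean((attack_x_test_feature.flatten() - naive_prediction) ** 2)
print(naive_mse)
An attack MSE well below this reference indicates that the remaining features (and, for the black-box attack, the model's predictions) carry real information about the attacked feature.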