@@ -6,6 +6,7 @@
"mindarmour/setup.py" "missing-docstring"
"mindarmour/setup.py" "invalid-name"
"mindarmour/mindarmour/reliability/model_fault_injection/fault_injection.py" "protected-access"
"mindarmour/setup.py" "unused-argument"
# Tests
"mindarmour/tests/st" "missing-docstring"
@@ -1,2 +1,2 @@
mindspore:
'mindspore/mindspore/version/202202/20220226/master_20220226002452_09f114e52ef6ebdefb1de4477e035b771b61f5b6/'
'mindspore/mindspore/daily/202203/20220320/master_20220320041531_3e442945369de2d9dd20e9e2e9d3c7524a128ee7_newest/'
@@ -15,7 +15,7 @@
MindArmour, a tool box of MindSpore to enhance model trustworthiness and achieve
privacy-preserving machine learning.
"""
from .adv_robustness.attacks import Attack
from .adv_robustness.attacks.attack import Attack
from .adv_robustness.attacks.black.black_model import BlackModel
from .adv_robustness.defenses.defense import Defense
from .adv_robustness.detectors.detector import Detector
@@ -15,8 +15,10 @@
This module includes classical black-box and white-box attack algorithms
in making adversarial examples.
"""
from .gradient_method import *
from .iterative_gradient_method import *
from .gradient_method import FastGradientMethod, FastGradientSignMethod, RandomFastGradientMethod, \
    RandomFastGradientSignMethod, LeastLikelyClassMethod, RandomLeastLikelyClassMethod
from .iterative_gradient_method import IterativeGradientMethod, BasicIterativeMethod, MomentumIterativeMethod, \
    ProjectedGradientDescent, DiverseInputIterativeMethod, MomentumDiverseInputIterativeMethod
from .deep_fool import DeepFool
from .jsma import JSMAAttack
from .carlini_wagner import CarliniWagnerL2Attack
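Note: replacing "from ... import *" with explicit names makes the package's public surface auditable and satisfies pylint's wildcard-import check. A minimal sketch of the same re-export pattern, with hypothetical module contents (not MindArmour's actual classes):

# gradient_method.py (hypothetical): declare the public names explicitly.
__all__ = ['FastGradientMethod', 'RandomFastGradientMethod']

class FastGradientMethod:
    """One-step gradient-based attack (placeholder)."""

class RandomFastGradientMethod(FastGradientMethod):
    """FGM variant with a random start (placeholder)."""

# The package __init__.py then re-exports by name, so every symbol can be traced:
# from .gradient_method import FastGradientMethod, RandomFastGradientMethod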
@@ -178,13 +178,12 @@ class Attack:
        best_position = check_numpy_param('best_position', best_position)
        x_ori, best_position = check_equal_shape('x_ori', x_ori, 'best_position', best_position)
        _, original_num = self._detection_scores((best_position,) + auxiliary_inputs, gt_boxes, gt_labels, model)
        # pylint: disable=invalid-name
        REDUCTION_ITERS = 6  # recover 10% difference each time and recover 60% totally.
        for _ in range(REDUCTION_ITERS):
            BLOCK_NUM = 30  # divide the image into 30 segments
            block_width = best_position.shape[0] // BLOCK_NUM
        reduction_iters = 6  # recover 10% difference each time and recover 60% totally.
        for _ in range(reduction_iters):
            block_num = 30  # divide the image into 30 segments
            block_width = best_position.shape[0] // block_num
            if block_width > 0:
                for i in range(BLOCK_NUM):
                for i in range(block_num):
                    diff = x_ori[i*block_width: (i+1)*block_width, :, :]\
                        - best_position[i*block_width:(i+1)*block_width, :, :]
                    if np.max(np.abs(diff)) >= 0.1*(self._bounds[1] - self._bounds[0]):
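Note: the renamed loop is the perturbation-reduction pass: the image is split into 30 blocks along its first axis, and any block that still differs from the original by more than 10% of the value range is partially restored. The exact recovery step sits outside this hunk, so the 10%-per-pass update in this standalone sketch is an assumption taken from the comment:

import numpy as np

def reduce_perturbation(x_ori, x_adv, bounds=(0.0, 1.0), reduction_iters=6, block_num=30):
    """Shrink an adversarial perturbation block by block (sketch)."""
    x_adv = x_adv.copy()
    for _ in range(reduction_iters):
        block_width = x_adv.shape[0] // block_num
        if block_width <= 0:
            break
        for i in range(block_num):
            sl = slice(i * block_width, (i + 1) * block_width)
            diff = x_ori[sl] - x_adv[sl]
            # only touch blocks that still differ noticeably from the original
            if np.max(np.abs(diff)) >= 0.1 * (bounds[1] - bounds[0]):
                x_adv[sl] = x_adv[sl] + 0.1 * diff  # assumed: recover 10% of the gap per pass
    return x_adv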
@@ -125,20 +125,14 @@ class CarliniWagnerL2Attack(Attack):
        self._num_classes = check_int_positive('num_classes', num_classes)
        self._min = check_param_type('box_min', box_min, float)
        self._max = check_param_type('box_max', box_max, float)
        self._bin_search_steps = check_int_positive('search_steps',
                                                    bin_search_steps)
        self._max_iterations = check_int_positive('max_iterations',
                                                  max_iterations)
        self._confidence = check_param_multi_types('confidence', confidence,
                                                   [int, float])
        self._learning_rate = check_value_positive('learning_rate',
                                                   learning_rate)
        self._initial_const = check_value_positive('initial_const',
                                                   initial_const)
        self._bin_search_steps = check_int_positive('search_steps', bin_search_steps)
        self._max_iterations = check_int_positive('max_iterations', max_iterations)
        self._confidence = check_param_multi_types('confidence', confidence, [int, float])
        self._learning_rate = check_value_positive('learning_rate', learning_rate)
        self._initial_const = check_value_positive('initial_const', initial_const)
        self._abort_early = check_param_type('abort_early', abort_early, bool)
        self._fast = check_param_type('fast', fast, bool)
        self._abort_early_check_ratio = check_value_positive('abort_early_check_ratio',
                                                             abort_early_check_ratio)
        self._abort_early_check_ratio = check_value_positive('abort_early_check_ratio', abort_early_check_ratio)
        self._targeted = check_param_type('targeted', targeted, bool)
        self._net_grad = GradWrap(self._network)
        self._sparse = check_param_type('sparse', sparse, bool)
@@ -154,10 +148,8 @@ class CarliniWagnerL2Attack(Attack):
            new_x (numpy.ndarray): Adversarial examples.
            org_x (numpy.ndarray): Original benign input samples.
            org_or_target_class (numpy.ndarray): Original/target labels.
            constant (float): A trade-off constant to use to balance loss
                and perturbation norm.
            confidence (float): Confidence level of the output of adversarial
                examples.
            constant (float): A trade-off constant to use to balance loss and perturbation norm.
            confidence (float): Confidence level of the output of adversarial examples.

        Returns:
            numpy.ndarray, norm of perturbation, sum of the loss and the
@@ -183,7 +175,7 @@ class CarliniWagnerL2Attack(Attack):
        other_class_index = _best_logits_of_other_class(
            logits, org_or_target_class, value=np.inf)
        loss1 = np.sum((new_x - org_x)**2,
        loss1 = np.sum((new_x - org_x) ** 2,
                       axis=tuple(range(len(new_x.shape))[1:]))
        loss2 = np.zeros_like(loss1, dtype=self._dtype)
        loss2_grade = np.zeros_like(new_x, dtype=self._dtype)
@@ -193,16 +185,16 @@ class CarliniWagnerL2Attack(Attack):
                loss2[i] = max(0, logits[i][other_class_index[i]]
                               - logits[i][org_or_target_class[i]]
                               + confidence)
                loss2_grade[i] = constant[i]*(jaco_grad[other_class_index[
                loss2_grade[i] = constant[i] * (jaco_grad[other_class_index[
                    i]][i] - jaco_grad[org_or_target_class[i]][i])
        else:
            for i in range(org_or_target_class.shape[0]):
                loss2[i] = max(0, logits[i][org_or_target_class[i]]
                               - logits[i][other_class_index[i]] + confidence)
                loss2_grade[i] = constant[i]*(jaco_grad[org_or_target_class[
                loss2_grade[i] = constant[i] * (jaco_grad[org_or_target_class[
                    i]][i] - jaco_grad[other_class_index[i]][i])
        total_loss = loss1 + constant*loss2
        loss1_grade = 2*(new_x - org_x)
        total_loss = loss1 + constant * loss2
        loss1_grade = 2 * (new_x - org_x)
        for i in range(org_or_target_class.shape[0]):
            if loss2[i] < 0:
                msg = 'loss value should be greater than or equal to 0, ' \
@@ -233,7 +225,7 @@ class CarliniWagnerL2Attack(Attack):
        mean = (self._min + self._max) / 2
        diff = (self._max - self._min) / 2
        inputs = (inputs - mean) / diff
        inputs = inputs*0.999999
        inputs = inputs * 0.999999
        return np.arctanh(inputs)

    def _to_model_space(self, inputs):
@@ -257,8 +249,8 @@ class CarliniWagnerL2Attack(Attack):
        the_grad = 1 - np.square(inputs)
        mean = (self._min + self._max) / 2
        diff = (self._max - self._min) / 2
        inputs = inputs*diff + mean
        the_grad = the_grad*diff
        inputs = inputs * diff + mean
        the_grad = the_grad * diff
        return inputs, the_grad
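Note: _to_attack_space and _to_model_space are the usual Carlini-Wagner change of variables: optimization runs over w = arctanh((x - mean) / diff), and tanh maps every iterate back into [box_min, box_max]; the_grad is the d(tanh)/dw factor consumed by the chain rule. A round-trip sketch using the same 0.999999 shrink:

import numpy as np

def to_attack_space(x, box_min=0.0, box_max=1.0):
    mean, diff = (box_min + box_max) / 2, (box_max - box_min) / 2
    x = (x - mean) / diff            # scale into (-1, 1)
    return np.arctanh(x * 0.999999)  # shrink slightly so arctanh stays finite

def to_model_space(w, box_min=0.0, box_max=1.0):
    mean, diff = (box_min + box_max) / 2, (box_max - box_min) / 2
    x = np.tanh(w)
    grad = (1 - np.square(x)) * diff  # dx/dw for the chain rule
    return x * diff + mean, grad

x_back, _ = to_model_space(to_attack_space(np.array([0.2, 0.8])))
# x_back matches the input up to the 0.999999 shrink factor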
    def _check_success(self, logits, labels):
@@ -292,35 +284,30 @@ class CarliniWagnerL2Attack(Attack):
        reconstructed_original, _ = self._to_model_space(att_original)
        # find an adversarial sample
        const = np.ones_like(labels, dtype=self._dtype)*self._initial_const
        const = np.ones_like(labels, dtype=self._dtype) * self._initial_const
        lower_bound = np.zeros_like(labels, dtype=self._dtype)
        upper_bound = np.ones_like(labels, dtype=self._dtype)*np.inf
        upper_bound = np.ones_like(labels, dtype=self._dtype) * np.inf
        adversarial_res = inputs.copy()
        adversarial_loss = np.ones_like(labels, dtype=self._dtype)*np.inf
        adversarial_loss = np.ones_like(labels, dtype=self._dtype) * np.inf
        samples_num = labels.shape[0]
        adv_flag = np.zeros_like(labels)
        for binary_search_step in range(self._bin_search_steps):
            if (binary_search_step == self._bin_search_steps - 1) and \
                    (self._bin_search_steps >= 10):
            if (binary_search_step == self._bin_search_steps - 1) and (self._bin_search_steps >= 10):
                const = min(1e10, upper_bound)
            LOGGER.debug(TAG,
                         'starting optimization with const = %s',
                         str(const))
            LOGGER.debug(TAG, 'starting optimization with const = %s', str(const))
            att_perturbation = np.zeros_like(att_original, dtype=self._dtype)
            loss_at_previous_check = np.ones_like(labels, dtype=self._dtype)*np.inf
            loss_at_previous_check = np.ones_like(labels, dtype=self._dtype) * np.inf
            # create a new optimizer to minimize the perturbation
            optimizer = _AdamOptimizer(att_perturbation.shape)
            for iteration in range(self._max_iterations):
                x_input, dxdp = self._to_model_space(
                    att_original + att_perturbation)
                x_input, dxdp = self._to_model_space(att_original + att_perturbation)
                logits = self._network(Tensor(x_input)).asnumpy()
                current_l2_loss, current_loss, dldx = self._loss_function(
                    logits, x_input, reconstructed_original,
                    labels, const, self._confidence)
                current_l2_loss, current_loss, dldx = self._loss_function(logits, x_input, reconstructed_original,
                                                                          labels, const, self._confidence)
                is_adv = self._check_success(logits, labels)
@@ -334,58 +321,51 @@ class CarliniWagnerL2Attack(Attack):
                if np.all(adv_flag):
                    if self._fast:
LOGGER.debug(TAG, "succeed find adversarial examples.") | |||
                        msg = 'iteration: {}, logits_att: {}, ' \
                              'loss: {}, l2_dist: {}' \
                            .format(iteration,
                                    np.argmax(logits, axis=1),
                                    current_loss, current_l2_loss)
                        msg = 'iteration: {}, logits_att: {}, loss: {}, l2_dist: {}' \
                            .format(iteration, np.argmax(logits, axis=1), current_loss, current_l2_loss)
                        LOGGER.debug(TAG, msg)
                        return adversarial_res
                dldx, inputs = check_equal_shape('dldx', dldx, 'inputs', inputs)
                gradient = dldx*dxdp
                att_perturbation += \
                    optimizer(gradient, self._learning_rate)
                gradient = dldx * dxdp
                att_perturbation += optimizer(gradient, self._learning_rate)
                # check if should stop iteration early
                flag = True
                iter_check = iteration % (np.ceil(
                    self._max_iterations*self._abort_early_check_ratio))
                    self._max_iterations * self._abort_early_check_ratio))
                if self._abort_early and iter_check == 0:
                    # check progress
                    for i in range(inputs.shape[0]):
                        if current_loss[i] <= .9999*loss_at_previous_check[i]:
                        if current_loss[i] <= .9999 * loss_at_previous_check[i]:
                            flag = False
                    # stop Adam if all samples have no progress
                    if flag:
                        LOGGER.debug(TAG,
                                     'step:%d, no progress yet, stop iteration',
                                     binary_search_step)
                        LOGGER.debug(TAG, 'step:%d, no progress yet, stop iteration', binary_search_step)
                        break
                    loss_at_previous_check = current_loss
            for i in range(samples_num):
                # update bound based on search result
                if adv_flag[i]:
                    LOGGER.debug(TAG,
                                 'example %d, found adversarial with const=%f',
                                 i, const[i])
                    upper_bound[i] = const[i]
                else:
                    LOGGER.debug(TAG,
                                 'example %d, failed to find adversarial'
                                 ' with const=%f',
                                 i, const[i])
                    lower_bound[i] = const[i]
                if upper_bound[i] == np.inf:
                    const[i] *= 10
                else:
                    const[i] = (lower_bound[i] + upper_bound[i]) / 2
            upper_bound, lower_bound, const = self._update_bounds(samples_num, adv_flag, const, upper_bound,
                                                                  lower_bound)
        return adversarial_res

    def _update_bounds(self, samples_num, adv_flag, const, upper_bound, lower_bound):
        """update bound based on search result"""
        for i in range(samples_num):
            if adv_flag[i]:
                LOGGER.debug(TAG, 'example %d, found adversarial with const=%f', i, const[i])
                upper_bound[i] = const[i]
            else:
                LOGGER.debug(TAG, 'example %d, failed to find adversarial with const=%f', i, const[i])
                lower_bound[i] = const[i]
            if upper_bound[i] == np.inf:
                const[i] *= 10
            else:
                const[i] = (lower_bound[i] + upper_bound[i]) / 2
        return upper_bound, lower_bound, const
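Note: the extracted _update_bounds is a per-sample binary search over the C&W trade-off constant: success tightens the upper bound (a smaller constant may yield a smaller perturbation), failure raises the lower bound, and the constant grows tenfold until the first success brackets it. One update step in isolation:

import numpy as np

def update_bounds(adv_flag, const, upper_bound, lower_bound):
    """One binary-search step over the trade-off constant (sketch)."""
    for i in range(len(const)):
        if adv_flag[i]:
            upper_bound[i] = const[i]  # success: try a smaller constant next
        else:
            lower_bound[i] = const[i]  # failure: the constant must grow
        if upper_bound[i] == np.inf:
            const[i] *= 10             # not bracketed yet: grow exponentially
        else:
            const[i] = (lower_bound[i] + upper_bound[i]) / 2  # bisect the bracket
    return upper_bound, lower_bound, const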
class _AdamOptimizer:
    """
@@ -428,8 +408,8 @@ class _AdamOptimizer:
        """
        gradient = check_numpy_param('gradient', gradient)
        self._t += 1
        self._m = beta1*self._m + (1 - beta1)*gradient
        self._v = beta2*self._v + (1 - beta2)*gradient**2
        alpha = learning_rate*np.sqrt(1 - beta2**self._t) / (1 - beta1**self._t)
        pertur = -alpha*self._m / (np.sqrt(self._v) + epsilon)
        self._m = beta1 * self._m + (1 - beta1) * gradient
        self._v = beta2 * self._v + (1 - beta2) * gradient ** 2
        alpha = learning_rate * np.sqrt(1 - beta2 ** self._t) / (1 - beta1 ** self._t)
        pertur = -alpha * self._m / (np.sqrt(self._v) + epsilon)
        return pertur
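Note: the optimizer is plain Adam with the bias correction folded into the step size; alpha = lr * sqrt(1 - beta2^t) / (1 - beta1^t) applied to m / (sqrt(v) + eps) matches the textbook m_hat / (sqrt(v_hat) + eps) form up to where epsilon enters the denominator. A minimal functional version:

import numpy as np

def adam_step(m, v, g, t, lr=0.01, beta1=0.9, beta2=0.999, eps=1e-8):
    """One Adam update; returns the new moments and the parameter step."""
    m = beta1 * m + (1 - beta1) * g
    v = beta2 * v + (1 - beta2) * g ** 2
    alpha = lr * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)  # folded bias correction
    return m, v, -alpha * m / (np.sqrt(v) + eps)

m, v, step = adam_step(np.zeros(2), np.zeros(2), np.array([0.5, -0.5]), t=1)
# at t=1 the step is approximately -lr * sign(g)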
@@ -253,16 +253,7 @@ class DeepFool(Attack):
                    if diff_w_k < diff_w:
                        diff_w = diff_w_k
                        weight = w_k
                if self._norm_level == 2 or self._norm_level == '2':
                    r_i = diff_w*weight / (np.linalg.norm(weight) + 1e-8)
                elif self._norm_level == np.inf or self._norm_level == 'inf':
                    r_i = diff_w*np.sign(weight) \
                          / (np.linalg.norm(weight, ord=1) + 1e-8)
                else:
                    msg = 'ord {} is not available in normalization,' \
                        .format(str(self._norm_level))
                    LOGGER.error(TAG, msg)
                    raise NotImplementedError(msg)
                r_i = self._normalize_r_i(diff_w, weight)
                r_tot[idx, ...] = r_tot[idx, ...] + r_i
            images = self._update_image(x_origin, r_tot)
@@ -311,16 +302,7 @@ class DeepFool(Attack):
                        diff_w = diff_w_k
                        weight = w_k
                if self._norm_level == 2 or self._norm_level == '2':
                    r_i = diff_w*weight / (np.linalg.norm(weight) + 1e-8)
                elif self._norm_level == np.inf or self._norm_level == 'inf':
                    r_i = diff_w*np.sign(weight) \
                          / (np.linalg.norm(weight, ord=1) + 1e-8)
                else:
                    msg = 'ord {} is not available in normalization.' \
                        .format(str(self._norm_level))
                    LOGGER.error(TAG, msg)
                    raise NotImplementedError(msg)
                r_i = self._normalize_r_i(diff_w, weight)
                r_tot[idx, ...] = r_tot[idx, ...] + r_i
            if self._bounds is not None:
@@ -337,3 +319,16 @@ class DeepFool(Attack):
            inputs = inputs.astype(inputs_dtype)
            del preds, grads
        return inputs

    def _normalize_r_i(self, diff_w, weight):
        """normalize r_i used to update r_tot"""
        if self._norm_level == 2 or self._norm_level == '2':
            r_i = diff_w * weight / (np.linalg.norm(weight) + 1e-8)
        elif self._norm_level == np.inf or self._norm_level == 'inf':
            r_i = diff_w * np.sign(weight) / (np.linalg.norm(weight, ord=1) + 1e-8)
        else:
            msg = 'ord {} is not available in normalization.'.format(str(self._norm_level))
            LOGGER.error(TAG, msg)
            raise NotImplementedError(msg)
        return r_i
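Note: the two branches of the consolidated helper are DeepFool's step for the supported norms: under L2 the step moves along the weight vector scaled by 1/||w||_2, under Linf it moves along sign(w) scaled by 1/||w||_1, with diff_w carrying the distance term computed earlier in the loop. A tiny numeric check:

import numpy as np

def normalize_step(diff_w, weight, norm_level=2):
    """Scale the DeepFool step direction for the chosen norm (sketch)."""
    if norm_level in (2, '2'):
        return diff_w * weight / (np.linalg.norm(weight) + 1e-8)
    if norm_level in (np.inf, 'inf'):
        return diff_w * np.sign(weight) / (np.linalg.norm(weight, ord=1) + 1e-8)
    raise NotImplementedError('ord {} is not available in normalization.'.format(norm_level))

w = np.array([3.0, -4.0])
print(normalize_step(1.0, w, 2))       # w / 5: along the weight vector
print(normalize_step(1.0, w, np.inf))  # [1, -1] / 7: the sign pattern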
@@ -160,9 +160,7 @@ class SimilarityDetector(Detector):
            distance_mat = np.sort(distance_mat, axis=-1)
            distances.append(distance_mat[:, :self._max_k_neighbor])
        # the rest
        distance_mat = _pairwise_distances(x_input=data[(data.shape[0] //
                                                         self._chunk_size)*
                                                        self._chunk_size:, :],
        distance_mat = _pairwise_distances(x_input=data[(data.shape[0] // self._chunk_size) * self._chunk_size:, :],
                                           y_input=data)
        distance_mat = np.sort(distance_mat, axis=-1)
        distances.append(distance_mat[:, :self._max_k_neighbor])
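Note: the joined call completes the detector's chunked k-nearest-neighbor computation, which keeps the pairwise-distance matrix memory-bounded: full chunks of rows are compared against the whole set first, then the remainder data[(n // chunk_size) * chunk_size:] in one final call. The pattern with a naive stand-in for _pairwise_distances (Euclidean semantics assumed):

import numpy as np

def pairwise_distances(x_input, y_input):
    """Euclidean distances between rows (naive stand-in)."""
    return np.linalg.norm(x_input[:, None, :] - y_input[None, :, :], axis=-1)

def k_nearest_distances(data, chunk_size, k):
    distances = []
    full_chunks = data.shape[0] // chunk_size
    for c in range(full_chunks):  # full chunks first
        dist = pairwise_distances(data[c * chunk_size:(c + 1) * chunk_size], data)
        distances.append(np.sort(dist, axis=-1)[:, :k])
    tail = data[full_chunks * chunk_size:]  # then the rest
    if tail.shape[0] > 0:
        dist = pairwise_distances(tail, data)
        distances.append(np.sort(dist, axis=-1)[:, :k])
    return np.concatenate(distances, axis=0)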
@@ -450,7 +450,6 @@ class Fuzzer:
        for mutate in mutate_config:
            method = mutate['method']
            if method not in self._attacks_list:
                # mutates[method] = self._strategies[method]()
                mutates[method] = self._strategies[method]
            else:
                network = self._target_model._network
@@ -120,9 +120,11 @@ class _NaturalPerturb:
        pass

    def _check(self, image):
        """ Check image format. If input image is RGB and its shape
        """
        Check image format. If input image is RGB and its shape
        is (C, H, W), it will be transposed to (H, W, C). If the value
        of the image is not normalized , it will be rescaled between 0 to 255."""
        of the image is not normalized, it will be rescaled to the range 0 to 255.
""" | |||
rgb = _is_rgb(image) | |||
chw = False | |||
gray3dim = False | |||
@@ -131,14 +133,10 @@ class _NaturalPerturb: | |||
chw = _is_chw(image) | |||
if chw: | |||
image = _chw_to_hwc(image) | |||
else: | |||
image = image | |||
else: | |||
if len(np.shape(image)) == 3: | |||
gray3dim = True | |||
image = image[0] | |||
else: | |||
image = image | |||
if normalized: | |||
image = image * 255 | |||
return rgb, chw, normalized, gray3dim, np.uint8(image) | |||
@@ -36,7 +36,7 @@ _reciprocal = P.Reciprocal() | |||
@_grad_scale.register("Tensor", "Tensor") | |||
def tensor_grad_scale(scale, grad): | |||
""" grad scaling """ | |||
return grad*_reciprocal(scale) | |||
return grad * _reciprocal(scale) | |||
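Note: tensor_grad_scale undoes loss scaling: backpropagation ran on loss * scale, so each gradient is multiplied by the reciprocal of the scale to restore its true magnitude. The same arithmetic in plain NumPy:

import numpy as np

def unscale_gradients(grads, loss_scale):
    """Recover true gradients after backprop through (loss * loss_scale)."""
    return [g * (1.0 / loss_scale) for g in grads]

print(unscale_gradients([np.array([1024.0, -512.0])], 1024.0))
# [array([ 1. , -0.5])]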
class _TupleAdd(nn.Cell):
@@ -141,12 +141,11 @@ class DPOptimizerClassFactory:
            self._mech_param_updater = None
            if self._mech is not None and self._mech._decay_policy is not None:
                self._mech_param_updater = _MechanismsParamsUpdater(decay_policy=self._mech._decay_policy,
                                                                    decay_rate=self._mech._noise_decay_rate,
                                                                    cur_noise_multiplier=
                                                                    self._mech._noise_multiplier,
                                                                    init_noise_multiplier=
                                                                    self._mech._initial_noise_multiplier)
                self._mech_param_updater = _MechanismsParamsUpdater(
                    decay_policy=self._mech._decay_policy,
                    decay_rate=self._mech._noise_decay_rate,
                    cur_noise_multiplier=self._mech._noise_multiplier,
                    init_noise_multiplier=self._mech._initial_noise_multiplier)

        def construct(self, gradients):
            """
@@ -62,7 +62,7 @@ _reciprocal = P.Reciprocal()
@_grad_scale.register("Tensor", "Tensor")
def tensor_grad_scale(scale, grad):
    """ grad scaling """
    return grad*F.cast(_reciprocal(scale), F.dtype(grad))
    return grad * F.cast(_reciprocal(scale), F.dtype(grad))

class DPModel(Model):
@@ -417,10 +417,8 @@ class _TrainOneStepWithLossScaleCell(Cell):
            self._noise_mech_param_updater = _MechanismsParamsUpdater(
                decay_policy=self._noise_mech._decay_policy,
                decay_rate=self._noise_mech._noise_decay_rate,
                cur_noise_multiplier=
                self._noise_mech._noise_multiplier,
                init_noise_multiplier=
                self._noise_mech._initial_noise_multiplier)
                cur_noise_multiplier=self._noise_mech._noise_multiplier,
                init_noise_multiplier=self._noise_mech._initial_noise_multiplier)

    def construct(self, data, label, sens=None):
        """
@@ -444,8 +442,8 @@ class _TrainOneStepWithLossScaleCell(Cell):
        record_labels = self._split(label)
        # first index
        loss = self.network(record_datas[0], record_labels[0])
        scaling_sens_filled = C.ones_like(loss)*F.cast(scaling_sens,
                                                       F.dtype(loss))
        scaling_sens_filled = C.ones_like(loss) * F.cast(scaling_sens,
                                                         F.dtype(loss))
        record_grad = self.grad(self.network, weights)(record_datas[0],
                                                       record_labels[0],
                                                       scaling_sens_filled)
@@ -465,8 +463,8 @@ class _TrainOneStepWithLossScaleCell(Cell):
        total_loss = loss
        for i in range(1, self._micro_batches):
            loss = self.network(record_datas[i], record_labels[i])
            scaling_sens_filled = C.ones_like(loss)*F.cast(scaling_sens,
                                                           F.dtype(loss))
            scaling_sens_filled = C.ones_like(loss) * F.cast(scaling_sens,
                                                             F.dtype(loss))
            record_grad = self.grad(self.network, weights)(record_datas[i],
                                                           record_labels[i],
                                                           scaling_sens_filled)
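Note: these hunks are part of the differentially private training step: the batch is split into micro-batches, each micro-batch's loss is scaled and differentiated separately (so gradients can be clipped and noised per micro-batch elsewhere in the cell), and the results are aggregated. A NumPy sketch of the control flow only, with grad_fn standing in for the MindSpore grad call; the clip/noise/normalization steps are omitted because they sit outside these hunks:

import numpy as np

def microbatch_gradients(grad_fn, datas, labels, micro_batches):
    """Accumulate per-micro-batch gradients (clip/noise steps omitted)."""
    record_datas = np.array_split(datas, micro_batches)
    record_labels = np.array_split(labels, micro_batches)
    total_grad = grad_fn(record_datas[0], record_labels[0])  # first index
    for i in range(1, micro_batches):
        total_grad = total_grad + grad_fn(record_datas[i], record_labels[i])
    return total_grad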
@@ -31,6 +31,7 @@ def _is_positive_int(item):
        return False
    return item > 0


def _is_non_negative_int(item):
    """Verify that the value is a non-negative integer."""
    if not isinstance(item, int):
@@ -51,6 +52,7 @@ def _is_non_negative_float(item):
        return False
    return item >= 0


def _is_range_0_1_float(item):
    if not isinstance(item, (int, float)):
        return False
@@ -151,7 +153,6 @@ _VALID_CONFIG_CHECKLIST = {
}


def _check_config(attack_config, config_checklist):
    """
    Verify that config_list is valid.
@@ -161,7 +162,7 @@ def _check_config(attack_config, config_checklist):
        check_param_type("config", config, dict)
        if set(config.keys()) != {"params", "method"}:
            msg = "Keys of each config in attack_config must be {}," \
                "but got {}.".format({'method', 'params'}, set(config.keys()))
                  "but got {}.".format({'method', 'params'}, set(config.keys()))
            LOGGER.error(TAG, msg)
            raise KeyError(msg)
@@ -175,7 +176,7 @@ def _check_config(attack_config, config_checklist):
        if not params.keys() <= config_checklist[method].keys():
            msg = "Params in method {} is not accepted, the parameters " \
                "that can be set are {}.".format(method, set(config_checklist[method].keys()))
                  "that can be set are {}.".format(method, set(config_checklist[method].keys()))
            LOGGER.error(TAG, msg)
            raise KeyError(msg)
@@ -199,7 +200,7 @@ def _check_config(attack_config, config_checklist):
                break
        if not flag:
            msg = "Setting of parmeter {} in method {} is invalid".format(param_key, method)
            msg = "Setting of parameter {} in method {} is invalid".format(param_key, method)
            raise ValueError(msg)
@@ -26,11 +26,14 @@ from mindspore.nn import Cell
from mindarmour.utils.logger import LogUtil
from mindarmour.utils._check_param import check_int_positive, check_value_positive, \
    check_value_non_negative, check_param_type

LOGGER = LogUtil.get_instance()
TAG = 'Suppression training.'


class SuppressPrivacyFactory:
    """ Factory class of SuppressCtrl mechanisms"""

    def __init__(self):
        pass
@@ -107,6 +110,7 @@ class SuppressPrivacyFactory:
            LOGGER.error(TAG, msg)
            raise ValueError(msg)


class SuppressCtrl(Cell):
    """
    For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_suppress_privacy.html#%E5%BC%95%E5%85%A5%E6%8A%91%E5%88%B6%E9%9A%90%E7%A7%81%E8%AE%AD%E7%BB%83>`_
@@ -122,6 +126,7 @@ class SuppressCtrl(Cell):
        sparse_end (float): The sparsity to reach.
        sparse_start (Union[float, int]): The sparsity to start.
    """

    def __init__(self, networks, mask_layers, end_epoch, batch_num, start_epoch, mask_times, lr,
                 sparse_end, sparse_start):
        super(SuppressCtrl, self).__init__()
@@ -137,7 +142,7 @@ class SuppressCtrl(Cell):
        self.weight_lower_bound = 0.005  # all network weight will be larger than this value
        self.sparse_vibra = 0.02  # the sparsity may have certain range of variations
        self.sparse_valid_max_weight = 0.02  # if max network weight is less than this value, suppress operation stop temporarily
        self.sparse_valid_max_weight = 0.02  # if max network weight is less than this value, operation stops temporarily
        self.add_noise_thd = 0.50  # if network weight is more than this value, noise is forced
        self.noise_volume = 0.1  # noise volume 0.1
        self.base_ground_thd = 0.0000001  # if network weight is less than this value, will be considered as 0
@@ -149,72 +154,15 @@ class SuppressCtrl(Cell):
        self.mask_start_step = 0  # suppress operation is actually started at this step
        self.mask_prev_step = 0  # previous suppress operation is done at this step
        self.cur_sparse = 0.0  # current sparsity to which one suppress will get
        self.mask_all_steps = (end_epoch - start_epoch + 1)*batch_num  # the amount of step contained in all suppress operation
        self.mask_step_interval = self.mask_all_steps/mask_times  # the amount of step contaied in one suppress operation
        self.mask_all_steps = (end_epoch - start_epoch + 1) * batch_num  # the amount of step contained in all operation
        self.mask_step_interval = self.mask_all_steps / mask_times  # the amount of step contained in one operation
        self.mask_initialized = False  # flag means the initialization is done
        self.grad_idx_map = []
        if self.lr > 0.5:
            msg = "learning rate should not be greater than 0.5, but got {}".format(self.lr)
            LOGGER.error(TAG, msg)
            raise ValueError(msg)
        if self.mask_start_epoch > self.mask_end_epoch:
            msg = "start_epoch should not be greater than end_epoch, but got start_epoch and end_epoch are: " \
                  "{}, {}".format(self.mask_start_epoch, self.mask_end_epoch)
            LOGGER.error(TAG, msg)
            raise ValueError(msg)
        if self.mask_end_epoch > 100:
            msg = "The end_epoch should be smaller than 100, but got {}".format(self.mask_end_epoch)
            LOGGER.error(TAG, msg)
            raise ValueError(msg)
        if self.mask_step_interval <= 0:
            msg = "step_interval should be greater than 0, but got {}".format(self.mask_step_interval)
            LOGGER.error(TAG, msg)
            raise ValueError(msg)
        if self.mask_step_interval <= 10 or self.mask_step_interval >= 20:
            msg = "mask_interval should be greater than 10, smaller than 20, but got {}".format(self.mask_step_interval)
            msg += "\n Precision of trained model may be poor !!! "
            msg += "\n please modify epoch_start, epoch_end and batch_num !"
            msg += "\n mask_interval = (epoch_end-epoch_start+1)*batch_num/mask_times, batch_num = samples/batch_size"
            LOGGER.info(TAG, msg)
        if self.sparse_end >= 1.00 or self.sparse_end <= 0:
            msg = "sparse_end should be in range (0, 1), but got {}".format(self.sparse_end)
            LOGGER.error(TAG, msg)
            raise ValueError(msg)
        if self.sparse_start >= self.sparse_end:
            msg = "sparse_start should be smaller than sparse_end, but got sparse_start and sparse_end are: " \
                  "{}, {}".format(self.sparse_start, self.sparse_end)
            LOGGER.error(TAG, msg)
            raise ValueError(msg)
        self._check_params()
        if mask_layers is not None:
            mask_layer_id = 0
            for one_mask_layer in mask_layers:
                if not isinstance(one_mask_layer, MaskLayerDes):
                    msg = "mask_layers should be a list of MaskLayerDes, but got a {}".format(type(one_mask_layer))
                    LOGGER.error(TAG, msg)
                    raise ValueError(msg)
                layer_name = one_mask_layer.layer_name
                mask_layer_id2 = 0
                for one_mask_layer_2 in mask_layers:
                    if mask_layer_id != mask_layer_id2 and layer_name == one_mask_layer_2.layer_name:
                        msg = "Mask layer name should be unique, but got duplicate name: {} in mask_layer {} and {}".\
                            format(layer_name, mask_layer_id, mask_layer_id2)
                        LOGGER.error(TAG, msg)
                        raise ValueError(msg)
                    if mask_layer_id != mask_layer_id2 and one_mask_layer.grad_idx == one_mask_layer_2.grad_idx:
                        msg = "Grad_idx should be unique, but got duplicate idx: {} in mask_layer {} and {}".\
                            format(layer_name, one_mask_layer_2.layer_name, one_mask_layer.grad_idx)
                        LOGGER.error(TAG, msg)
                        raise ValueError(msg)
                    mask_layer_id2 = mask_layer_id2 + 1
                mask_layer_id = mask_layer_id + 1
            self._check_mask_layers()
        if networks is not None:
            for layer in networks.get_parameters(expand=True):
@@ -277,6 +225,71 @@ class SuppressCtrl(Cell):
            msg += "\nsup_privacy only support SGD optimizer"
            LOGGER.warn(TAG, msg)

    def _check_params(self):
        """check parameters"""
        if self.lr > 0.5:
            msg = "learning rate should not be greater than 0.5, but got {}".format(self.lr)
            LOGGER.error(TAG, msg)
            raise ValueError(msg)
        if self.mask_start_epoch > self.mask_end_epoch:
            msg = "start_epoch should not be greater than end_epoch, but got start_epoch and end_epoch are: " \
                  "{}, {}".format(self.mask_start_epoch, self.mask_end_epoch)
            LOGGER.error(TAG, msg)
            raise ValueError(msg)
        if self.mask_end_epoch > 100:
            msg = "The end_epoch should be smaller than 100, but got {}".format(self.mask_end_epoch)
            LOGGER.error(TAG, msg)
            raise ValueError(msg)
        if self.mask_step_interval <= 0:
            msg = "step_interval should be greater than 0, but got {}".format(self.mask_step_interval)
            LOGGER.error(TAG, msg)
            raise ValueError(msg)
        if self.mask_step_interval <= 10 or self.mask_step_interval >= 20:
            msg = "mask_interval should be greater than 10, smaller than 20, but got {}".format(self.mask_step_interval)
            msg += "\n Precision of trained model may be poor !!! "
            msg += "\n please modify epoch_start, epoch_end and batch_num !"
            msg += "\n mask_interval = (epoch_end-epoch_start+1)*batch_num/mask_times, batch_num = samples/batch_size"
            LOGGER.info(TAG, msg)
        if self.sparse_end >= 1.00 or self.sparse_end <= 0:
            msg = "sparse_end should be in range (0, 1), but got {}".format(self.sparse_end)
            LOGGER.error(TAG, msg)
            raise ValueError(msg)
        if self.sparse_start >= self.sparse_end:
            msg = "sparse_start should be smaller than sparse_end, but got sparse_start and sparse_end are: " \
                  "{}, {}".format(self.sparse_start, self.sparse_end)
            LOGGER.error(TAG, msg)
            raise ValueError(msg)

    def _check_mask_layers(self):
        """check mask layers"""
        mask_layer_id = 0
        for one_mask_layer in self.mask_layers:
            if not isinstance(one_mask_layer, MaskLayerDes):
                msg = "mask_layers should be a list of MaskLayerDes, but got a {}".format(type(one_mask_layer))
                LOGGER.error(TAG, msg)
                raise ValueError(msg)
            layer_name = one_mask_layer.layer_name
            mask_layer_id2 = 0
            for one_mask_layer_2 in self.mask_layers:
                if mask_layer_id != mask_layer_id2 and layer_name == one_mask_layer_2.layer_name:
                    msg = "Mask layer name should be unique, but got duplicate name: {} in mask_layer {} and {}". \
                        format(layer_name, mask_layer_id, mask_layer_id2)
                    LOGGER.error(TAG, msg)
                    raise ValueError(msg)
                if mask_layer_id != mask_layer_id2 and one_mask_layer.grad_idx == one_mask_layer_2.grad_idx:
msg = "Grad_idx should be unique, but got duplicate idx: {} in mask_layer {} and {}". \ | |||
format(layer_name, one_mask_layer_2.layer_name, one_mask_layer.grad_idx) | |||
                    LOGGER.error(TAG, msg)
                    raise ValueError(msg)
                mask_layer_id2 = mask_layer_id2 + 1
            mask_layer_id = mask_layer_id + 1
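Note: the extracted _check_mask_layers enforces unique layer names and unique grad indices with a quadratic index scan. The same contract reads shorter with itertools.combinations (a refactoring sketch, not the repository's code):

from itertools import combinations

def check_mask_layers_unique(mask_layers):
    """Raise if two mask layers share a layer_name or a grad_idx."""
    for (i, first), (j, second) in combinations(enumerate(mask_layers), 2):
        if first.layer_name == second.layer_name:
            raise ValueError('Mask layer name should be unique, but got duplicate '
                             'name: {} in mask_layer {} and {}'.format(first.layer_name, i, j))
        if first.grad_idx == second.grad_idx:
            raise ValueError('Grad_idx should be unique, but got duplicate '
                             'idx: {} in mask_layer {} and {}'.format(first.grad_idx, i, j))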
    def update_status(self, cur_epoch, cur_step, cur_step_in_epoch):
        """
        Update the suppress operation status.
@@ -296,7 +309,7 @@ class SuppressCtrl(Cell):
                self.mask_prev_step = cur_step
                self.to_do_mask = True
            # execute the last suppression operation
            elif cur_epoch == self.mask_end_epoch and cur_step_in_epoch == self.batch_num-2:
            elif cur_epoch == self.mask_end_epoch and cur_step_in_epoch == self.batch_num - 2:
                self.mask_prev_step = cur_step
                self.to_do_mask = True
            else:
@@ -340,8 +353,8 @@ class SuppressCtrl(Cell):
        grad_mask_cell = self.grads_mask_list[grad_idx]
        last_sparse_pos = grad_mask_cell.sparse_pos_list[-1]
        if actual_stop_pos <= 0 or \
            (actual_stop_pos < last_sparse_pos + grad_mask_cell.part_num and \
            grad_mask_cell.is_approximity and m > 0):
                (actual_stop_pos < last_sparse_pos + grad_mask_cell.part_num and \
                 grad_mask_cell.is_approximity and m > 0):
            sparse_weight_thd = 0
            msg = "{} len={}, sparse={}, current sparse thd={}, [idle] \n" \
                .format(layer.name, len_array, actual_stop_pos / len_array, sparse_weight_thd)
@@ -377,7 +390,7 @@ class SuppressCtrl(Cell):
        del partition
        msg = "{} len={}, sparse={}, current sparse thd={}, max={}, min={}, avg={}, avg_abs={} \n".format(
            layer.name, len_array, actual_stop_pos/len_array, sparse_weight_thd,
            layer.name, len_array, actual_stop_pos / len_array, sparse_weight_thd,
            weight_abs_max, weight_abs_min, weight_avg, weight_abs_avg)
        LOGGER.info(TAG, msg)
        del weight_array_flat_abs
@@ -413,7 +426,7 @@ class SuppressCtrl(Cell):
        p = 0
        q = 0
        # add noise on weights if not masking or clipping.
        weight_noise_bound = min(self.add_noise_thd, max(self.noise_volume*10, weight_abs_max*0.75))
        weight_noise_bound = min(self.add_noise_thd, max(self.noise_volume * 10, weight_abs_max * 0.75))
        size = self.grads_mask_list[layer_index].para_num
        for i in range(0, size):
            if mul_mask_array_flat[i] <= 0.0:
@@ -428,14 +441,14 @@ class SuppressCtrl(Cell):
                else:
                    # not mask
                    if weight_array_flat[i] > 0.0:
                        add_mask_array_flat[i] = (weight_array_flat[i] \
                        add_mask_array_flat[i] = (weight_array_flat[i]
                                                  - min(self.weight_lower_bound, sparse_weight_thd)) / self.lr
                    else:
                        add_mask_array_flat[i] = (weight_array_flat[i]
                                                  + min(self.weight_lower_bound, sparse_weight_thd)) / self.lr
                    p = p + 1
            elif is_lower_clip and abs(weight_array_flat[i]) <= \
                    self.weight_lower_bound and sparse_weight_thd > self.weight_lower_bound*0.5:
                    self.weight_lower_bound and sparse_weight_thd > self.weight_lower_bound * 0.5:
                # not mask
                mul_mask_array_flat[i] = 1.0
                if weight_array_flat[i] > 0.0:
@@ -463,8 +476,8 @@ class SuppressCtrl(Cell):
        grad_mask_cell.update()
        de_weight_cell.update()
        msg = "Dimension of mask tensor is {}D, which located in the {}-th layer of the network. \n The number of " \
              "suppressed elements, max-clip elements, min-clip elements and noised elements are {}, {}, {}, {}"\
            .format(len(grad_mask_cell.mul_mask_array_shape), layer_index, m, n, p, q)
              "suppressed elements, max-clip elements, min-clip elements and noised elements are {}, {}, {}, {}" \
            .format(len(grad_mask_cell.mul_mask_array_shape), layer_index, m, n, p, q)
        LOGGER.info(TAG, msg)
        grad_mask_cell.sparse_pos_list.append(m)
@@ -500,8 +513,8 @@ class SuppressCtrl(Cell):
        for i in range(0, part_num):
            if split_k_num <= 0:
                break
            array_row_mul_mask = mul_mask_array_flat[i * part_size : (i + 1) * part_size]
            array_row_flat_abs = weight_array_flat_abs[i * part_size : (i + 1) * part_size]
            array_row_mul_mask = mul_mask_array_flat[i * part_size: (i + 1) * part_size]
            array_row_flat_abs = weight_array_flat_abs[i * part_size: (i + 1) * part_size]
            if not init_batch_suppress:
                array_row_flat_abs_masked = np.where(array_row_mul_mask <= 0.0, -1.0, array_row_flat_abs)
                set_abs = set(array_row_flat_abs_masked)
@@ -553,7 +566,7 @@ class SuppressCtrl(Cell):
            split_k_num, (actual_stop_pos - last_sparse_pos), actual_stop_pos, real_suppress_num)
        LOGGER.info(TAG, msg)
        if init_batch_suppress:
            init_sparse_actual = real_suppress_num/para_num
            init_sparse_actual = real_suppress_num / para_num
print("init batch suppresss, actual sparse = {}".format(init_sparse_actual)) | |||
        gc.collect()
@@ -660,6 +673,7 @@ class SuppressCtrl(Cell):
        return sparse, sparse_value_1, sparse_value_2

    def calc_actual_sparse_for_fc1(self, networks):
        """calculate actual sparse for full connection 1 layer"""
        return self.calc_actual_sparse_for_layer(networks, "fc1.weight")

    def calc_actual_sparse_for_layer(self, networks, layer_name):
@@ -716,6 +730,7 @@ class SuppressCtrl(Cell):
        msg += "\nsup_privacy only support SGD optimizer"
        LOGGER.info(TAG, msg)


def get_one_mask_layer(mask_layers, layer_name):
    """
    Returns the layer definitions that need to be suppressed.
@@ -732,6 +747,7 @@ def get_one_mask_layer(mask_layers, layer_name):
            return each_mask_layer
    return None


class MaskLayerDes:
    """
    Describe the layer that needs to be suppressed.
@@ -763,6 +779,7 @@ class MaskLayerDes:
        >>> masklayers = []
        >>> masklayers.append(MaskLayerDes("conv1.weight", 0, False, True, 10))
    """

    def __init__(self, layer_name, grad_idx, is_add_noise, is_lower_clip, min_num, upper_bound=1.20):
        self.layer_name = check_param_type('layer_name', layer_name, str)
        check_param_type('grad_idx', grad_idx, int)
@@ -773,6 +790,7 @@ class MaskLayerDes:
        self.upper_bound = check_value_positive('upper_bound', upper_bound)
        self.inited = False


class GradMaskInCell(Cell):
    """
    Define the mask matrix for gradients masking.
@@ -787,6 +805,7 @@ class GradMaskInCell(Cell):
            If min_num is smaller than (parameter num*SuppressCtrl.sparse_end), min_num has no effect.
        upper_bound ([float, int]): max abs value of weight in this layer, default: 1.20.
    """

    def __init__(self, array, is_add_noise, is_lower_clip, min_num, upper_bound=1.20):
        super(GradMaskInCell, self).__init__()
        self.mul_mask_array_shape = array.shape
@@ -806,7 +825,7 @@ class GradMaskInCell(Cell):
        self.part_size = self.para_num
        self.part_num_max = 16
        self.para_many_num = 10000
        self.para_huge_num = 10*10000*10000
        self.para_huge_num = 10 * 10000 * 10000

        if self.para_num > self.para_many_num:
            self.is_approximity = True
@@ -836,6 +855,7 @@ class GradMaskInCell(Cell):
        """
        self.mul_mask_tensor = Tensor(self.mul_mask_array_flat.reshape(self.mul_mask_array_shape), mstype.float32)


class DeWeightInCell(Cell):
    """
    Define the mask matrix for de-weight masking.
@@ -843,6 +863,7 @@ class DeWeightInCell(Cell):
    Args:
        array (numpy.ndarray): The mask array.
    """

    def __init__(self, array):
        super(DeWeightInCell, self).__init__()
        self.add_mask_array_shape = array.shape
@@ -196,7 +196,6 @@ class _TupleMul(nn.Cell):
    def construct(self, input1, input2):
"""Add two tuple of data.""" | |||
        out = self.hyper_map(self.mul, input1, input2)
        #print(out)
        return out

# come from nn.cell_wrapper.TrainOneStepCell
@@ -11,8 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Out-of-Distribution detection module for images.
"""
@@ -33,6 +31,7 @@ class OodDetector:
        model (Model): The training model.
        ds_train (numpy.ndarray): The training dataset.
    """

    def __init__(self, model, ds_train):
        self.model = model
        self.ds_train = check_param_type('ds_train', ds_train, np.ndarray)
@@ -66,7 +65,6 @@ class OodDetector:
            - float, the optimal threshold.
        """

    def ood_predict(self, threshold, ds_test):
        """
        The out-of-distribution detection.
@@ -9,8 +9,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Concept drift module
""" | |||
@@ -8,7 +8,6 @@ | |||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
# See the License for the specific language governing permissions and | |||
# limitations under the License. | |||
# ============================================================================ | |||
""" | |||
This module provides model fault injection to evaluate the reliability of a given model.
""" | |||
@@ -8,8 +8,6 @@ | |||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
# See the License for the specific language governing permissions and | |||
# limitations under the License. | |||
# ============================================================================ | |||
""" | |||
Fault injection module | |||
""" | |||
@@ -8,8 +8,6 @@ | |||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
# See the License for the specific language governing permissions and | |||
# limitations under the License. | |||
# ============================================================================ | |||
""" | |||
Fault type module | |||
""" | |||
@@ -11,6 +11,9 @@ | |||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
# See the License for the specific language governing permissions and | |||
# limitations under the License. | |||
""" | |||
setup script | |||
""" | |||
import os | |||
import stat | |||
import shlex | |||
@@ -27,7 +30,7 @@ pkg_dir = os.path.join(cur_dir, 'build') | |||
def clean(): | |||
# pylint: disable=unused-argument | |||
"""clean""" | |||
def readonly_handler(func, path, execinfo): | |||
os.chmod(path, stat.S_IWRITE) | |||
func(path) | |||
@@ -38,6 +41,7 @@ def clean(): | |||
def write_version(file): | |||
"""write version""" | |||
file.write("__version__ = '{}'\n".format(version)) | |||