From ab56b4d4a620c2553444368a7e6ed7f968c39b64 Mon Sep 17 00:00:00 2001 From: shu-kun-zhang Date: Fri, 21 Jan 2022 11:09:19 +0800 Subject: [PATCH] Fix code dependency issue and fix typos --- .../adv_robustness/attacks/black/genetic_attack.py | 30 +--- .../attacks/black/hop_skip_jump_attack.py | 29 +--- .../attacks/black/natural_evolutionary_strategy.py | 35 +---- .../attacks/black/pointwise_attack.py | 24 +--- .../adv_robustness/attacks/black/pso_attack.py | 52 +------ .../attacks/black/salt_and_pepper_attack.py | 23 +-- .../adv_robustness/attacks/carlini_wagner.py | 27 +--- mindarmour/adv_robustness/attacks/deep_fool.py | 30 +--- .../adv_robustness/attacks/gradient_method.py | 52 +++---- .../attacks/iterative_gradient_method.py | 157 ++++++++++++--------- mindarmour/adv_robustness/attacks/jsma.py | 23 +-- mindarmour/adv_robustness/attacks/lbfgs.py | 14 +- .../adv_robustness/defenses/adversarial_defense.py | 24 ++-- .../defenses/natural_adversarial_defense.py | 4 +- .../defenses/projected_adversarial_defense.py | 4 +- .../detectors/black/similarity_detector.py | 33 +++++ mindarmour/adv_robustness/detectors/mag_net.py | 24 +++- .../detectors/region_based_detector.py | 2 +- .../evaluations/attack_evaluation.py | 26 +--- .../evaluations/defense_evaluation.py | 6 +- .../adv_robustness/evaluations/visual_metrics.py | 2 +- 21 files changed, 238 insertions(+), 383 deletions(-) diff --git a/mindarmour/adv_robustness/attacks/black/genetic_attack.py b/mindarmour/adv_robustness/attacks/black/genetic_attack.py index 4dd32e3..69d7b0c 100644 --- a/mindarmour/adv_robustness/attacks/black/genetic_attack.py +++ b/mindarmour/adv_robustness/attacks/black/genetic_attack.py @@ -92,6 +92,12 @@ class GeneticAttack(Attack): >>> net = Net() >>> model = ModelToBeAttacked(net) >>> attack = GeneticAttack(model, sparse=False) + >>> batch_size = 6 + >>> x_test = np.random.rand(batch_size, 10) + >>> y_test = np.random.randint(low=0, high=10, size=batch_size) + >>> y_test = np.eye(10)[y_test] + >>> y_test = y_test.astype(np.float32) + >>> _, adv_data, _ = attack.generate(x_test, y_test) """ def __init__(self, model, model_type='classification', targeted=True, reserve_ratio=0.3, sparse=True, pop_size=6, mutation_rate=0.005, per_bounds=0.15, max_steps=1000, step_size=0.20, temp=0.3, @@ -235,14 +241,6 @@ class GeneticAttack(Attack): - numpy.ndarray, generated adversarial examples. - numpy.ndarray, query times for each sample. - - Examples: - >>> batch_size = 6 - >>> x_test = np.random.rand(batch_size, 10) - >>> y_test = np.random.randint(low=0, high=10, size=batch_size) - >>> y_test = np.eye(10)[y_test] - >>> y_test = y_test.astype(np.float32) - >>> _, adv_data, _ = attack._generate_classification(x_test, y_test) """ inputs, labels = check_pair_numpy_param('inputs', inputs, 'labels', labels) if self._sparse: @@ -346,14 +344,6 @@ class GeneticAttack(Attack): - numpy.ndarray, generated adversarial examples. - numpy.ndarray, query times for each sample. - - Examples: - >>> batch_size = 6 - >>> x_test = np.random.rand(batch_size, 10) - >>> y_test = np.random.randint(low=0, high=10, size=batch_size) - >>> y_test = np.eye(10)[y_test] - >>> y_test = y_test.astype(np.float32) - >>> _, adv_data, _ = attack._generate_detection(x_test, y_test) """ images, auxiliary_inputs, gt_boxes, gt_labels = check_detection_inputs(inputs, labels) adv_list = [] @@ -458,14 +448,6 @@ class GeneticAttack(Attack): - numpy.ndarray, generated adversarial examples. - numpy.ndarray, query times for each sample. 
- - Examples: - >>> batch_size = 6 - >>> x_test = np.random.rand(batch_size, 10) - >>> y_test = np.random.randint(low=0, high=10, size=batch_size) - >>> y_test = np.eye(10)[y_test] - >>> y_test = y_test.astype(np.float32) - >>> _, adv_data, _ = attack.generate(x_test, y_test) """ if self._model_type == 'classification': success_list, adv_data, query_time_list = self._generate_classification(inputs, labels) diff --git a/mindarmour/adv_robustness/attacks/black/hop_skip_jump_attack.py b/mindarmour/adv_robustness/attacks/black/hop_skip_jump_attack.py index 51b74af..b0038a4 100644 --- a/mindarmour/adv_robustness/attacks/black/hop_skip_jump_attack.py +++ b/mindarmour/adv_robustness/attacks/black/hop_skip_jump_attack.py @@ -92,6 +92,11 @@ class HopSkipJumpAttack(Attack): >>> net = Net() >>> model = ModelToBeAttacked(net) >>> attack = HopSkipJumpAttack(model) + >>> n, c, h, w = 1, 1, 32, 32 + >>> class_num = 3 + >>> x_test = np.asarray(np.random.random((n,c,h,w)), np.float32) + >>> y_test = np.random.randint(0, class_num, size=n) + >>> _, adv_x, _= attack.generate(x_test, y_test) """ def __init__(self, model, init_num_evals=100, max_num_evals=1000, @@ -183,30 +188,6 @@ class HopSkipJumpAttack(Attack): - numpy.ndarray, generated adversarial examples. - numpy.ndarray, query times for each sample. - - Examples: - >>> import numpy as np - >>> from mindspore import Tensor - >>> from mindarmour import BlackModel - >>> from mindarmour.adv_robustness.attacks import HopSkipJumpAttack - >>> from tests.ut.python.utils.mock_net import Net - >>> class ModelToBeAttacked(BlackModel): - ... def __init__(self, network): - ... super(ModelToBeAttacked, self).__init__() - ... self._network = network - ... def predict(self, inputs): - ... if len(inputs.shape) == 3: - ... inputs = inputs[np.newaxis, :] - ... result = self._network(Tensor(inputs.astype(np.float32))) - ... return result.asnumpy() - >>> net = Net() - >>> model = ModelToBeAttacked(net) - >>> attack = HopSkipJumpAttack(model) - >>> n, c, h, w = 1, 1, 32, 32 - >>> class_num = 3 - >>> x_test = np.asarray(np.random.random((n,c,h,w)), np.float32) - >>> y_test = np.random.randint(0, class_num, size=n) - >>> _, adv_x, _= attack.generate(x_test, y_test) """ if labels is not None: inputs, labels = check_pair_numpy_param('inputs', inputs, diff --git a/mindarmour/adv_robustness/attacks/black/natural_evolutionary_strategy.py b/mindarmour/adv_robustness/attacks/black/natural_evolutionary_strategy.py index 49780f4..bcb0596 100644 --- a/mindarmour/adv_robustness/attacks/black/natural_evolutionary_strategy.py +++ b/mindarmour/adv_robustness/attacks/black/natural_evolutionary_strategy.py @@ -98,6 +98,13 @@ class NES(Attack): >>> SCENE = 'Query_Limit' >>> TOP_K = -1 >>> attack= NES(model, SCENE, top_k=TOP_K) + >>> num_class = 5 + >>> x_test = np.asarray(np.random.random((1, 1, 32, 32)), np.float32) + >>> target_image = np.asarray(np.random.random((1, 1, 32, 32)), np.float32) + >>> orig_class = 0 + >>> target_class = 2 + >>> attack.set_target_images(target_image) + >>> tag, adv, queries = attack.generate(np.array(x_test), np.array([target_class])) """ def __init__(self, model, scene, max_queries=10000, top_k=-1, num_class=10, batch_size=128, epsilon=0.3, @@ -153,34 +160,6 @@ class NES(Attack): ValueError: If the top_k less than 0 in Label-Only or Partial-Info setting. ValueError: If the target_imgs is None in Label-Only or Partial-Info setting. 
ValueError: If scene is not in ['Label_Only', 'Partial_Info', 'Query_Limit'] - - Examples: - >>> import numpy as np - >>> from mindspore import Tensor - >>> from mindarmour import BlackModel - >>> from mindarmour.adv_robustness.attacks import NES - >>> from tests.ut.python.utils.mock_net import Net - >>> class ModelToBeAttacked(BlackModel): - ... def __init__(self, network): - ... super(ModelToBeAttacked, self).__init__() - ... self._network = network - ... def predict(self, inputs): - ... if len(inputs.shape) == 3: - ... inputs = inputs[np.newaxis, :] - ... result = self._network(Tensor(inputs.astype(np.float32))) - ... return result.asnumpy() - >>> net = Net() - >>> model = ModelToBeAttacked(net) - >>> SCENE = 'Query_Limit' - >>> TOP_K = -1 - >>> attack= NES(model, SCENE, top_k=TOP_K) - >>> num_class = 5 - >>> x_test = np.asarray(np.random.random((32, 32)), np.float32) - >>> target_image = np.asarray(np.random.random((32, 32)), np.float32) - >>> orig_class = 0 - >>> target_class = 2 - >>> attack.set_target_images(target_image) - >>> tag, adv, queries = attack.generate(np.array(x_test), np.array([target_class])) """ inputs, labels = check_pair_numpy_param('inputs', inputs, 'labels', labels) if not self._sparse: diff --git a/mindarmour/adv_robustness/attacks/black/pointwise_attack.py b/mindarmour/adv_robustness/attacks/black/pointwise_attack.py index d5be8b0..4c9ccc5 100644 --- a/mindarmour/adv_robustness/attacks/black/pointwise_attack.py +++ b/mindarmour/adv_robustness/attacks/black/pointwise_attack.py @@ -60,8 +60,12 @@ class PointWiseAttack(Attack): ... result = self._network(Tensor(inputs.astype(np.float32))) ... return result.asnumpy() >>> net = Net() + >>> np.random.seed(5) >>> model = ModelToBeAttacked(net) >>> attack = PointWiseAttack(model) + >>> x_test = np.asarray(np.random.random((1,1,32,32)), np.float32) + >>> y_test = np.random.randint(0, 3, size=1) + >>> is_adv_list, adv_list, query_times_each_adv = attack.generate(x_test, y_test) """ def __init__(self, model, max_iter=1000, search_iter=10, is_targeted=False, init_attack=None, sparse=True): @@ -91,26 +95,6 @@ class PointWiseAttack(Attack): - numpy.ndarray, generated adversarial examples. - numpy.ndarray, query times for each sample. - - Examples: - >>> import numpy as np - >>> from mindspore import Tensor - >>> from mindarmour import BlackModel - >>> from mindarmour.adv_robustness.attacks import PointWiseAttack - >>> from tests.ut.python.utils.mock_net import Net - >>> class ModelToBeAttacked(BlackModel): - ... def __init__(self, network): - ... super(ModelToBeAttacked, self).__init__() - ... self._network = network - ... def predict(self, inputs): - ... result = self._network(Tensor(inputs.astype(np.float32))) - ... return result.asnumpy() - >>> net = Net() - >>> model = ModelToBeAttacked(net) - >>> attack = PointWiseAttack(model) - >>> x_test = np.asarray(np.random.random((1,1,32,32)), np.float32) - >>> y_test = np.random.randint(0, 3, size=1) - >>> is_adv_list, adv_list, query_times_each_adv = attack.generate(x_test, y_test) """ arr_x, arr_y = check_pair_numpy_param('inputs', inputs, 'labels', labels) if not self._sparse: diff --git a/mindarmour/adv_robustness/attacks/black/pso_attack.py b/mindarmour/adv_robustness/attacks/black/pso_attack.py index f779318..b5ed105 100644 --- a/mindarmour/adv_robustness/attacks/black/pso_attack.py +++ b/mindarmour/adv_robustness/attacks/black/pso_attack.py @@ -83,13 +83,18 @@ class PSOAttack(Attack): ... def __init__(self): ... super(Net, self).__init__() ... self._relu = nn.ReLU() - ... 
... def construct(self, inputs): ... out = self._relu(inputs) ... return out >>> net = Net() >>> model = ModelToBeAttacked(net) >>> attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=False) + >>> batch_size = 6 + >>> x_test = np.random.rand(batch_size, 10) + >>> y_test = np.random.randint(low=0, high=10, size=batch_size) + >>> y_test = np.eye(10)[y_test] + >>> y_test = y_test.astype(np.float32) + >>> _, adv_data, _ = attack.generate(x_test, y_test) """ def __init__(self, model, model_type='classification', targeted=False, reserve_ratio=0.3, sparse=True, @@ -228,17 +233,6 @@ class PSOAttack(Attack): - numpy.ndarray, generated adversarial examples. - numpy.ndarray, query times for each sample. - - Examples: - >>> net = Net() - >>> model = ModelToBeAttacked(net) - >>> attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=False) - >>> batch_size = 6 - >>> x_test = np.random.rand(batch_size, 10) - >>> y_test = np.random.randint(low=0, high=10, size=batch_size) - >>> y_test = np.eye(10)[y_test] - >>> y_test = y_test.astype(np.float32) - >>> _, adv_data, _ = attack.generate(x_test, y_test) """ # inputs check inputs, labels = check_pair_numpy_param('inputs', inputs, @@ -507,40 +501,6 @@ class PSOAttack(Attack): - numpy.ndarray, generated adversarial examples. - numpy.ndarray, query times for each sample. - - Examples: - >>> import numpy as np - >>> import mindspore.nn as nn - >>> from mindspore import Tensor - >>> from mindspore.nn import Cell - >>> from mindarmour import BlackModel - >>> from mindarmour.adv_robustness.attacks import PSOAttack - >>> class ModelToBeAttacked(BlackModel): - ... def __init__(self, network): - ... super(ModelToBeAttacked, self).__init__() - ... self._network = network - ... def predict(self, inputs): - ... if len(inputs.shape) == 1: - ... inputs = np.expand_dims(inputs, axis=0) - ... result = self._network(Tensor(inputs.astype(np.float32))) - ... return result.asnumpy() - >>> class Net(Cell): - ... def __init__(self): - ... super(Net, self).__init__() - ... self._relu = nn.ReLU() - ... - ... def construct(self, inputs): - ... out = self._relu(inputs) - ... return out - >>> net = Net() - >>> model = ModelToBeAttacked(net) - >>> attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=False) - >>> batch_size = 6 - >>> x_test = np.random.rand(batch_size, 10) - >>> y_test = np.random.randint(low=0, high=10, size=batch_size) - >>> y_test = np.eye(10)[y_test] - >>> y_test = y_test.astype(np.float32) - >>> _, adv_data, _ = attack.generate(x_test, y_test) """ # inputs check if self._model_type == 'classification': diff --git a/mindarmour/adv_robustness/attacks/black/salt_and_pepper_attack.py b/mindarmour/adv_robustness/attacks/black/salt_and_pepper_attack.py index dc7ff9f..003f638 100644 --- a/mindarmour/adv_robustness/attacks/black/salt_and_pepper_attack.py +++ b/mindarmour/adv_robustness/attacks/black/salt_and_pepper_attack.py @@ -55,6 +55,9 @@ class SaltAndPepperNoiseAttack(Attack): >>> net = Net() >>> model = ModelToBeAttacked(net) >>> attack = SaltAndPepperNoiseAttack(model) + >>> x_test = np.asarray(np.random.random((1,1,32,32)), np.float32) + >>> y_test = np.random.randint(0, 3, size=1) + >>> _, adv_list, _ = attack.generate(x_test, y_test) """ def __init__(self, model, bounds=(0.0, 1.0), max_iter=100, is_targeted=False, sparse=True): @@ -81,26 +84,6 @@ class SaltAndPepperNoiseAttack(Attack): - numpy.ndarray, generated adversarial examples. - numpy.ndarray, query times for each sample. 
- - Examples: - >>> import numpy as np - >>> from mindspore import Tensor - >>> from mindarmour import BlackModel - >>> from mindarmour.adv_robustness.attacks import SaltAndPepperNoiseAttack - >>> from tests.ut.python.utils.mock_net import Net - >>> class ModelToBeAttacked(BlackModel): - ... def __init__(self, network): - ... super(ModelToBeAttacked, self).__init__() - ... self._network = network - ... def predict(self, inputs): - ... result = self._network(Tensor(inputs.astype(np.float32))) - ... return result.asnumpy() - >>> net = Net() - >>> model = ModelToBeAttacked(net) - >>> attack = PointWiseAttack(model) - >>> x_test = np.asarray(np.random.random((1,1,32,32)), np.float32) - >>> y_test = np.random.randint(0, 3, size=1) - >>> _, adv_list, _ = attack.generate(x_test, y_test) """ arr_x, arr_y = check_pair_numpy_param('inputs', inputs, 'labels', labels) if not self._sparse: diff --git a/mindarmour/adv_robustness/attacks/carlini_wagner.py b/mindarmour/adv_robustness/attacks/carlini_wagner.py index 7b200af..67c3a70 100644 --- a/mindarmour/adv_robustness/attacks/carlini_wagner.py +++ b/mindarmour/adv_robustness/attacks/carlini_wagner.py @@ -103,14 +103,15 @@ class CarliniWagnerL2Attack(Attack): ... def __init__(self): ... super(Net, self).__init__() ... self._softmax = M.Softmax() - ... ... def construct(self, inputs): ... out = self._softmax(inputs) ... return out + >>> net = Net() >>> input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32) - >>> label_np = np.array([3]).astype(np.int64) >>> num_classes = input_np.shape[1] + >>> label_np = np.array([3]).astype(np.int64) >>> attack = CarliniWagnerL2Attack(net, num_classes, targeted=False) + >>> adv_data = attack.generate(input_np, label_np) """ def __init__(self, network, num_classes, box_min=0.0, box_max=1.0, @@ -280,28 +281,6 @@ class CarliniWagnerL2Attack(Attack): Returns: numpy.ndarray, generated adversarial examples. - - Examples: - >>> import numpy as np - >>> import mindspore.ops.operations as M - >>> from mindspore.nn import Cell - >>> from mindarmour.adv_robustness.attacks import CarliniWagnerL2Attack - >>> class Net(Cell): - ... def __init__(self): - ... super(Net, self).__init__() - ... self._softmax = M.Softmax() - ... - ... def construct(self, inputs): - ... out = self._softmax(inputs) - ... return out - >>> input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32) - >>> num_classes = input_np.shape[1] - >>> label_np = np.array([3]).astype(np.int64) - >>> attack_nonTargeted = CarliniWagnerL2Attack(net, num_classes, targeted=False) - >>> advs_nonTargeted = attack_nonTargeted.generate(input_np, label_np) - >>> target_np = np.array([1]).astype(np.int64) - >>> attack_targeted = CarliniWagnerL2Attack(net, num_classes, targeted=False) - >>> advs_targeted = attack_targeted.generate(input_np, target_np) """ LOGGER.debug(TAG, "enter the func generate.") diff --git a/mindarmour/adv_robustness/attacks/deep_fool.py b/mindarmour/adv_robustness/attacks/deep_fool.py index d8dd5b9..819a20d 100644 --- a/mindarmour/adv_robustness/attacks/deep_fool.py +++ b/mindarmour/adv_robustness/attacks/deep_fool.py @@ -130,8 +130,14 @@ class DeepFool(Attack): ... out = self._softmax(inputs) ... return out >>> net = Net() + >>> input_shape = (1, 5) + >>> _, classes = input_shape >>> attack = DeepFool(net, classes, max_iters=10, norm_level=2, ... 
bounds=(0.0, 1.0)) + >>> input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32) + >>> input_me = Tensor(input_np) + >>> true_labels = np.argmax(net(input_me).asnumpy(), axis=1) + >>> advs = attack.generate(input_np, true_labels) """ def __init__(self, network, num_classes, model_type='classification', @@ -177,30 +183,6 @@ class DeepFool(Attack): Raises: NotImplementedError: If norm_level is not in [2, np.inf, '2', 'inf']. - - Examples: - >>> import numpy as np - >>> import mindspore.ops.operations as P - >>> from mindspore.nn import Cell - >>> from mindspore import Tensor - >>> from mindarmour.adv_robustness.attacks import DeepFool - >>> class Net(Cell): - ... def __init__(self): - ... super(Net, self).__init__() - ... self._softmax = P.Softmax() - ... def construct(self, inputs): - ... out = self._softmax(inputs) - ... return out - >>> net = Net() - >>> attack = DeepFool(net, classes, max_iters=10, norm_level=2, - ... bounds=(0.0, 1.0)) - >>> input_shape = (1, 5) - >>> _, classes = input_shape - >>> input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32) - >>> input_me = Tensor(input_np) - >>> true_labels = np.argmax(net(input_me).asnumpy(), axis=1) - >>> attack = DeepFool(net, classes, max_iters=10, norm_level=2, bounds=(0.0, 1.0)) - >>> advs = attack.generate(input_np, true_labels) """ if self._model_type == 'detection': diff --git a/mindarmour/adv_robustness/attacks/gradient_method.py b/mindarmour/adv_robustness/attacks/gradient_method.py index d1d2900..5f033e1 100644 --- a/mindarmour/adv_robustness/attacks/gradient_method.py +++ b/mindarmour/adv_robustness/attacks/gradient_method.py @@ -45,26 +45,6 @@ class GradientMethod(Attack): In form of (clip_min, clip_max). Default: None. loss_fn (Loss): Loss function for optimization. If None, the input network \ is already equipped with loss function. Default: None. - - Examples: - >>> import numpy as np - >>> import mindspore.nn as nn - >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits - >>> from mindspore import Tensor - >>> from mindarmour.adv_robustness.attacks import FastGradientMethod - >>> class Net(Cell): - ... def __init__(self): - ... super(Net, self).__init__() - ... self._relu = nn.ReLU() - ... - ... def construct(self, inputs): - ... out = self._relu(inputs) - ... return out - >>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]]) - >>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]) - >>> net = Net() - >>> attack = FastGradientMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False)) - >>> adv_x = attack.generate(inputs, labels) """ def __init__(self, network, eps=0.07, alpha=None, bounds=None, @@ -180,8 +160,9 @@ class FastGradientMethod(GradientMethod): ... def construct(self, inputs): ... out = self._relu(inputs) ... return out - >>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]]) - >>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]) + >>> inputs = np.asarray([[0.1, 0.2, 0.7]], np.float32) + >>> labels = np.asarray([2],np.int32) + >>> labels = np.eye(3)[labels].astype(np.float32) >>> net = Net() >>> attack = FastGradientMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False)) >>> adv_x = attack.generate(inputs, labels) @@ -261,8 +242,9 @@ class RandomFastGradientMethod(FastGradientMethod): ... out = self._relu(inputs) ... 
return out >>> net = Net() - >>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]]) - >>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]) + >>> inputs = np.asarray([[0.1, 0.2, 0.7]], np.float32) + >>> labels = np.asarray([2],np.int32) + >>> labels = np.eye(3)[labels].astype(np.float32) >>> attack = RandomFastGradientMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False)) >>> adv_x = attack.generate(inputs, labels) """ @@ -315,8 +297,9 @@ class FastGradientSignMethod(GradientMethod): ... out = self._relu(inputs) ... return out >>> net = Net() - >>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]]) - >>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]) + >>> inputs = np.asarray([[0.1, 0.2, 0.7]], np.float32) + >>> labels = np.asarray([2],np.int32) + >>> labels = np.eye(3)[labels].astype(np.float32) >>> attack = FastGradientSignMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False)) >>> adv_x = attack.generate(inputs, labels) """ @@ -391,8 +374,9 @@ class RandomFastGradientSignMethod(FastGradientSignMethod): ... out = self._relu(inputs) ... return out >>> net = Net() - >>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]]) - >>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]) + >>> inputs = np.asarray([[0.1, 0.2, 0.7]], np.float32) + >>> labels = np.asarray([2],np.int32) + >>> labels = np.eye(3)[labels].astype(np.float32) >>> attack = RandomFastGradientSignMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False)) >>> adv_x = attack.generate(inputs, labels) """ @@ -439,9 +423,10 @@ class LeastLikelyClassMethod(FastGradientSignMethod): ... def construct(self, inputs): ... out = self._relu(inputs) ... return out + >>> inputs = np.asarray([[0.1, 0.2, 0.7]], np.float32) + >>> labels = np.asarray([2],np.int32) + >>> labels = np.eye(3)[labels].astype(np.float32) >>> net = Net() - >>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]]) - >>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]) >>> attack = LeastLikelyClassMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False)) >>> adv_x = attack.generate(inputs, labels) """ @@ -489,10 +474,11 @@ class RandomLeastLikelyClassMethod(FastGradientSignMethod): ... def construct(self, inputs): ... out = self._relu(inputs) ... return out + >>> inputs = np.asarray([[0.1, 0.2, 0.7]], np.float32) + >>> labels = np.asarray([2],np.int32) + >>> labels = np.eye(3)[labels].astype(np.float32) >>> net = Net() - >>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]]) - >>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]) - >>> attack = RandomLeastLikelyClassMethod(network, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False)) + >>> attack = RandomLeastLikelyClassMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False)) >>> adv_x = attack.generate(inputs, labels) """ diff --git a/mindarmour/adv_robustness/attacks/iterative_gradient_method.py b/mindarmour/adv_robustness/attacks/iterative_gradient_method.py index 5c1a01d..d95e5a8 100644 --- a/mindarmour/adv_robustness/attacks/iterative_gradient_method.py +++ b/mindarmour/adv_robustness/attacks/iterative_gradient_method.py @@ -147,12 +147,6 @@ class IterativeGradientMethod(Attack): Raises: NotImplementedError: This function is not available in IterativeGradientMethod. 
-
-        Examples:
-            >>> adv_x = attack.generate([[0.1, 0.9, 0.6],
-            >>> [0.3, 0, 0.3]],
-            >>> [[0, , 1, 0, 0, 0, 0, 0, 0, 0],
-            >>> [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]])
         """
         msg = 'The function generate() is an abstract method in class ' \
               '`IterativeGradientMethod`, and should be implemented ' \
@@ -186,17 +180,23 @@ class BasicIterativeMethod(IterativeGradientMethod):
     Examples:
         >>> import numpy as np
         >>> import mindspore.nn as nn
+        >>> from mindspore.ops import operations as P
         >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
         >>> from mindarmour.adv_robustness.attacks import BasicIterativeMethod
         >>> class Net(Cell):
         ...     def __init__(self):
         ...         super(Net, self).__init__()
-        ...         self._relu = nn.ReLU()
+        ...         self._softmax = P.Softmax()
         ...     def construct(self, inputs):
-        ...         out = self._relu(inputs)
+        ...         out = self._softmax(inputs)
         ...         return out
         >>> net = Net()
         >>> attack = BasicIterativeMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
+        >>> inputs = np.asarray([[0.1, 0.2, 0.7]], np.float32)
+        >>> labels = np.asarray([2],np.int32)
+        >>> labels = np.eye(3)[labels].astype(np.float32)
+        >>> adv_x = attack.generate(inputs, labels)
     """
     def __init__(self, network, eps=0.3, eps_iter=0.1, bounds=(0.0, 1.0),
                  is_targeted=False, nb_iter=5, loss_fn=None):
@@ -225,25 +225,6 @@ class BasicIterativeMethod(IterativeGradientMethod):
             For each input if it has more than one label, it is wrapped in a tuple.
         Returns:
             numpy.ndarray, generated adversarial examples.
-
-        Examples:
-            >>> import numpy as np
-            >>> import mindspore.nn as nn
-            >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
-            >>> from mindarmour.adv_robustness.attacks import BasicIterativeMethod
-            >>> class Net(Cell):
-            ...     def __init__(self):
-            ...         super(Net, self).__init__()
-            ...         self._relu = nn.ReLU()
-            ...     def construct(self, inputs):
-            ...         out = self._relu(inputs)
-            ...         return out
-            >>> net = Net()
-            >>> attack = BasicIterativeMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
-            >>> adv_x = attack.generate([[0.3, 0.2, 0.6],
-            ...                          [0.3, 0.2, 0.4]],
-            ...                         [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
-            ...                          [0, 0, 0, 0, 0, 0, 1, 0, 0, 0]])
         """
         inputs_image, inputs, labels = check_inputs_labels(inputs, labels)
         arr_x = inputs_image
@@ -299,6 +280,27 @@ class MomentumIterativeMethod(IterativeGradientMethod):
             np.inf, 1 or 2. Default: 'inf'.
         loss_fn (Loss): Loss function for optimization. If None, the input network \
             is already equipped with loss function. Default: None.
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore.nn as nn
+        >>> from mindspore.ops import operations as P
+        >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
+        >>> from mindarmour.adv_robustness.attacks import MomentumIterativeMethod
+        >>> class Net(Cell):
+        ...     def __init__(self):
+        ...         super(Net, self).__init__()
+        ...         self._softmax = P.Softmax()
+        ...     def construct(self, inputs):
+        ...         out = self._softmax(inputs)
+        ...         return out
+        >>> net = Net()
+        >>> attack = MomentumIterativeMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
+        >>> inputs = np.asarray([[0.1, 0.2, 0.7]], np.float32)
+        >>> labels = np.asarray([2],np.int32)
+        >>> labels = np.eye(3)[labels].astype(np.float32)
+        >>> adv_x = attack.generate(inputs, labels)
     """

     def __init__(self, network, eps=0.3, eps_iter=0.1, bounds=(0.0, 1.0),
@@ -326,25 +328,6 @@ class MomentumIterativeMethod(IterativeGradientMethod):
         Returns:
             numpy.ndarray, generated adversarial examples.
-
-        Examples:
-            >>> import numpy as np
-            >>> import mindspore.nn as nn
-            >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
-            >>> from mindarmour.adv_robustness.attacks import MomentumIterativeMethod
-            >>> class Net(Cell):
-            ...     def __init__(self):
-            ...         super(Net, self).__init__()
-            ...         self._relu = nn.ReLU()
-            ...     def construct(self, inputs):
-            ...         out = self._relu(inputs)
-            ...         return out
-            >>> net = Net()
-            >>> attack = MomentumIterativeMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
-            >>> adv_x = attack.generate([[0.5, 0.2, 0.6],
-            ...                          [0.3, 0, 0.2]],
-            ...                         [[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
-            ...                          [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]])
         """
         inputs_image, inputs, labels = check_inputs_labels(inputs, labels)
         arr_x = inputs_image
@@ -443,6 +426,27 @@ class ProjectedGradientDescent(BasicIterativeMethod):
             np.inf, 1 or 2. Default: 'inf'.
         loss_fn (Loss): Loss function for optimization. If None, the input network \
             is already equipped with loss function. Default: None.
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore.nn as nn
+        >>> from mindspore.ops import operations as P
+        >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
+        >>> from mindarmour.adv_robustness.attacks import ProjectedGradientDescent
+        >>> class Net(Cell):
+        ...     def __init__(self):
+        ...         super(Net, self).__init__()
+        ...         self._softmax = P.Softmax()
+        ...     def construct(self, inputs):
+        ...         out = self._softmax(inputs)
+        ...         return out
+        >>> net = Net()
+        >>> attack = ProjectedGradientDescent(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
+        >>> inputs = np.asarray([[0.1, 0.2, 0.7]], np.float32)
+        >>> labels = np.asarray([2],np.int32)
+        >>> labels = np.eye(3)[labels].astype(np.float32)
+        >>> adv_x = attack.generate(inputs, labels)
     """

     def __init__(self, network, eps=0.3, eps_iter=0.1, bounds=(0.0, 1.0),
@@ -469,25 +473,6 @@ class ProjectedGradientDescent(BasicIterativeMethod):
         Returns:
             numpy.ndarray, generated adversarial examples.
-
-        Examples:
-            >>> import numpy as np
-            >>> import mindspore.nn as nn
-            >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
-            >>> from mindarmour.adv_robustness.attacks import ProjectedGradientDescent
-            >>> class Net(Cell):
-            ...     def __init__(self):
-            ...         super(Net, self).__init__()
-            ...         self._relu = nn.ReLU()
-            ...     def construct(self, inputs):
-            ...         out = self._relu(inputs)
-            ...         return out
-            >>> net = Net()
-            >>> attack = ProjectedGradientDescent(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
-            >>> adv_x = attack.generate([[0.6, 0.2, 0.6],
-            ...                          [0.3, 0.3, 0.4]],
-            ...                         [[0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
-            ...                          [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
         """
         inputs_image, inputs, labels = check_inputs_labels(inputs, labels)
         arr_x = inputs_image
@@ -539,6 +524,27 @@ class DiverseInputIterativeMethod(BasicIterativeMethod):
         prob (float): Transformation probability. Default: 0.5.
         loss_fn (Loss): Loss function for optimization. If None, the input network \
             is already equipped with loss function. Default: None.
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore.nn as nn
+        >>> from mindspore.ops import operations as P
+        >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
+        >>> from mindarmour.adv_robustness.attacks import DiverseInputIterativeMethod
+        >>> class Net(Cell):
+        ...     def __init__(self):
+        ...         super(Net, self).__init__()
+        ...         self._softmax = P.Softmax()
+        ...     def construct(self, inputs):
+        ...         out = self._softmax(inputs)
+        ...         return out
+        >>> net = Net()
+        >>> attack = DiverseInputIterativeMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
+        >>> inputs = np.asarray([[0.1, 0.2, 0.7]], np.float32)
+        >>> labels = np.asarray([2],np.int32)
+        >>> labels = np.eye(3)[labels].astype(np.float32)
+        >>> adv_x = attack.generate(inputs, labels)
     """
     def __init__(self, network, eps=0.3, bounds=(0.0, 1.0), is_targeted=False,
                  prob=0.5, loss_fn=None):
@@ -575,6 +581,27 @@ class MomentumDiverseInputIterativeMethod(MomentumIterativeMethod):
         prob (float): Transformation probability. Default: 0.5.
         loss_fn (Loss): Loss function for optimization. If None, the input network \
             is already equipped with loss function. Default: None.
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore.nn as nn
+        >>> from mindspore.ops import operations as P
+        >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
+        >>> from mindarmour.adv_robustness.attacks import MomentumDiverseInputIterativeMethod
+        >>> class Net(Cell):
+        ...     def __init__(self):
+        ...         super(Net, self).__init__()
+        ...         self._softmax = P.Softmax()
+        ...     def construct(self, inputs):
+        ...         out = self._softmax(inputs)
+        ...         return out
+        >>> net = Net()
+        >>> attack = MomentumDiverseInputIterativeMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
+        >>> inputs = np.asarray([[0.1, 0.2, 0.7]], np.float32)
+        >>> labels = np.asarray([2],np.int32)
+        >>> labels = np.eye(3)[labels].astype(np.float32)
+        >>> adv_x = attack.generate(inputs, labels)
     """
     def __init__(self, network, eps=0.3, bounds=(0.0, 1.0), is_targeted=False,
                  norm_level='l1', prob=0.5, loss_fn=None):
diff --git a/mindarmour/adv_robustness/attacks/jsma.py b/mindarmour/adv_robustness/attacks/jsma.py
index 3b9da65..a91750b 100644
--- a/mindarmour/adv_robustness/attacks/jsma.py
+++ b/mindarmour/adv_robustness/attacks/jsma.py
@@ -68,7 +68,10 @@ class JSMAAttack(Attack):
         >>> net = Net()
         >>> input_shape = (1, 5)
         >>> batch_size, classes = input_shape
+        >>> input_np = np.random.random(input_shape).astype(np.float32)
+        >>> label_np = np.random.randint(classes, size=batch_size)
         >>> attack = JSMAAttack(net, classes, max_iteration=5)
+        >>> advs = attack.generate(input_np, label_np)
     """

     def __init__(self, network, num_classes, box_min=0.0, box_max=1.0,
@@ -193,26 +196,6 @@ class JSMAAttack(Attack):
         Returns:
             numpy.ndarray, adversarial samples.
-
-        Examples:
-            >>> import numpy as np
-            >>> import mindspore.nn as nn
-            >>> from mindspore.nn import Cell
-            >>> from mindarmour.adv_robustness.attacks import JSMAAttack
-            >>> class Net(Cell):
-            ...     def __init__(self):
-            ...         super(Net, self).__init__()
-            ...         self._relu = nn.ReLU()
-            ...     def construct(self, inputs):
-            ...         out = self._relu(inputs)
-            ...         return out
-            >>> net = Net()
-            >>> input_shape = (1, 5)
-            >>> batch_size, classes = input_shape
-            >>> input_np = np.random.random(input_shape).astype(np.float32)
-            >>> label_np = np.random.randint(classes, size=batch_size)
-            >>> attack = JSMAAttack(net, classes, max_iteration=5)
-            >>> advs = attack.generate(input_np, label_np)
         """
         inputs, labels = check_pair_numpy_param('inputs', inputs,
                                                 'labels', labels)
diff --git a/mindarmour/adv_robustness/attacks/lbfgs.py b/mindarmour/adv_robustness/attacks/lbfgs.py
index af5e2ee..75783da 100644
--- a/mindarmour/adv_robustness/attacks/lbfgs.py
+++ b/mindarmour/adv_robustness/attacks/lbfgs.py
@@ -58,7 +58,13 @@ class LBFGS(Attack):
         >>> from mindarmour.adv_robustness.attacks import LBFGS
         >>> from tests.ut.python.utils.mock_net import Net
         >>> net = Net()
+        >>> classes = 10
         >>> attack = LBFGS(net, is_targeted=True)
+        >>> input_np = np.asarray(np.random.random((1,1,32,32)), np.float32)
+        >>> label_np = np.array([3]).astype(np.int64)
+        >>> target_np = np.array([7]).astype(np.int64)
+        >>> target_np = np.eye(classes)[target_np].astype(np.float32)
+        >>> adv = attack.generate(input_np, target_np)
     """
     def __init__(self, network, eps=1e-5, bounds=(0.0, 1.0), is_targeted=True,
                  nb_iter=150, search_iters=30, loss_fn=None, sparse=False):
@@ -96,14 +102,6 @@ class LBFGS(Attack):
         Returns:
             numpy.ndarray, generated adversarial examples.
-
-        Examples:
-            >>> import numpy as np
-            >>> from mindarmour.adv_robustness.attacks import LBFGS
-            >>> from tests.ut.python.utils.mock_net import Net
-            >>> net = Net()
-            >>> attack = LBFGS(net, is_targeted=True)
-            >>> adv = attack.generate([[0.1, 0.2, 0.6], [0.3, 0, 0.4]], [2, 2])
         """
         LOGGER.debug(TAG, 'start to generate adv image.')
         arr_x, arr_y = check_pair_numpy_param('inputs', inputs, 'labels', labels)
diff --git a/mindarmour/adv_robustness/defenses/adversarial_defense.py b/mindarmour/adv_robustness/defenses/adversarial_defense.py
index 3ec5aa3..a5dc204 100644
--- a/mindarmour/adv_robustness/defenses/adversarial_defense.py
+++ b/mindarmour/adv_robustness/defenses/adversarial_defense.py
@@ -45,8 +45,8 @@ class AdversarialDefense(Defense):
         >>> lr = 0.001
         >>> momentum = 0.9
         >>> batch_size = 32
-        >>> num_class = 10
-        >>> loss_fn = SoftmaxCrossEntropyWithLogits(sparse=False)
+        >>> num_classes = 10
+        >>> loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
         >>> optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum)
         >>> adv_defense = AdversarialDefense(net, loss_fn, optimizer)
         >>> inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
@@ -81,6 +81,9 @@ class AdversarialDefense(Defense):

         Returns:
             numpy.ndarray, loss of defense operation.
+
+        Examples:
+            >>> adv_defense.defense(inputs, labels)
         """
         inputs, labels = check_pair_numpy_param('inputs', inputs,
                                                 'labels', labels)
@@ -110,22 +113,22 @@ class AdversarialDefenseWithAttacks(AdversarialDefense):
         >>> from mindspore.nn.optim.momentum import Momentum
         >>> from mindarmour.adv_robustness.attacks import FastGradientSignMethod
         >>> from mindarmour.adv_robustness.attacks import ProjectedGradientDescent
-        >>> from mindarmour.adv_robustness.defenses import AdversarialDefense
+        >>> from mindarmour.adv_robustness.defenses import AdversarialDefenseWithAttacks
         >>> from mindspore import nn
         >>> from tests.ut.python.utils.mock_net import Net
         >>> net = Net()
         >>> lr = 0.001
         >>> momentum = 0.9
         >>> batch_size = 32
-        >>> num_class = 10
+        >>> num_classes = 10
         >>> loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
         >>> optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum)
         >>> fgsm = FastGradientSignMethod(net, loss_fn=loss_fn)
         >>> pgd = ProjectedGradientDescent(net, loss_fn=loss_fn)
-        >>> ead = AdversarialDefenseWithAttack(net, [fgsm, pgd], loss_fn=loss_fn,
-        ...                                   optimizer=optimizer)
+        >>> ead = AdversarialDefenseWithAttacks(net, [fgsm, pgd], loss_fn=loss_fn,
+        ...                                     optimizer=optimizer)
         >>> inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
-        >>> labels = np.random.randint(num_class, size=batch_size).astype(np.int32)
+        >>> labels = np.random.randint(num_classes, size=batch_size).astype(np.int32)
         >>> labels = np.eye(num_classes)[labels].astype(np.float32)
         >>> loss = ead.defense(inputs, labels)
     """
@@ -154,6 +157,9 @@ class AdversarialDefenseWithAttacks(AdversarialDefense):

         Returns:
             numpy.ndarray, loss of adversarial defense operation.
+
+        Examples:
+            >>> ead.defense(inputs, labels)
         """
         inputs, labels = check_pair_numpy_param('inputs', inputs,
                                                 'labels', labels)
@@ -205,7 +211,7 @@ class EnsembleAdversarialDefense(AdversarialDefenseWithAttacks):
         >>> lr = 0.001
         >>> momentum = 0.9
         >>> batch_size = 32
-        >>> num_class = 10
+        >>> num_classes = 10
         >>> loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
         >>> optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum)
         >>> fgsm = FastGradientSignMethod(net, loss_fn=loss_fn)
         >>> pgd = ProjectedGradientDescent(net, loss_fn=loss_fn)
         >>> ead = EnsembleAdversarialDefense(net, [fgsm, pgd], loss_fn=loss_fn,
         ...                                  optimizer=optimizer)
         >>> inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
-        >>> labels = np.random.randint(num_class, size=batch_size).astype(np.int32)
+        >>> labels = np.random.randint(num_classes, size=batch_size).astype(np.int32)
         >>> labels = np.eye(num_classes)[labels].astype(np.float32)
         >>> loss = ead.defense(inputs, labels)
     """
diff --git a/mindarmour/adv_robustness/defenses/natural_adversarial_defense.py b/mindarmour/adv_robustness/defenses/natural_adversarial_defense.py
index fca27d7..2f666c0 100644
--- a/mindarmour/adv_robustness/defenses/natural_adversarial_defense.py
+++ b/mindarmour/adv_robustness/defenses/natural_adversarial_defense.py
@@ -45,12 +45,12 @@ class NaturalAdversarialDefense(AdversarialDefenseWithAttacks):
         >>> lr = 0.001
         >>> momentum = 0.9
         >>> batch_size = 32
-        >>> num_class = 10
+        >>> num_classes = 10
         >>> loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
         >>> optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum)
         >>> nad = NaturalAdversarialDefense(net, loss_fn=loss_fn, optimizer=optimizer)
         >>> inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
-        >>> labels = np.random.randint(num_class, size=batch_size).astype(np.int32)
+        >>> labels = np.random.randint(num_classes, size=batch_size).astype(np.int32)
         >>> labels = np.eye(num_classes)[labels].astype(np.float32)
         >>> loss = nad.defense(inputs, labels)
     """
diff --git a/mindarmour/adv_robustness/defenses/projected_adversarial_defense.py b/mindarmour/adv_robustness/defenses/projected_adversarial_defense.py
index 97ff3a0..878b91e 100644
--- a/mindarmour/adv_robustness/defenses/projected_adversarial_defense.py
+++ b/mindarmour/adv_robustness/defenses/projected_adversarial_defense.py
@@ -50,12 +50,12 @@ class ProjectedAdversarialDefense(AdversarialDefenseWithAttacks):
         >>> lr = 0.001
         >>> momentum = 0.9
         >>> batch_size = 32
-        >>> num_class = 10
+        >>> num_classes = 10
         >>> loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
         >>> optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum)
         >>> pad = ProjectedAdversarialDefense(net, loss_fn=loss_fn, optimizer=optimizer)
         >>> inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
-        >>> labels = np.random.randint(num_class, size=batch_size).astype(np.int32)
+        >>> labels = np.random.randint(num_classes, size=batch_size).astype(np.int32)
         >>> labels = np.eye(num_classes)[labels].astype(np.float32)
         >>> loss = pad.defense(inputs, labels)
     """
diff --git a/mindarmour/adv_robustness/detectors/black/similarity_detector.py b/mindarmour/adv_robustness/detectors/black/similarity_detector.py
index 0ca975e..93c4d8d 100644
--- a/mindarmour/adv_robustness/detectors/black/similarity_detector.py
+++ b/mindarmour/adv_robustness/detectors/black/similarity_detector.py
@@ -139,6 +139,13 @@ class SimilarityDetector(Detector):

         Raises:
             ValueError: The number of training data is less than
                 max_k_neighbor!
+
+        Examples:
+            >>> x_train = np.random.rand(10, 32, 32, 3).astype(np.float32)
+            >>> perm = np.random.permutation(x_train.shape[0])
+            >>> benign_queries = x_train[perm[:10], :, :, :]
+            >>> num_nearest_neighbors, thresholds = detector.fit(inputs=x_train)
+            >>> detector.set_threshold(num_nearest_neighbors[-1], thresholds[-1])
         """
         data = check_numpy_param('inputs', inputs)
         data_len = data.shape[0]
@@ -189,6 +196,14 @@ class SimilarityDetector(Detector):

         Raises:
             ValueError: The parameters of threshold or num_of_neighbors is
                 not available.
+
+        Examples:
+            >>> x_train = np.random.rand(10, 32, 32, 3).astype(np.float32)
+            >>> perm = np.random.permutation(x_train.shape[0])
+            >>> benign_queries = x_train[perm[:10], :, :, :]
+            >>> num_nearest_neighbors, thresholds = detector.fit(inputs=x_train)
+            >>> detector.set_threshold(num_nearest_neighbors[-1], thresholds[-1])
+            >>> detector.detect(benign_queries)
         """
         if self._threshold is None or self._num_of_neighbors is None:
             msg = 'Explicit detection threshold and number of nearest ' \
@@ -237,6 +252,8 @@ class SimilarityDetector(Detector):
         """
         Clear the buffer memory.

+        Examples:
+            >>> detector.clear_buffer()
         """
         while self._buffer:
             self._buffer.pop()
@@ -248,6 +265,10 @@ class SimilarityDetector(Detector):
         Args:
             num_of_neighbors (int): Number of the nearest neighbors.
             threshold (float): Detection threshold.
+
+        Examples:
+            >>> num_nearest_neighbors, thresholds = detector.fit(inputs=x_train)
+            >>> detector.set_threshold(num_nearest_neighbors[-1], thresholds[-1])
         """
         self._num_of_neighbors = check_int_positive('num_of_neighbors',
                                                     num_of_neighbors)
@@ -259,6 +280,9 @@ class SimilarityDetector(Detector):

         Returns:
             list[int], number of queries between adjacent detections.
+
+        Examples:
+            >>> detector.get_detection_interval()
         """
         detected_queries = self._detected_queries
         interval = []
@@ -272,6 +296,9 @@ class SimilarityDetector(Detector):

         Returns:
             list[int], sequence number of detected malicious queries.
+
+        Examples:
+            >>> detector.get_detected_queries()
         """
         detected_queries = self._detected_queries
         return detected_queries
@@ -288,6 +315,9 @@ class SimilarityDetector(Detector):
         Raises:
             NotImplementedError: This function is not available
                 in class `SimilarityDetector`.
+
+        Examples:
+            >>> detector.detect_diff()
         """
         msg = 'The function detect_diff() is not available in the class ' \
               '`SimilarityDetector`.'
         LOGGER.error(TAG, msg)
@@ -303,6 +333,9 @@ class SimilarityDetector(Detector):
         Raises:
             NotImplementedError: This function is not available
                 in class `SimilarityDetector`.
+
+        Examples:
+            >>> detector.transform(x_train)
         """
         msg = 'The function transform() is not available in the class `SimilarityDetector`.'
         LOGGER.error(TAG, msg)
diff --git a/mindarmour/adv_robustness/detectors/mag_net.py b/mindarmour/adv_robustness/detectors/mag_net.py
index fa1319f..436879e 100644
--- a/mindarmour/adv_robustness/detectors/mag_net.py
+++ b/mindarmour/adv_robustness/detectors/mag_net.py
@@ -184,21 +184,31 @@ class DivergenceBasedDetector(ErrorBasedDetector):

     Examples:
         >>> import numpy as np
-        >>> from mindspore.ops.operations import Add
+        >>> import mindspore.ops.operations as P
         >>> from mindspore.nn import Cell
         >>> from mindspore import Model
         >>> from mindspore import context
-        >>> from mindarmour.adv_robustness.detectors import ErrorBasedDetector
+        >>> from mindarmour.adv_robustness.detectors import DivergenceBasedDetector
         >>> class PredNet(Cell):
-        >>>     def __init__(self):
-        >>>         super(Net, self).__init__()
-        >>>         self.add = Add()
-        >>>     def construct(self, inputs):
-        >>>         return self.add(inputs, inputs)
+        ...     def __init__(self):
+        ...         super(PredNet, self).__init__()
+        ...         self.shape = P.Shape()
+        ...         self.reshape = P.Reshape()
+        ...         self._softmax = P.Softmax()
+        ...     def construct(self, inputs):
+        ...         data = self.reshape(inputs, (self.shape(inputs)[0], -1))
+        ...         return self._softmax(data)
+        >>> class Net(Cell):
+        ...     def __init__(self):
+        ...         super(Net, self).__init__()
+        ...         self.add = P.Add()
+        ...     def construct(self, inputs):
+        ...         return self.add(inputs, inputs)
         >>> np.random.seed(5)
         >>> ori = np.random.rand(4, 4, 4).astype(np.float32)
         >>> np.random.seed(6)
         >>> adv = np.random.rand(4, 4, 4).astype(np.float32)
+        >>> encoder = Model(Net())
         >>> model = Model(PredNet())
         >>> detector = DivergenceBasedDetector(encoder, model)
         >>> threshold = detector.fit(ori)
diff --git a/mindarmour/adv_robustness/detectors/region_based_detector.py b/mindarmour/adv_robustness/detectors/region_based_detector.py
index aab643f..20cb21f 100644
--- a/mindarmour/adv_robustness/detectors/region_based_detector.py
+++ b/mindarmour/adv_robustness/detectors/region_based_detector.py
@@ -57,7 +57,7 @@ class RegionBasedDetector(Detector):
         >>> from mindspore.nn import Cell
         >>> from mindspore import Model
         >>> from mindspore import context
-        >>> from mindarmour.adv_robustness.detectors import ErrorBasedDetector
+        >>> from mindarmour.adv_robustness.detectors import RegionBasedDetector
         >>> class Net(Cell):
         ...     def __init__(self):
         ...         super(Net, self).__init__()
diff --git a/mindarmour/adv_robustness/evaluations/attack_evaluation.py b/mindarmour/adv_robustness/evaluations/attack_evaluation.py
index 1138ab8..ebf449f 100644
--- a/mindarmour/adv_robustness/evaluations/attack_evaluation.py
+++ b/mindarmour/adv_robustness/evaluations/attack_evaluation.py
@@ -63,6 +63,8 @@ class AttackEvaluate:
         >>> l_0, l_2, l_inf = attack_eval.avg_lp_distance()
         >>> ass = attack_eval.avg_ssim()
         >>> nte = attack_eval.nte()
+        >>> actc = attack_eval.avg_conf_true_class()
+
     """

     def __init__(self, inputs, labels, adv_inputs, adv_preds,
@@ -103,10 +105,6 @@ class AttackEvaluate:

         Returns:
             float, ranges between (0, 1). The higher, the more successful the attack is.
-
-        Examples:
-            >>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y)
-            >>> mr = attack_eval.mis_classification_rate()
         """
         return self._success_idxes.shape[0]*1.0 / self._inputs.shape[0]
@@ -116,10 +114,6 @@ class AttackEvaluate:

         Returns:
             float, ranges between (0, 1). The higher, the more successful the attack is.
-
-        Examples:
-            >>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y)
-            >>> acac = attack_eval.avg_conf_adv_class()
         """
         idxes = self._success_idxes
         success_num = idxes.shape[0]
@@ -135,10 +129,6 @@ class AttackEvaluate:

         Returns:
             float, ranges between (0, 1). The lower, the more successful the attack is.
-
-        Examples:
-            >>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y)
-            >>> acac = attack_eval.avg_conf_adv_class()
         """
         idxes = self._success_idxes
         success_num = idxes.shape[0]
@@ -158,10 +148,6 @@ class AttackEvaluate:
               the more successful the attack is.

             - If return value is -1, there is no success adversarial examples.
-
-        Examples:
-            >>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y)
-            >>> l_0, l_2, l_inf = attack_eval.avg_lp_distance()
         """
         idxes = self._success_idxes
         success_num = idxes.shape[0]
@@ -190,10 +176,6 @@ class AttackEvaluate:
               successful the attack is.

             - If return value is -1: there is no success adversarial examples.
-
-        Examples:
-            >>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y)
-            >>> ass = attack_eval.avg_ssim()
         """
         success_num = self._success_idxes.shape[0]
         if success_num == 0:
@@ -215,10 +197,6 @@ class AttackEvaluate:

         Returns:
             float, ranges between (0, 1). The higher, the more successful the attack is.
- - Examples: - >>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y) - >>> nte = attack_eval.nte() """ idxes = self._success_idxes success_num = idxes.shape[0] diff --git a/mindarmour/adv_robustness/evaluations/defense_evaluation.py b/mindarmour/adv_robustness/evaluations/defense_evaluation.py index 6b46599..e0b5839 100644 --- a/mindarmour/adv_robustness/evaluations/defense_evaluation.py +++ b/mindarmour/adv_robustness/evaluations/defense_evaluation.py @@ -52,7 +52,11 @@ class DefenseEvaluate: >>> def_eval = DefenseEvaluate(raw_preds, ... def_preds, ... true_labels) - >>> def_eval.cav() + >>> cav = def_eval.cav() + >>> crr = def_eval.crr() + >>> csr = def_eval.csr() + >>> ccv = def_eval.ccv() + >>> cos = def_eval.cos() """ def __init__(self, raw_preds, def_preds, true_labels): self._raw_preds, self._def_preds = check_pair_numpy_param('raw_preds', diff --git a/mindarmour/adv_robustness/evaluations/visual_metrics.py b/mindarmour/adv_robustness/evaluations/visual_metrics.py index fb83699..639b57b 100644 --- a/mindarmour/adv_robustness/evaluations/visual_metrics.py +++ b/mindarmour/adv_robustness/evaluations/visual_metrics.py @@ -58,7 +58,7 @@ class RadarMetric: ... metrics_labels, ... title='', ... scale='sparse') - >>> rm.show() + >>> #rm.show() """ def __init__(self, metrics_name, metrics_data, labels, title, scale='hide'):
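
Note for reviewers: the class-level examples relocated by this patch are meant to run as-is once mindspore and mindarmour are installed. A minimal standalone sketch of the pattern, assembled from the FastGradientMethod docstring in this patch (the toy Net cell and the one-hot label handling are taken from that docstring; only the final print is added here for illustration):

    import numpy as np
    import mindspore.nn as nn
    from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
    from mindarmour.adv_robustness.attacks import FastGradientMethod

    class Net(Cell):
        """Toy one-layer network used throughout the docstring examples."""
        def __init__(self):
            super(Net, self).__init__()
            self._relu = nn.ReLU()

        def construct(self, inputs):
            return self._relu(inputs)

    net = Net()
    inputs = np.asarray([[0.1, 0.2, 0.7]], np.float32)
    # one-hot labels, matching SoftmaxCrossEntropyWithLogits(sparse=False)
    labels = np.eye(3)[np.asarray([2], np.int32)].astype(np.float32)
    attack = FastGradientMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
    adv_x = attack.generate(inputs, labels)
    print(adv_x.shape)  # adversarial examples keep the input shape: (1, 3)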