
Fix code dependency issue and fix typos

tags/v1.6.0^2
shu-kun-zhang committed 3 years ago · commit ab56b4d4a6
21 changed files with 238 additions and 383 deletions
  1. +6  -24  mindarmour/adv_robustness/attacks/black/genetic_attack.py
  2. +5  -24  mindarmour/adv_robustness/attacks/black/hop_skip_jump_attack.py
  3. +7  -28  mindarmour/adv_robustness/attacks/black/natural_evolutionary_strategy.py
  4. +4  -20  mindarmour/adv_robustness/attacks/black/pointwise_attack.py
  5. +6  -46  mindarmour/adv_robustness/attacks/black/pso_attack.py
  6. +3  -20  mindarmour/adv_robustness/attacks/black/salt_and_pepper_attack.py
  7. +3  -24  mindarmour/adv_robustness/attacks/carlini_wagner.py
  8. +6  -24  mindarmour/adv_robustness/attacks/deep_fool.py
  9. +19 -33  mindarmour/adv_robustness/attacks/gradient_method.py
 10. +92 -65  mindarmour/adv_robustness/attacks/iterative_gradient_method.py
 11. +3  -20  mindarmour/adv_robustness/attacks/jsma.py
 12. +6  -8   mindarmour/adv_robustness/attacks/lbfgs.py
 13. +15 -9   mindarmour/adv_robustness/defenses/adversarial_defense.py
 14. +2  -2   mindarmour/adv_robustness/defenses/natural_adversarial_defense.py
 15. +2  -2   mindarmour/adv_robustness/defenses/projected_adversarial_defense.py
 16. +33 -0   mindarmour/adv_robustness/detectors/black/similarity_detector.py
 17. +17 -7   mindarmour/adv_robustness/detectors/mag_net.py
 18. +1  -1   mindarmour/adv_robustness/detectors/region_based_detector.py
 19. +2  -24  mindarmour/adv_robustness/evaluations/attack_evaluation.py
 20. +5  -1   mindarmour/adv_robustness/evaluations/defense_evaluation.py
 21. +1  -1   mindarmour/adv_robustness/evaluations/visual_metrics.py

+6 -24  mindarmour/adv_robustness/attacks/black/genetic_attack.py

@@ -92,6 +92,12 @@ class GeneticAttack(Attack):
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = GeneticAttack(model, sparse=False)
>>> batch_size = 6
>>> x_test = np.random.rand(batch_size, 10)
>>> y_test = np.random.randint(low=0, high=10, size=batch_size)
>>> y_test = np.eye(10)[y_test]
>>> y_test = y_test.astype(np.float32)
>>> _, adv_data, _ = attack.generate(x_test, y_test)
"""
def __init__(self, model, model_type='classification', targeted=True, reserve_ratio=0.3, sparse=True,
pop_size=6, mutation_rate=0.005, per_bounds=0.15, max_steps=1000, step_size=0.20, temp=0.3,
@@ -235,14 +241,6 @@ class GeneticAttack(Attack):
- numpy.ndarray, generated adversarial examples.

- numpy.ndarray, query times for each sample.

Examples:
>>> batch_size = 6
>>> x_test = np.random.rand(batch_size, 10)
>>> y_test = np.random.randint(low=0, high=10, size=batch_size)
>>> y_test = np.eye(10)[y_test]
>>> y_test = y_test.astype(np.float32)
>>> _, adv_data, _ = attack._generate_classification(x_test, y_test)
"""
inputs, labels = check_pair_numpy_param('inputs', inputs, 'labels', labels)
if self._sparse:
@@ -346,14 +344,6 @@ class GeneticAttack(Attack):
- numpy.ndarray, generated adversarial examples.

- numpy.ndarray, query times for each sample.

Examples:
>>> batch_size = 6
>>> x_test = np.random.rand(batch_size, 10)
>>> y_test = np.random.randint(low=0, high=10, size=batch_size)
>>> y_test = np.eye(10)[y_test]
>>> y_test = y_test.astype(np.float32)
>>> _, adv_data, _ = attack._generate_detection(x_test, y_test)
"""
images, auxiliary_inputs, gt_boxes, gt_labels = check_detection_inputs(inputs, labels)
adv_list = []
@@ -458,14 +448,6 @@ class GeneticAttack(Attack):
- numpy.ndarray, generated adversarial examples.

- numpy.ndarray, query times for each sample.

Examples:
>>> batch_size = 6
>>> x_test = np.random.rand(batch_size, 10)
>>> y_test = np.random.randint(low=0, high=10, size=batch_size)
>>> y_test = np.eye(10)[y_test]
>>> y_test = y_test.astype(np.float32)
>>> _, adv_data, _ = attack.generate(x_test, y_test)
"""
if self._model_type == 'classification':
success_list, adv_data, query_time_list = self._generate_classification(inputs, labels)


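For orientation, the consolidated class-level example above only ever touches the wrapped model through BlackModel.predict(), so it can be exercised without the repository's test mocks. A minimal sketch, assuming a hypothetical numpy-only MockModel (not part of this commit) in place of ModelToBeAttacked:

```python
import numpy as np
from mindarmour import BlackModel
from mindarmour.adv_robustness.attacks import GeneticAttack

class MockModel(BlackModel):
    """Hypothetical black box: softmax over the 10 input features."""
    def predict(self, inputs):
        exp = np.exp(inputs - inputs.max(axis=1, keepdims=True))
        return exp / exp.sum(axis=1, keepdims=True)

model = MockModel()
attack = GeneticAttack(model, sparse=False)    # sparse=False: one-hot labels
batch_size = 6
x_test = np.random.rand(batch_size, 10).astype(np.float32)
y_test = np.eye(10)[np.random.randint(0, 10, size=batch_size)].astype(np.float32)
success, adv_data, query_times = attack.generate(x_test, y_test)
```

The three return values match the generate() docstring above: per-sample success flags, the adversarial batch, and per-sample query counts.
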
+5 -24  mindarmour/adv_robustness/attacks/black/hop_skip_jump_attack.py

@@ -92,6 +92,11 @@ class HopSkipJumpAttack(Attack):
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = HopSkipJumpAttack(model)
>>> n, c, h, w = 1, 1, 32, 32
>>> class_num = 3
>>> x_test = np.asarray(np.random.random((n,c,h,w)), np.float32)
>>> y_test = np.random.randint(0, class_num, size=n)
>>> _, adv_x, _= attack.generate(x_test, y_test)
"""

def __init__(self, model, init_num_evals=100, max_num_evals=1000,
@@ -183,30 +188,6 @@ class HopSkipJumpAttack(Attack):
- numpy.ndarray, generated adversarial examples.

- numpy.ndarray, query times for each sample.

Examples:
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindarmour import BlackModel
>>> from mindarmour.adv_robustness.attacks import HopSkipJumpAttack
>>> from tests.ut.python.utils.mock_net import Net
>>> class ModelToBeAttacked(BlackModel):
... def __init__(self, network):
... super(ModelToBeAttacked, self).__init__()
... self._network = network
... def predict(self, inputs):
... if len(inputs.shape) == 3:
... inputs = inputs[np.newaxis, :]
... result = self._network(Tensor(inputs.astype(np.float32)))
... return result.asnumpy()
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = HopSkipJumpAttack(model)
>>> n, c, h, w = 1, 1, 32, 32
>>> class_num = 3
>>> x_test = np.asarray(np.random.random((n,c,h,w)), np.float32)
>>> y_test = np.random.randint(0, class_num, size=n)
>>> _, adv_x, _= attack.generate(x_test, y_test)
"""
if labels is not None:
inputs, labels = check_pair_numpy_param('inputs', inputs,


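The method-level example deleted from generate() survives as the class-level one; assembled into a single runnable block (assuming a source checkout, since the mock net lives under tests/):

```python
import numpy as np
from mindspore import Tensor
from mindarmour import BlackModel
from mindarmour.adv_robustness.attacks import HopSkipJumpAttack
from tests.ut.python.utils.mock_net import Net  # available in a source checkout only

class ModelToBeAttacked(BlackModel):
    def __init__(self, network):
        super(ModelToBeAttacked, self).__init__()
        self._network = network
    def predict(self, inputs):
        if len(inputs.shape) == 3:
            inputs = inputs[np.newaxis, :]  # the attack queries single images
        return self._network(Tensor(inputs.astype(np.float32))).asnumpy()

net = Net()
attack = HopSkipJumpAttack(ModelToBeAttacked(net))
x_test = np.asarray(np.random.random((1, 1, 32, 32)), np.float32)
y_test = np.random.randint(0, 3, size=1)
_, adv_x, _ = attack.generate(x_test, y_test)
```
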
+7 -28  mindarmour/adv_robustness/attacks/black/natural_evolutionary_strategy.py

@@ -98,6 +98,13 @@ class NES(Attack):
>>> SCENE = 'Query_Limit'
>>> TOP_K = -1
>>> attack= NES(model, SCENE, top_k=TOP_K)
>>> num_class = 5
>>> x_test = np.asarray(np.random.random((1, 1, 32, 32)), np.float32)
>>> target_image = np.asarray(np.random.random((1, 1, 32, 32)), np.float32)
>>> orig_class = 0
>>> target_class = 2
>>> attack.set_target_images(target_image)
>>> tag, adv, queries = attack.generate(np.array(x_test), np.array([target_class]))
"""

def __init__(self, model, scene, max_queries=10000, top_k=-1, num_class=10, batch_size=128, epsilon=0.3,
@@ -153,34 +160,6 @@ class NES(Attack):
ValueError: If top_k is less than 0 in the Label-Only or Partial-Info setting.
ValueError: If target_imgs is None in the Label-Only or Partial-Info setting.
ValueError: If scene is not in ['Label_Only', 'Partial_Info', 'Query_Limit'].

Examples:
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindarmour import BlackModel
>>> from mindarmour.adv_robustness.attacks import NES
>>> from tests.ut.python.utils.mock_net import Net
>>> class ModelToBeAttacked(BlackModel):
... def __init__(self, network):
... super(ModelToBeAttacked, self).__init__()
... self._network = network
... def predict(self, inputs):
... if len(inputs.shape) == 3:
... inputs = inputs[np.newaxis, :]
... result = self._network(Tensor(inputs.astype(np.float32)))
... return result.asnumpy()
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> SCENE = 'Query_Limit'
>>> TOP_K = -1
>>> attack= NES(model, SCENE, top_k=TOP_K)
>>> num_class = 5
>>> x_test = np.asarray(np.random.random((32, 32)), np.float32)
>>> target_image = np.asarray(np.random.random((32, 32)), np.float32)
>>> orig_class = 0
>>> target_class = 2
>>> attack.set_target_images(target_image)
>>> tag, adv, queries = attack.generate(np.array(x_test), np.array([target_class]))
"""
inputs, labels = check_pair_numpy_param('inputs', inputs, 'labels', labels)
if not self._sparse:


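NES adds one wrinkle the other black-box attacks lack: the scene is fixed at construction, and the Partial_Info and Label_Only scenes additionally require top_k > 0 and target images. A sketch of the Query_Limit flow, reusing the ModelToBeAttacked wrapper from the previous sketch:

```python
import numpy as np
from mindarmour.adv_robustness.attacks import NES

# `model` is a ModelToBeAttacked instance, as in the HopSkipJumpAttack sketch above.
SCENE = 'Query_Limit'                # or 'Partial_Info' / 'Label_Only' with top_k > 0
attack = NES(model, SCENE, top_k=-1)
x_test = np.asarray(np.random.random((1, 1, 32, 32)), np.float32)
target_image = np.asarray(np.random.random((1, 1, 32, 32)), np.float32)
target_class = 2
attack.set_target_images(target_image)
tag, adv, queries = attack.generate(np.array(x_test), np.array([target_class]))
```
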
+4 -20  mindarmour/adv_robustness/attacks/black/pointwise_attack.py

@@ -60,8 +60,12 @@ class PointWiseAttack(Attack):
... result = self._network(Tensor(inputs.astype(np.float32)))
... return result.asnumpy()
>>> net = Net()
>>> np.random.seed(5)
>>> model = ModelToBeAttacked(net)
>>> attack = PointWiseAttack(model)
>>> x_test = np.asarray(np.random.random((1,1,32,32)), np.float32)
>>> y_test = np.random.randint(0, 3, size=1)
>>> is_adv_list, adv_list, query_times_each_adv = attack.generate(x_test, y_test)
"""

def __init__(self, model, max_iter=1000, search_iter=10, is_targeted=False, init_attack=None, sparse=True):
@@ -91,26 +95,6 @@ class PointWiseAttack(Attack):
- numpy.ndarray, generated adversarial examples.

- numpy.ndarray, query times for each sample.

Examples:
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindarmour import BlackModel
>>> from mindarmour.adv_robustness.attacks import PointWiseAttack
>>> from tests.ut.python.utils.mock_net import Net
>>> class ModelToBeAttacked(BlackModel):
... def __init__(self, network):
... super(ModelToBeAttacked, self).__init__()
... self._network = network
... def predict(self, inputs):
... result = self._network(Tensor(inputs.astype(np.float32)))
... return result.asnumpy()
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = PointWiseAttack(model)
>>> x_test = np.asarray(np.random.random((1,1,32,32)), np.float32)
>>> y_test = np.random.randint(0, 3, size=1)
>>> is_adv_list, adv_list, query_times_each_adv = attack.generate(x_test, y_test)
"""
arr_x, arr_y = check_pair_numpy_param('inputs', inputs, 'labels', labels)
if not self._sparse:


+6 -46  mindarmour/adv_robustness/attacks/black/pso_attack.py

@@ -83,13 +83,18 @@ class PSOAttack(Attack):
... def __init__(self):
... super(Net, self).__init__()
... self._relu = nn.ReLU()
...
... def construct(self, inputs):
... out = self._relu(inputs)
... return out
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=False)
>>> batch_size = 6
>>> x_test = np.random.rand(batch_size, 10)
>>> y_test = np.random.randint(low=0, high=10, size=batch_size)
>>> y_test = np.eye(10)[y_test]
>>> y_test = y_test.astype(np.float32)
>>> _, adv_data, _ = attack.generate(x_test, y_test)
"""

def __init__(self, model, model_type='classification', targeted=False, reserve_ratio=0.3, sparse=True,
@@ -228,17 +233,6 @@ class PSOAttack(Attack):
- numpy.ndarray, generated adversarial examples.

- numpy.ndarray, query times for each sample.

Examples:
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=False)
>>> batch_size = 6
>>> x_test = np.random.rand(batch_size, 10)
>>> y_test = np.random.randint(low=0, high=10, size=batch_size)
>>> y_test = np.eye(10)[y_test]
>>> y_test = y_test.astype(np.float32)
>>> _, adv_data, _ = attack.generate(x_test, y_test)
"""
# inputs check
inputs, labels = check_pair_numpy_param('inputs', inputs,
@@ -507,40 +501,6 @@ class PSOAttack(Attack):
- numpy.ndarray, generated adversarial examples.

- numpy.ndarray, query times for each sample.

Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor
>>> from mindspore.nn import Cell
>>> from mindarmour import BlackModel
>>> from mindarmour.adv_robustness.attacks import PSOAttack
>>> class ModelToBeAttacked(BlackModel):
... def __init__(self, network):
... super(ModelToBeAttacked, self).__init__()
... self._network = network
... def predict(self, inputs):
... if len(inputs.shape) == 1:
... inputs = np.expand_dims(inputs, axis=0)
... result = self._network(Tensor(inputs.astype(np.float32)))
... return result.asnumpy()
>>> class Net(Cell):
... def __init__(self):
... super(Net, self).__init__()
... self._relu = nn.ReLU()
...
... def construct(self, inputs):
... out = self._relu(inputs)
... return out
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=False)
>>> batch_size = 6
>>> x_test = np.random.rand(batch_size, 10)
>>> y_test = np.random.randint(low=0, high=10, size=batch_size)
>>> y_test = np.eye(10)[y_test]
>>> y_test = y_test.astype(np.float32)
>>> _, adv_data, _ = attack.generate(x_test, y_test)
"""
# inputs check
if self._model_type == 'classification':


+3 -20  mindarmour/adv_robustness/attacks/black/salt_and_pepper_attack.py

@@ -55,6 +55,9 @@ class SaltAndPepperNoiseAttack(Attack):
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = SaltAndPepperNoiseAttack(model)
>>> x_test = np.asarray(np.random.random((1,1,32,32)), np.float32)
>>> y_test = np.random.randint(0, 3, size=1)
>>> _, adv_list, _ = attack.generate(x_test, y_test)
"""

def __init__(self, model, bounds=(0.0, 1.0), max_iter=100, is_targeted=False, sparse=True):
@@ -81,26 +84,6 @@ class SaltAndPepperNoiseAttack(Attack):
- numpy.ndarray, generated adversarial examples.

- numpy.ndarray, query times for each sample.

Examples:
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindarmour import BlackModel
>>> from mindarmour.adv_robustness.attacks import SaltAndPepperNoiseAttack
>>> from tests.ut.python.utils.mock_net import Net
>>> class ModelToBeAttacked(BlackModel):
... def __init__(self, network):
... super(ModelToBeAttacked, self).__init__()
... self._network = network
... def predict(self, inputs):
... result = self._network(Tensor(inputs.astype(np.float32)))
... return result.asnumpy()
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = PointWiseAttack(model)
>>> x_test = np.asarray(np.random.random((1,1,32,32)), np.float32)
>>> y_test = np.random.randint(0, 3, size=1)
>>> _, adv_list, _ = attack.generate(x_test, y_test)
"""
arr_x, arr_y = check_pair_numpy_param('inputs', inputs, 'labels', labels)
if not self._sparse:


+3 -24  mindarmour/adv_robustness/attacks/carlini_wagner.py

@@ -103,14 +103,15 @@ class CarliniWagnerL2Attack(Attack):
... def __init__(self):
... super(Net, self).__init__()
... self._softmax = M.Softmax()
...
... def construct(self, inputs):
... out = self._softmax(inputs)
... return out
>>> net = Net()
>>> input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
>>> label_np = np.array([3]).astype(np.int64)
>>> num_classes = input_np.shape[1]
>>> label_np = np.array([3]).astype(np.int64)
>>> attack = CarliniWagnerL2Attack(net, num_classes, targeted=False)
>>> adv_data = attack.generate(input_np, label_np)
"""

def __init__(self, network, num_classes, box_min=0.0, box_max=1.0,
@@ -280,28 +281,6 @@ class CarliniWagnerL2Attack(Attack):

Returns:
numpy.ndarray, generated adversarial examples.

Examples:
>>> import numpy as np
>>> import mindspore.ops.operations as M
>>> from mindspore.nn import Cell
>>> from mindarmour.adv_robustness.attacks import CarliniWagnerL2Attack
>>> class Net(Cell):
... def __init__(self):
... super(Net, self).__init__()
... self._softmax = M.Softmax()
...
... def construct(self, inputs):
... out = self._softmax(inputs)
... return out
>>> input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
>>> num_classes = input_np.shape[1]
>>> label_np = np.array([3]).astype(np.int64)
>>> attack_nonTargeted = CarliniWagnerL2Attack(net, num_classes, targeted=False)
>>> advs_nonTargeted = attack_nonTargeted.generate(input_np, label_np)
>>> target_np = np.array([1]).astype(np.int64)
>>> attack_targeted = CarliniWagnerL2Attack(net, num_classes, targeted=False)
>>> advs_targeted = attack_targeted.generate(input_np, target_np)
"""

LOGGER.debug(TAG, "enter the func generate.")


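One of the typos this commit removes is visible in the deleted block above: attack_targeted was constructed with targeted=False. The surviving class docstring assembles into this non-targeted run (a targeted one would pass targeted=True and a target label instead):

```python
import numpy as np
import mindspore.ops.operations as M
from mindspore.nn import Cell
from mindarmour.adv_robustness.attacks import CarliniWagnerL2Attack

class Net(Cell):
    def __init__(self):
        super(Net, self).__init__()
        self._softmax = M.Softmax()
    def construct(self, inputs):
        return self._softmax(inputs)

net = Net()
input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
num_classes = input_np.shape[1]        # derived before use, per the fix above
label_np = np.array([3]).astype(np.int64)
attack = CarliniWagnerL2Attack(net, num_classes, targeted=False)
adv_data = attack.generate(input_np, label_np)
```
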
+6 -24  mindarmour/adv_robustness/attacks/deep_fool.py

@@ -130,8 +130,14 @@ class DeepFool(Attack):
... out = self._softmax(inputs)
... return out
>>> net = Net()
>>> input_shape = (1, 5)
>>> _, classes = input_shape
>>> attack = DeepFool(net, classes, max_iters=10, norm_level=2,
... bounds=(0.0, 1.0))
>>> input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
>>> input_me = Tensor(input_np)
>>> true_labels = np.argmax(net(input_me).asnumpy(), axis=1)
>>> advs = attack.generate(input_np, true_labels)
"""

def __init__(self, network, num_classes, model_type='classification',
@@ -177,30 +183,6 @@ class DeepFool(Attack):

Raises:
NotImplementedError: If norm_level is not in [2, np.inf, '2', 'inf'].

Examples:
>>> import numpy as np
>>> import mindspore.ops.operations as P
>>> from mindspore.nn import Cell
>>> from mindspore import Tensor
>>> from mindarmour.adv_robustness.attacks import DeepFool
>>> class Net(Cell):
... def __init__(self):
... super(Net, self).__init__()
... self._softmax = P.Softmax()
... def construct(self, inputs):
... out = self._softmax(inputs)
... return out
>>> net = Net()
>>> attack = DeepFool(net, classes, max_iters=10, norm_level=2,
... bounds=(0.0, 1.0))
>>> input_shape = (1, 5)
>>> _, classes = input_shape
>>> input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
>>> input_me = Tensor(input_np)
>>> true_labels = np.argmax(net(input_me).asnumpy(), axis=1)
>>> attack = DeepFool(net, classes, max_iters=10, norm_level=2, bounds=(0.0, 1.0))
>>> advs = attack.generate(input_np, true_labels)
"""

if self._model_type == 'detection':


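This hunk is the clearest instance of the "code dependency issue" in the commit title: the old example passed classes to the DeepFool constructor before defining it. The corrected ordering, as one runnable block:

```python
import numpy as np
import mindspore.ops.operations as P
from mindspore import Tensor
from mindspore.nn import Cell
from mindarmour.adv_robustness.attacks import DeepFool

class Net(Cell):
    def __init__(self):
        super(Net, self).__init__()
        self._softmax = P.Softmax()
    def construct(self, inputs):
        return self._softmax(inputs)

net = Net()
input_shape = (1, 5)
_, classes = input_shape               # now defined before the constructor needs it
attack = DeepFool(net, classes, max_iters=10, norm_level=2, bounds=(0.0, 1.0))
input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
true_labels = np.argmax(net(Tensor(input_np)).asnumpy(), axis=1)
advs = attack.generate(input_np, true_labels)
```
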
+19 -33  mindarmour/adv_robustness/attacks/gradient_method.py

@@ -45,26 +45,6 @@ class GradientMethod(Attack):
In form of (clip_min, clip_max). Default: None.
loss_fn (Loss): Loss function for optimization. If None, the input network \
is already equipped with loss function. Default: None.

Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindspore import Tensor
>>> from mindarmour.adv_robustness.attacks import FastGradientMethod
>>> class Net(Cell):
... def __init__(self):
... super(Net, self).__init__()
... self._relu = nn.ReLU()
...
... def construct(self, inputs):
... out = self._relu(inputs)
... return out
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
>>> net = Net()
>>> attack = FastGradientMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
>>> adv_x = attack.generate(inputs, labels)
"""

def __init__(self, network, eps=0.07, alpha=None, bounds=None,
@@ -180,8 +160,9 @@ class FastGradientMethod(GradientMethod):
... def construct(self, inputs):
... out = self._relu(inputs)
... return out
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
>>> inputs = np.asarray([[0.1, 0.2, 0.7]], np.float32)
>>> labels = np.asarray([2],np.int32)
>>> labels = np.eye(3)[labels].astype(np.float32)
>>> net = Net()
>>> attack = FastGradientMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
>>> adv_x = attack.generate(inputs, labels)
@@ -261,8 +242,9 @@ class RandomFastGradientMethod(FastGradientMethod):
... out = self._relu(inputs)
... return out
>>> net = Net()
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
>>> inputs = np.asarray([[0.1, 0.2, 0.7]], np.float32)
>>> labels = np.asarray([2],np.int32)
>>> labels = np.eye(3)[labels].astype(np.float32)
>>> attack = RandomFastGradientMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
>>> adv_x = attack.generate(inputs, labels)
"""
@@ -315,8 +297,9 @@ class FastGradientSignMethod(GradientMethod):
... out = self._relu(inputs)
... return out
>>> net = Net()
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
>>> inputs = np.asarray([[0.1, 0.2, 0.7]], np.float32)
>>> labels = np.asarray([2],np.int32)
>>> labels = np.eye(3)[labels].astype(np.float32)
>>> attack = FastGradientSignMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
>>> adv_x = attack.generate(inputs, labels)
"""
@@ -391,8 +374,9 @@ class RandomFastGradientSignMethod(FastGradientSignMethod):
... out = self._relu(inputs)
... return out
>>> net = Net()
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
>>> inputs = np.asarray([[0.1, 0.2, 0.7]], np.float32)
>>> labels = np.asarray([2],np.int32)
>>> labels = np.eye(3)[labels].astype(np.float32)
>>> attack = RandomFastGradientSignMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
>>> adv_x = attack.generate(inputs, labels)
"""
@@ -439,9 +423,10 @@ class LeastLikelyClassMethod(FastGradientSignMethod):
... def construct(self, inputs):
... out = self._relu(inputs)
... return out
>>> inputs = np.asarray([[0.1, 0.2, 0.7]], np.float32)
>>> labels = np.asarray([2],np.int32)
>>> labels = np.eye(3)[labels].astype(np.float32)
>>> net = Net()
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
>>> attack = LeastLikelyClassMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
>>> adv_x = attack.generate(inputs, labels)
"""
@@ -489,10 +474,11 @@ class RandomLeastLikelyClassMethod(FastGradientSignMethod):
... def construct(self, inputs):
... out = self._relu(inputs)
... return out
>>> inputs = np.asarray([[0.1, 0.2, 0.7]], np.float32)
>>> labels = np.asarray([2],np.int32)
>>> labels = np.eye(3)[labels].astype(np.float32)
>>> net = Net()
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
>>> attack = RandomLeastLikelyClassMethod(network, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
>>> attack = RandomLeastLikelyClassMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
>>> adv_x = attack.generate(inputs, labels)
"""



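Every concrete class in this file now shares the same three-line data setup: a float32 batch, sparse int32 labels, then one-hot conversion to match SoftmaxCrossEntropyWithLogits(sparse=False). Assembled once, with FastGradientMethod standing in for the whole family:

```python
import numpy as np
import mindspore.nn as nn
from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
from mindarmour.adv_robustness.attacks import FastGradientMethod

class Net(Cell):
    def __init__(self):
        super(Net, self).__init__()
        self._relu = nn.ReLU()
    def construct(self, inputs):
        return self._relu(inputs)

net = Net()
inputs = np.asarray([[0.1, 0.2, 0.7]], np.float32)
labels = np.asarray([2], np.int32)
labels = np.eye(3)[labels].astype(np.float32)   # one-hot, since sparse=False
attack = FastGradientMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
adv_x = attack.generate(inputs, labels)
```

Swapping FastGradientMethod for any of the Random/Sign/LeastLikely variants leaves the rest of the block unchanged.
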
+92 -65  mindarmour/adv_robustness/attacks/iterative_gradient_method.py

@@ -147,12 +147,6 @@ class IterativeGradientMethod(Attack):
Raises:
NotImplementedError: This function is not available in
IterativeGradientMethod.

Examples:
>>> adv_x = attack.generate([[0.1, 0.9, 0.6],
>>> [0.3, 0, 0.3]],
>>> [[0, , 1, 0, 0, 0, 0, 0, 0, 0],
>>> [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]])
"""
msg = 'The function generate() is an abstract method in class ' \
'`IterativeGradientMethod`, and should be implemented ' \
@@ -186,17 +180,23 @@ class BasicIterativeMethod(IterativeGradientMethod):
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore.ops import operations as P
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import BasicIterativeMethod
>>> class Net(Cell):
... def __init__(self):
... super(Net, self).__init__()
... self._relu = nn.ReLU()
... self._softmax = P.Softmax()
... def construct(self, inputs):
... out = self._relu(inputs)
... out = self._softmax(inputs)
... return out
>>> net = Net()
>>> attack = BasicIterativeMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
>>> inputs = np.asarray([[0.1, 0.2, 0.7]], np.float32)
>>> labels = np.asarray([2],np.int32)
>>> labels = np.eye(3)[labels].astype(np.float32)
>>> adv_x = attack.generate(inputs, labels)
"""
def __init__(self, network, eps=0.3, eps_iter=0.1, bounds=(0.0, 1.0),
is_targeted=False, nb_iter=5, loss_fn=None):
@@ -225,25 +225,6 @@ class BasicIterativeMethod(IterativeGradientMethod):
For each input if it has more than one label, it is wrapped in a tuple.
Returns:
numpy.ndarray, generated adversarial examples.

Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import BasicIterativeMethod
>>> class Net(Cell):
... def __init__(self):
... super(Net, self).__init__()
... self._relu = nn.ReLU()
... def construct(self, inputs):
... out = self._relu(inputs)
... return out
>>> net = Net()
>>> attack = BasicIterativeMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
>>> adv_x = attack.generate([[0.3, 0.2, 0.6],
... [0.3, 0.2, 0.4]],
... [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 1, 0, 0, 0]])
"""
inputs_image, inputs, labels = check_inputs_labels(inputs, labels)
arr_x = inputs_image
@@ -299,6 +280,27 @@ class MomentumIterativeMethod(IterativeGradientMethod):
np.inf, 1 or 2. Default: 'inf'.
loss_fn (Loss): Loss function for optimization. If None, the input network \
is already equipped with loss function. Default: None.

Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore.ops import operations as P
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import MomentumIterativeMethod
>>> class Net(Cell):
... def __init__(self):
... super(Net, self).__init__()
... self._softmax = P.Softmax()
... def construct(self, inputs):
... out = self._softmax(inputs)
... return out
>>> net = Net()
>>> attack = MomentumIterativeMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
>>> inputs = np.asarray([[0.1, 0.2, 0.7]], np.float32)
>>> labels = np.asarray([2],np.int32)
>>> labels = np.eye(3)[labels].astype(np.float32)
>>> adv_x = attack.generate(inputs, labels)
"""

def __init__(self, network, eps=0.3, eps_iter=0.1, bounds=(0.0, 1.0),
@@ -326,25 +328,6 @@ class MomentumIterativeMethod(IterativeGradientMethod):

Returns:
numpy.ndarray, generated adversarial examples.

Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import MomentumIterativeMethod
>>> class Net(Cell):
... def __init__(self):
... super(Net, self).__init__()
... self._relu = nn.ReLU()
... def construct(self, inputs):
... out = self._relu(inputs)
... return out
>>> net = Net()
>>> attack = MomentumIterativeMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
>>> adv_x = attack.generate([[0.5, 0.2, 0.6],
... [0.3, 0, 0.2]],
... [[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
... [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]])
"""
inputs_image, inputs, labels = check_inputs_labels(inputs, labels)
arr_x = inputs_image
@@ -443,6 +426,27 @@ class ProjectedGradientDescent(BasicIterativeMethod):
np.inf, 1 or 2. Default: 'inf'.
loss_fn (Loss): Loss function for optimization. If None, the input network \
is already equipped with loss function. Default: None.

Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore.ops import operations as P
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import ProjectedGradientDescent
>>> class Net(Cell):
... def __init__(self):
... super(Net, self).__init__()
... self._softmax = P.Softmax()
... def construct(self, inputs):
... out = self._softmax(inputs)
... return out
>>> net = Net()
>>> attack = ProjectedGradientDescent(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
>>> inputs = np.asarray([[0.1, 0.2, 0.7]], np.float32)
>>> labels = np.asarray([2],np.int32)
>>> labels = np.eye(3)[labels].astype(np.float32)
>>> adv_x = attack.generate(inputs, labels)
"""

def __init__(self, network, eps=0.3, eps_iter=0.1, bounds=(0.0, 1.0),
@@ -469,25 +473,6 @@ class ProjectedGradientDescent(BasicIterativeMethod):

Returns:
numpy.ndarray, generated adversarial examples.

Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import ProjectedGradientDescent
>>> class Net(Cell):
... def __init__(self):
... super(Net, self).__init__()
... self._relu = nn.ReLU()
... def construct(self, inputs):
... out = self._relu(inputs)
... return out
>>> net = Net()
>>> attack = ProjectedGradientDescent(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
>>> adv_x = attack.generate([[0.6, 0.2, 0.6],
... [0.3, 0.3, 0.4]],
... [[0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
... [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
"""
inputs_image, inputs, labels = check_inputs_labels(inputs, labels)
arr_x = inputs_image
@@ -539,6 +524,27 @@ class DiverseInputIterativeMethod(BasicIterativeMethod):
prob (float): Transformation probability. Default: 0.5.
loss_fn (Loss): Loss function for optimization. If None, the input network \
is already equipped with loss function. Default: None.

Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore.ops import operations as P
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import DiverseInputIterativeMethod
>>> class Net(Cell):
... def __init__(self):
... super(Net, self).__init__()
... self._softmax = P.Softmax()
... def construct(self, inputs):
... out = self._softmax(inputs)
... return out
>>> net = Net()
>>> attack = DiverseInputIterativeMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
>>> inputs = np.asarray([[0.1, 0.2, 0.7]], np.float32)
>>> labels = np.asarray([2],np.int32)
>>> labels = np.eye(3)[labels].astype(np.float32)
>>> adv_x = attack.generate(inputs, labels)
"""
def __init__(self, network, eps=0.3, bounds=(0.0, 1.0),
is_targeted=False, prob=0.5, loss_fn=None):
@@ -575,6 +581,27 @@ class MomentumDiverseInputIterativeMethod(MomentumIterativeMethod):
prob (float): Transformation probability. Default: 0.5.
loss_fn (Loss): Loss function for optimization. If None, the input network \
is already equipped with loss function. Default: None.

Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore.ops import operations as P
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import MomentumDiverseInputIterativeMethod
>>> class Net(Cell):
... def __init__(self):
... super(Net, self).__init__()
... self._softmax = P.Softmax()
... def construct(self, inputs):
... out = self._softmax(inputs)
... return out
>>> net = Net()
>>> attack = MomentumDiverseInputIterativeMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
>>> inputs = np.asarray([[0.1, 0.2, 0.7]], np.float32)
>>> labels = np.asarray([2],np.int32)
>>> labels = np.eye(3)[labels].astype(np.float32)
>>> adv_x = attack.generate(inputs, labels)
"""
def __init__(self, network, eps=0.3, bounds=(0.0, 1.0),
is_targeted=False, norm_level='l1', prob=0.5, loss_fn=None):


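The examples added for the five iterative attacks are intentionally identical apart from the class name, so one block covers them all; ProjectedGradientDescent shown here:

```python
import numpy as np
from mindspore.ops import operations as P
from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
from mindarmour.adv_robustness.attacks import ProjectedGradientDescent

class Net(Cell):
    def __init__(self):
        super(Net, self).__init__()
        self._softmax = P.Softmax()
    def construct(self, inputs):
        return self._softmax(inputs)

net = Net()
attack = ProjectedGradientDescent(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
inputs = np.asarray([[0.1, 0.2, 0.7]], np.float32)
labels = np.eye(3)[np.asarray([2], np.int32)].astype(np.float32)  # one-hot
adv_x = attack.generate(inputs, labels)
```
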
+3 -20  mindarmour/adv_robustness/attacks/jsma.py

@@ -68,7 +68,10 @@ class JSMAAttack(Attack):
>>> net = Net()
>>> input_shape = (1, 5)
>>> batch_size, classes = input_shape
>>> input_np = np.random.random(input_shape).astype(np.float32)
>>> label_np = np.random.randint(classes, size=batch_size)
>>> attack = JSMAAttack(net, classes, max_iteration=5)
>>> advs = attack.generate(input_np, label_np)
"""

def __init__(self, network, num_classes, box_min=0.0, box_max=1.0,
@@ -193,26 +196,6 @@ class JSMAAttack(Attack):

Returns:
numpy.ndarray, adversarial samples.

Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore.nn import Cell
>>> from mindarmour.adv_robustness.attacks import JSMAAttack
>>> class Net(Cell):
... def __init__(self):
... super(Net, self).__init__()
... self._relu = nn.ReLU()
... def construct(self, inputs):
... out = self._relu(inputs)
... return out
>>> net = Net()
>>> input_shape = (1, 5)
>>> batch_size, classes = input_shape
>>> input_np = np.random.random(input_shape).astype(np.float32)
>>> label_np = np.random.randint(classes, size=batch_size)
>>> attack = JSMAAttack(net, classes, max_iteration=5)
>>> advs = attack.generate(input_np, label_np)
"""
inputs, labels = check_pair_numpy_param('inputs', inputs,
'labels', labels)


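JSMAAttack keeps sparse integer labels, so no one-hot step is needed; the class docstring assembles into:

```python
import numpy as np
import mindspore.nn as nn
from mindspore.nn import Cell
from mindarmour.adv_robustness.attacks import JSMAAttack

class Net(Cell):
    def __init__(self):
        super(Net, self).__init__()
        self._relu = nn.ReLU()
    def construct(self, inputs):
        return self._relu(inputs)

net = Net()
input_shape = (1, 5)
batch_size, classes = input_shape
input_np = np.random.random(input_shape).astype(np.float32)
label_np = np.random.randint(classes, size=batch_size)   # sparse labels
attack = JSMAAttack(net, classes, max_iteration=5)
advs = attack.generate(input_np, label_np)
```
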
+6 -8  mindarmour/adv_robustness/attacks/lbfgs.py

@@ -58,7 +58,13 @@ class LBFGS(Attack):
>>> from mindarmour.adv_robustness.attacks import LBFGS
>>> from tests.ut.python.utils.mock_net import Net
>>> net = Net()
>>> classes = 10
>>> attack = LBFGS(net, is_targeted=True)
>>> input_np = np.asarray(np.random.random((1,1,32,32)), np.float32)
>>> label_np = np.array([3]).astype(np.int64)
>>> target_np = np.array([7]).astype(np.int64)
>>> target_np = np.eye(10)[target_np].astype(np.float32)
>>> adv = attack.generate(input_np, target_np)
"""
def __init__(self, network, eps=1e-5, bounds=(0.0, 1.0), is_targeted=True,
nb_iter=150, search_iters=30, loss_fn=None, sparse=False):
@@ -96,14 +102,6 @@ class LBFGS(Attack):

Returns:
numpy.ndarray, generated adversarial examples.

Examples:
>>> import numpy as np
>>> from mindarmour.adv_robustness.attacks import LBFGS
>>> from tests.ut.python.utils.mock_net import Net
>>> net = Net()
>>> attack = LBFGS(net, is_targeted=True)
>>> adv = attack.generate([[0.1, 0.2, 0.6], [0.3, 0, 0.4]], [2, 2])
"""
LOGGER.debug(TAG, 'start to generate adv image.')
arr_x, arr_y = check_pair_numpy_param('inputs', inputs, 'labels', labels)


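LBFGS defaults to is_targeted=True with sparse=False, so generate() consumes a one-hot target label; note that label_np in the new docstring example is defined but never used. Trimmed to the essentials (source checkout assumed for the mock net):

```python
import numpy as np
from mindarmour.adv_robustness.attacks import LBFGS
from tests.ut.python.utils.mock_net import Net  # source checkout only

net = Net()
attack = LBFGS(net, is_targeted=True)
input_np = np.asarray(np.random.random((1, 1, 32, 32)), np.float32)
target_np = np.eye(10)[np.array([7])].astype(np.float32)  # one-hot target class 7
adv = attack.generate(input_np, target_np)
```
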
+15 -9  mindarmour/adv_robustness/defenses/adversarial_defense.py

@@ -45,8 +45,8 @@ class AdversarialDefense(Defense):
>>> lr = 0.001
>>> momentum = 0.9
>>> batch_size = 32
>>> num_class = 10
>>> loss_fn = SoftmaxCrossEntropyWithLogits(sparse=False)
>>> num_classes = 10
>>> loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
>>> optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum)
>>> adv_defense = AdversarialDefense(net, loss_fn, optimizer)
>>> inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
@@ -81,6 +81,9 @@ class AdversarialDefense(Defense):

Returns:
numpy.ndarray, loss of defense operation.

Examples:
>>> adv_defense.defense(inputs, labels)
"""
inputs, labels = check_pair_numpy_param('inputs', inputs, 'labels',
labels)
@@ -110,22 +113,22 @@ class AdversarialDefenseWithAttacks(AdversarialDefense):
>>> from mindspore.nn.optim.momentum import Momentum
>>> from mindarmour.adv_robustness.attacks import FastGradientSignMethod
>>> from mindarmour.adv_robustness.attacks import ProjectedGradientDescent
>>> from mindarmour.adv_robustness.defenses import AdversarialDefense
>>> from mindarmour.adv_robustness.defenses import AdversarialDefenseWithAttacks
>>> from mindspore import nn
>>> from tests.ut.python.utils.mock_net import Net
>>> net = Net()
>>> lr = 0.001
>>> momentum = 0.9
>>> batch_size = 32
>>> num_class = 10
>>> num_classes = 10
>>> loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
>>> optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum)
>>> fgsm = FastGradientSignMethod(net, loss_fn=loss_fn)
>>> pgd = ProjectedGradientDescent(net, loss_fn=loss_fn)
>>> ead = AdversarialDefenseWithAttack(net, [fgsm, pgd], loss_fn=loss_fn,
... optimizer=optimizer)
>>> ead = AdversarialDefenseWithAttacks(net, [fgsm, pgd], loss_fn=loss_fn,
... optimizer=optimizer)
>>> inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
>>> labels = np.random.randint(num_class, size=batch_size).astype(np.int32)
>>> labels = np.random.randint(num_classes, size=batch_size).astype(np.int32)
>>> labels = np.eye(num_classes)[labels].astype(np.float32)
>>> loss = ead.defense(inputs, labels)
"""
@@ -154,6 +157,9 @@ class AdversarialDefenseWithAttacks(AdversarialDefense):

Returns:
numpy.ndarray, loss of adversarial defense operation.

Examples:
>>> adv_defense.defense(inputs, labels)
"""
inputs, labels = check_pair_numpy_param('inputs', inputs, 'labels',
labels)
@@ -205,7 +211,7 @@ class EnsembleAdversarialDefense(AdversarialDefenseWithAttacks):
>>> lr = 0.001
>>> momentum = 0.9
>>> batch_size = 32
>>> num_class = 10
>>> num_classes = 10
>>> loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
>>> optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum)
>>> fgsm = FastGradientSignMethod(net, loss_fn=loss_fn)
@@ -213,7 +219,7 @@ class EnsembleAdversarialDefense(AdversarialDefenseWithAttacks):
>>> ead = EnsembleAdversarialDefense(net, [fgsm, pgd], loss_fn=loss_fn,
... optimizer=optimizer)
>>> inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
>>> labels = np.random.randint(num_class, size=batch_size).astype(np.int32)
>>> labels = np.random.randint(num_classes, size=batch_size).astype(np.int32)
>>> labels = np.eye(num_classes)[labels].astype(np.float32)
>>> loss = ead.defense(inputs, labels)
"""


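The defense examples now follow the same one-hot label convention as the attacks. A sketch of a single adversarial-training step with the ensemble defense, assuming the same test mock net and that defense() is called once per batch inside an ordinary training loop:

```python
import numpy as np
from mindspore import nn
from mindspore.nn.optim.momentum import Momentum
from mindarmour.adv_robustness.attacks import FastGradientSignMethod
from mindarmour.adv_robustness.attacks import ProjectedGradientDescent
from mindarmour.adv_robustness.defenses import EnsembleAdversarialDefense
from tests.ut.python.utils.mock_net import Net  # source checkout only

net = Net()
batch_size, num_classes = 32, 10
loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
optimizer = Momentum(net.trainable_params(), learning_rate=0.001, momentum=0.9)
fgsm = FastGradientSignMethod(net, loss_fn=loss_fn)
pgd = ProjectedGradientDescent(net, loss_fn=loss_fn)
ead = EnsembleAdversarialDefense(net, [fgsm, pgd], loss_fn=loss_fn, optimizer=optimizer)
inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
labels = np.random.randint(num_classes, size=batch_size).astype(np.int32)
labels = np.eye(num_classes)[labels].astype(np.float32)
loss = ead.defense(inputs, labels)   # one step on the clean + adversarial batch
```
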
+2 -2  mindarmour/adv_robustness/defenses/natural_adversarial_defense.py

@@ -45,12 +45,12 @@ class NaturalAdversarialDefense(AdversarialDefenseWithAttacks):
>>> lr = 0.001
>>> momentum = 0.9
>>> batch_size = 32
>>> num_class = 10
>>> num_classes = 10
>>> loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
>>> optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum)
>>> nad = NaturalAdversarialDefense(net, loss_fn=loss_fn, optimizer=optimizer)
>>> inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
>>> labels = np.random.randint(num_class, size=batch_size).astype(np.int32)
>>> labels = np.random.randint(num_classes, size=batch_size).astype(np.int32)
>>> labels = np.eye(num_classes)[labels].astype(np.float32)
>>> loss = nad.defense(inputs, labels)
"""


+2 -2  mindarmour/adv_robustness/defenses/projected_adversarial_defense.py

@@ -50,12 +50,12 @@ class ProjectedAdversarialDefense(AdversarialDefenseWithAttacks):
>>> lr = 0.001
>>> momentum = 0.9
>>> batch_size = 32
>>> num_class = 10
>>> num_classes = 10
>>> loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
>>> optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum)
>>> pad = ProjectedAdversarialDefense(net, loss_fn=loss_fn, optimizer=optimizer)
>>> inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
>>> labels = np.random.randint(num_class, size=batch_size).astype(np.int32)
>>> labels = np.random.randint(num_classes, size=batch_size).astype(np.int32)
>>> labels = np.eye(num_classes)[labels].astype(np.float32)
>>> loss = pad.defense(inputs, labels)
"""


+33 -0  mindarmour/adv_robustness/detectors/black/similarity_detector.py

@@ -139,6 +139,13 @@ class SimilarityDetector(Detector):
Raises:
ValueError: The number of training data is less than
max_k_neighbor!

Examples:
>>> x_train = np.random.rand(10, 32, 32, 3).astype(np.float32)
>>> perm = np.random.permutation(x_train.shape[0])
>>> benign_queries = x_train[perm[:10], :, :, :]
>>> num_nearest_neighbors, thresholds = detector.fit(inputs=x_train)
>>> detector.set_threshold(num_nearest_neighbors[-1], thresholds[-1])
"""
data = check_numpy_param('inputs', inputs)
data_len = data.shape[0]
@@ -189,6 +196,14 @@ class SimilarityDetector(Detector):
Raises:
ValueError: The parameters of threshold or num_of_neighbors is
not available.

Examples:
>>> x_train = np.random.rand(10, 32, 32, 3).astype(np.float32)
>>> perm = np.random.permutation(x_train.shape[0])
>>> benign_queries = x_train[perm[:10], :, :, :]
>>> num_nearest_neighbors, thresholds = detector.fit(inputs=x_train)
>>> detector.set_threshold(num_nearest_neighbors[-1], thresholds[-1])
>>> detector.detect(benign_queries)
"""
if self._threshold is None or self._num_of_neighbors is None:
msg = 'Explicit detection threshold and number of nearest ' \
@@ -237,6 +252,8 @@ class SimilarityDetector(Detector):
"""
Clear the buffer memory.

Examples:
>>> detector.clear_buffer()
"""
while self._buffer:
self._buffer.pop()
@@ -248,6 +265,10 @@ class SimilarityDetector(Detector):
Args:
num_of_neighbors (int): Number of the nearest neighbors.
threshold (float): Detection threshold.

Examples:
>>> num_nearest_neighbors, thresholds = detector.fit(inputs=x_train)
>>> detector.set_threshold(num_nearest_neighbors[-1], thresholds[-1])
"""
self._num_of_neighbors = check_int_positive('num_of_neighbors',
num_of_neighbors)
@@ -259,6 +280,9 @@ class SimilarityDetector(Detector):

Returns:
list[int], number of queries between adjacent detections.

Examples:
>>> detector.get_detection_interval()
"""
detected_queries = self._detected_queries
interval = []
@@ -272,6 +296,9 @@ class SimilarityDetector(Detector):

Returns:
list[int], sequence number of detected malicious queries.

Examples:
>>> detector.get_detected_queries()
"""
detected_queries = self._detected_queries
return detected_queries
@@ -288,6 +315,9 @@ class SimilarityDetector(Detector):
Raises:
NotImplementedError: This function is not available
in class `SimilarityDetector`.

Examples:
>>> detector.detect_diff()
"""
msg = 'The function detect_diff() is not available in the class ' \
'`SimilarityDetector`.'
@@ -303,6 +333,9 @@ class SimilarityDetector(Detector):

Raises:
NotImplementedError: This function is not available in class `SimilarityDetector`.

Examples:
>>> detector.transform(x_train)
"""
msg = 'The function transform() is not available in the class `SimilarityDetector`.'
LOGGER.error(TAG, msg)


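The new method-level examples all presuppose a constructed detector. Pieced together, the monitoring cycle they document looks like the sketch below; the constructor is unchanged by this commit and therefore absent from the diff, so its arguments here (a feature-extraction Model passed as trans_model, plus max_k_neighbor) are taken from the class docstring and should be treated as an assumption:

```python
import numpy as np
from mindarmour.adv_robustness.detectors import SimilarityDetector

# Assumption: `encoder` is a mindspore.Model wrapping a feature-extraction
# network, as in the (unchanged) class docstring.
detector = SimilarityDetector(max_k_neighbor=3, trans_model=encoder)

x_train = np.random.rand(10, 32, 32, 3).astype(np.float32)
benign_queries = x_train[np.random.permutation(x_train.shape[0])[:10], :, :, :]

num_nearest_neighbors, thresholds = detector.fit(inputs=x_train)
detector.set_threshold(num_nearest_neighbors[-1], thresholds[-1])
detector.detect(benign_queries)
print(detector.get_detected_queries())     # sequence numbers of flagged queries
print(detector.get_detection_interval())   # query counts between detections
detector.clear_buffer()                    # reset between query streams
```
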
+17 -7  mindarmour/adv_robustness/detectors/mag_net.py

@@ -184,21 +184,31 @@ class DivergenceBasedDetector(ErrorBasedDetector):

Examples:
>>> import numpy as np
>>> from mindspore.ops.operations import Add
>>> import mindspore.ops.operations as P
>>> from mindspore.nn import Cell
>>> from mindspore import Model
>>> from mindspore import context
>>> from mindarmour.adv_robustness.detectors import ErrorBasedDetector
>>> from mindarmour.adv_robustness.detectors import DivergenceBasedDetector
>>> class PredNet(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.add = Add()
>>> def construct(self, inputs):
>>> return self.add(inputs, inputs)
... def __init__(self):
... super(PredNet, self).__init__()
... self.shape = P.Shape()
... self.reshape = P.Reshape()
... self._softmax = P.Softmax()
... def construct(self, inputs):
... data = self.reshape(inputs, (self.shape(inputs)[0], -1))
... return self._softmax(data)
>>> class Net(Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.add = P.Add()
... def construct(self, inputs):
... return self.add(inputs, inputs)
>>> np.random.seed(5)
>>> ori = np.random.rand(4, 4, 4).astype(np.float32)
>>> np.random.seed(6)
>>> adv = np.random.rand(4, 4, 4).astype(np.float32)
>>> encoder = Model(Net())
>>> model = Model(PredNet())
>>> detector = DivergenceBasedDetector(encoder, model)
>>> threshold = detector.fit(ori)


+1 -1  mindarmour/adv_robustness/detectors/region_based_detector.py

@@ -57,7 +57,7 @@ class RegionBasedDetector(Detector):
>>> from mindspore.nn import Cell
>>> from mindspore import Model
>>> from mindspore import context
>>> from mindarmour.adv_robustness.detectors import ErrorBasedDetector
>>> from mindarmour.adv_robustness.detectors import RegionBasedDetector
>>> class Net(Cell):
... def __init__(self):
... super(Net, self).__init__()


+2 -24  mindarmour/adv_robustness/evaluations/attack_evaluation.py

@@ -63,6 +63,8 @@ class AttackEvaluate:
>>> l_0, l_2, l_inf = attack_eval.avg_lp_distance()
>>> ass = attack_eval.avg_ssim()
>>> nte = attack_eval.nte()
>>> actc = attack_eval.avg_conf_true_class()

"""

def __init__(self, inputs, labels, adv_inputs, adv_preds,
@@ -103,10 +105,6 @@ class AttackEvaluate:

Returns:
float, ranges between (0, 1). The higher, the more successful the attack is.

Examples:
>>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y)
>>> mr = attack_eval.mis_classification_rate()
"""
return self._success_idxes.shape[0]*1.0 / self._inputs.shape[0]

@@ -116,10 +114,6 @@ class AttackEvaluate:

Returns:
float, ranges between (0, 1). The higher, the more successful the attack is.

Examples:
>>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y)
>>> acac = attack_eval.avg_conf_adv_class()
"""
idxes = self._success_idxes
success_num = idxes.shape[0]
@@ -135,10 +129,6 @@ class AttackEvaluate:

Returns:
float, ranges between (0, 1). The lower, the more successful the attack is.

Examples:
>>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y)
>>> acac = attack_eval.avg_conf_adv_class()
"""
idxes = self._success_idxes
success_num = idxes.shape[0]
@@ -158,10 +148,6 @@ class AttackEvaluate:
the more successful the attack is.

- If return value is -1, there is no success adversarial examples.

Examples:
>>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y)
>>> l_0, l_2, l_inf = attack_eval.avg_lp_distance()
"""
idxes = self._success_idxes
success_num = idxes.shape[0]
@@ -190,10 +176,6 @@ class AttackEvaluate:
successful the attack is.

- If return value is -1: there is no success adversarial examples.

Examples:
>>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y)
>>> ass = attack_eval.avg_ssim()
"""
success_num = self._success_idxes.shape[0]
if success_num == 0:
@@ -215,10 +197,6 @@ class AttackEvaluate:
Returns:
float, ranges between (0, 1). The higher, the more successful the
attack is.

Examples:
>>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y)
>>> nte = attack_eval.nte()
"""
idxes = self._success_idxes
success_num = idxes.shape[0]


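All of the removed per-metric examples reduce to constructing one AttackEvaluate and calling its accessors. A sketch with stand-in arrays (the shapes are assumptions: one-hot labels and probability outputs, as the class example at the top of this hunk implies):

```python
import numpy as np
from mindarmour.adv_robustness.evaluations import AttackEvaluate

# Stand-in data; in practice these come from a real attack run.
n, classes = 4, 10
x = np.random.rand(n, 1, 32, 32).astype(np.float32)
adv_x = (x + 0.05 * np.sign(np.random.randn(*x.shape))).astype(np.float32)
y = np.eye(classes)[np.random.randint(classes, size=n)].astype(np.float32)
adv_y = np.random.rand(n, classes).astype(np.float32)   # model outputs on adv_x
adv_y /= adv_y.sum(axis=1, keepdims=True)

attack_eval = AttackEvaluate(x, y, adv_x, adv_y)
mr = attack_eval.mis_classification_rate()
acac = attack_eval.avg_conf_adv_class()
actc = attack_eval.avg_conf_true_class()
l_0, l_2, l_inf = attack_eval.avg_lp_distance()
ass = attack_eval.avg_ssim()
nte = attack_eval.nte()
```
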
+5 -1  mindarmour/adv_robustness/evaluations/defense_evaluation.py

@@ -52,7 +52,11 @@ class DefenseEvaluate:
>>> def_eval = DefenseEvaluate(raw_preds,
... def_preds,
... true_labels)
>>> def_eval.cav()
>>> cav = def_eval.cav()
>>> crr = def_eval.crr()
>>> csr = def_eval.csr()
>>> ccv = def_eval.ccv()
>>> cos = def_eval.cos()
"""
def __init__(self, raw_preds, def_preds, true_labels):
self._raw_preds, self._def_preds = check_pair_numpy_param('raw_preds',


+1 -1  mindarmour/adv_robustness/evaluations/visual_metrics.py

@@ -58,7 +58,7 @@ class RadarMetric:
... metrics_labels,
... title='',
... scale='sparse')
>>> rm.show()
>>> #rm.show()
"""
def __init__(self, metrics_name, metrics_data, labels, title, scale='hide'):

