
Add API Example for Defense Module and fix minor issues

tags/v1.6.0
shu-kun-zhang committed 3 years ago
commit ea7dc4ceef
22 changed files with 296 additions and 258 deletions
  1. +12 -12  mindarmour/adv_robustness/attacks/black/genetic_attack.py
  2. +16 -16  mindarmour/adv_robustness/attacks/black/hop_skip_jump_attack.py
  3. +16 -16  mindarmour/adv_robustness/attacks/black/natural_evolutionary_strategy.py
  4. +12 -12  mindarmour/adv_robustness/attacks/black/pointwise_attack.py
  5. +30 -30  mindarmour/adv_robustness/attacks/black/pso_attack.py
  6. +12 -12  mindarmour/adv_robustness/attacks/black/salt_and_pepper_attack.py
  7. +14 -18  mindarmour/adv_robustness/attacks/carlini_wagner.py
  8. +12 -12  mindarmour/adv_robustness/attacks/deep_fool.py
  9. +43 -45  mindarmour/adv_robustness/attacks/gradient_method.py
  10. +24 -24  mindarmour/adv_robustness/attacks/iterative_gradient_method.py
  11. +12 -12  mindarmour/adv_robustness/attacks/jsma.py
  12. +0 -1  mindarmour/adv_robustness/attacks/lbfgs.py
  13. +1 -6  mindarmour/adv_robustness/defenses/adversarial_defense.py
  14. +8 -8  mindarmour/adv_robustness/detectors/black/similarity_detector.py
  15. +10 -11  mindarmour/adv_robustness/detectors/ensemble_detector.py
  16. +5 -5  mindarmour/adv_robustness/detectors/mag_net.py
  17. +5 -5  mindarmour/adv_robustness/detectors/region_based_detector.py
  18. +6 -6  mindarmour/adv_robustness/detectors/spatial_smoothing.py
  19. +30 -1  mindarmour/adv_robustness/evaluations/attack_evaluation.py
  20. +10 -5  mindarmour/adv_robustness/evaluations/black/defense_evaluation.py
  21. +15 -0  mindarmour/adv_robustness/evaluations/defense_evaluation.py
  22. +3 -1  mindarmour/adv_robustness/evaluations/visual_metrics.py

+12 -12  mindarmour/adv_robustness/attacks/black/genetic_attack.py

@@ -76,19 +76,19 @@ class GeneticAttack(Attack):
>>> from mindarmour import BlackModel
>>> from mindarmour.adv_robustness.attacks import GeneticAttack
>>> class ModelToBeAttacked(BlackModel):
>>> def __init__(self, network):
>>> super(ModelToBeAttacked, self).__init__()
>>> self._network = network
>>> def predict(self, inputs):
>>> result = self._network(Tensor(inputs.astype(np.float32)))
>>> return result.asnumpy()
... def __init__(self, network):
... super(ModelToBeAttacked, self).__init__()
... self._network = network
... def predict(self, inputs):
... result = self._network(Tensor(inputs.astype(np.float32)))
... return result.asnumpy()
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._softmax = M.Softmax()
>>> def construct(self, inputs):
>>> out = self._softmax(inputs)
>>> return out
... def __init__(self):
... super(Net, self).__init__()
... self._softmax = M.Softmax()
... def construct(self, inputs):
... out = self._softmax(inputs)
... return out
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = GeneticAttack(model, sparse=False)
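
A complete, self-contained version of this example can be assembled from the updated docstring; the sketch below is illustrative only, and the toy input shapes, the context.set_context call, and the result variable name are assumptions rather than part of the committed docstring.

import numpy as np
import mindspore.ops.operations as M
from mindspore import Tensor, context
from mindspore.nn import Cell
from mindarmour import BlackModel
from mindarmour.adv_robustness.attacks import GeneticAttack

context.set_context(mode=context.GRAPH_MODE, device_target="CPU")

class Net(Cell):
    """Toy classifier: a bare softmax over the input scores."""
    def __init__(self):
        super(Net, self).__init__()
        self._softmax = M.Softmax()
    def construct(self, inputs):
        return self._softmax(inputs)

class ModelToBeAttacked(BlackModel):
    """Black-box wrapper: the attack only ever calls predict()."""
    def __init__(self, network):
        super(ModelToBeAttacked, self).__init__()
        self._network = network
    def predict(self, inputs):
        result = self._network(Tensor(inputs.astype(np.float32)))
        return result.asnumpy()

net = Net()
model = ModelToBeAttacked(net)
attack = GeneticAttack(model, sparse=False)          # sparse=False -> one-hot labels
inputs = np.random.rand(2, 10).astype(np.float32)    # 2 samples, 10 classes (assumed shapes)
labels = np.eye(10)[np.random.randint(0, 10, 2)].astype(np.float32)
result = attack.generate(inputs, labels)             # attack output for this batch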


+16 -16  mindarmour/adv_robustness/attacks/black/hop_skip_jump_attack.py

@@ -81,14 +81,14 @@ class HopSkipJumpAttack(Attack):
>>> from mindarmour.adv_robustness.attacks import HopSkipJumpAttack
>>> from tests.ut.python.utils.mock_net import Net
>>> class ModelToBeAttacked(BlackModel):
>>> def __init__(self, network):
>>> super(ModelToBeAttacked, self).__init__()
>>> self._network = network
>>> def predict(self, inputs):
>>> if len(inputs.shape) == 3:
>>> inputs = inputs[np.newaxis, :]
>>> result = self._network(Tensor(inputs.astype(np.float32)))
>>> return result.asnumpy()
... def __init__(self, network):
... super(ModelToBeAttacked, self).__init__()
... self._network = network
... def predict(self, inputs):
... if len(inputs.shape) == 3:
... inputs = inputs[np.newaxis, :]
... result = self._network(Tensor(inputs.astype(np.float32)))
... return result.asnumpy()
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = HopSkipJumpAttack(model)
@@ -191,14 +191,14 @@ class HopSkipJumpAttack(Attack):
>>> from mindarmour.adv_robustness.attacks import HopSkipJumpAttack
>>> from tests.ut.python.utils.mock_net import Net
>>> class ModelToBeAttacked(BlackModel):
>>> def __init__(self, network):
>>> super(ModelToBeAttacked, self).__init__()
>>> self._network = network
>>> def predict(self, inputs):
>>> if len(inputs.shape) == 3:
>>> inputs = inputs[np.newaxis, :]
>>> result = self._network(Tensor(inputs.astype(np.float32)))
>>> return result.asnumpy()
... def __init__(self, network):
... super(ModelToBeAttacked, self).__init__()
... self._network = network
... def predict(self, inputs):
... if len(inputs.shape) == 3:
... inputs = inputs[np.newaxis, :]
... result = self._network(Tensor(inputs.astype(np.float32)))
... return result.asnumpy()
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = HopSkipJumpAttack(model)


+16 -16  mindarmour/adv_robustness/attacks/black/natural_evolutionary_strategy.py

@@ -85,14 +85,14 @@ class NES(Attack):
>>> from mindarmour.adv_robustness.attacks import NES
>>> from tests.ut.python.utils.mock_net import Net
>>> class ModelToBeAttacked(BlackModel):
>>> def __init__(self, network):
>>> super(ModelToBeAttacked, self).__init__()
>>> self._network = network
>>> def predict(self, inputs):
>>> if len(inputs.shape) == 3:
>>> inputs = inputs[np.newaxis, :]
>>> result = self._network(Tensor(inputs.astype(np.float32)))
>>> return result.asnumpy()
... def __init__(self, network):
... super(ModelToBeAttacked, self).__init__()
... self._network = network
... def predict(self, inputs):
... if len(inputs.shape) == 3:
... inputs = inputs[np.newaxis, :]
... result = self._network(Tensor(inputs.astype(np.float32)))
... return result.asnumpy()
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> SCENE = 'Query_Limit'
@@ -161,14 +161,14 @@ class NES(Attack):
>>> from mindarmour.adv_robustness.attacks import NES
>>> from tests.ut.python.utils.mock_net import Net
>>> class ModelToBeAttacked(BlackModel):
>>> def __init__(self, network):
>>> super(ModelToBeAttacked, self).__init__()
>>> self._network = network
>>> def predict(self, inputs):
>>> if len(inputs.shape) == 3:
>>> inputs = inputs[np.newaxis, :]
>>> result = self._network(Tensor(inputs.astype(np.float32)))
>>> return result.asnumpy()
... def __init__(self, network):
... super(ModelToBeAttacked, self).__init__()
... self._network = network
... def predict(self, inputs):
... if len(inputs.shape) == 3:
... inputs = inputs[np.newaxis, :]
... result = self._network(Tensor(inputs.astype(np.float32)))
... return result.asnumpy()
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> SCENE = 'Query_Limit'


+12 -12  mindarmour/adv_robustness/attacks/black/pointwise_attack.py

@@ -53,12 +53,12 @@ class PointWiseAttack(Attack):
>>> from mindarmour.adv_robustness.attacks import PointWiseAttack
>>> from tests.ut.python.utils.mock_net import Net
>>> class ModelToBeAttacked(BlackModel):
>>> def __init__(self, network):
>>> super(ModelToBeAttacked, self).__init__()
>>> self._network = network
>>> def predict(self, inputs):
>>> result = self._network(Tensor(inputs.astype(np.float32)))
>>> return result.asnumpy()
... def __init__(self, network):
... super(ModelToBeAttacked, self).__init__()
... self._network = network
... def predict(self, inputs):
... result = self._network(Tensor(inputs.astype(np.float32)))
... return result.asnumpy()
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = PointWiseAttack(model)
@@ -99,12 +99,12 @@ class PointWiseAttack(Attack):
>>> from mindarmour.adv_robustness.attacks import PointWiseAttack
>>> from tests.ut.python.utils.mock_net import Net
>>> class ModelToBeAttacked(BlackModel):
>>> def __init__(self, network):
>>> super(ModelToBeAttacked, self).__init__()
>>> self._network = network
>>> def predict(self, inputs):
>>> result = self._network(Tensor(inputs.astype(np.float32)))
>>> return result.asnumpy()
... def __init__(self, network):
... super(ModelToBeAttacked, self).__init__()
... self._network = network
... def predict(self, inputs):
... result = self._network(Tensor(inputs.astype(np.float32)))
... return result.asnumpy()
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = PointWiseAttack(model)


+30 -30  mindarmour/adv_robustness/attacks/black/pso_attack.py

@@ -71,22 +71,22 @@ class PSOAttack(Attack):
>>> from mindarmour import BlackModel
>>> from mindarmour.adv_robustness.attacks import PSOAttack
>>> class ModelToBeAttacked(BlackModel):
>>> def __init__(self, network):
>>> super(ModelToBeAttacked, self).__init__()
>>> self._network = network
>>> def predict(self, inputs):
>>> if len(inputs.shape) == 1:
>>> inputs = np.expand_dims(inputs, axis=0)
>>> result = self._network(Tensor(inputs.astype(np.float32)))
>>> return result.asnumpy()
... def __init__(self, network):
... super(ModelToBeAttacked, self).__init__()
... self._network = network
... def predict(self, inputs):
... if len(inputs.shape) == 1:
... inputs = np.expand_dims(inputs, axis=0)
... result = self._network(Tensor(inputs.astype(np.float32)))
... return result.asnumpy()
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>>
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
... def __init__(self):
... super(Net, self).__init__()
... self._relu = nn.ReLU()
...
... def construct(self, inputs):
... out = self._relu(inputs)
... return out
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=False)
@@ -516,22 +516,22 @@ class PSOAttack(Attack):
>>> from mindarmour import BlackModel
>>> from mindarmour.adv_robustness.attacks import PSOAttack
>>> class ModelToBeAttacked(BlackModel):
>>> def __init__(self, network):
>>> super(ModelToBeAttacked, self).__init__()
>>> self._network = network
>>> def predict(self, inputs):
>>> if len(inputs.shape) == 1:
>>> inputs = np.expand_dims(inputs, axis=0)
>>> result = self._network(Tensor(inputs.astype(np.float32)))
>>> return result.asnumpy()
... def __init__(self, network):
... super(ModelToBeAttacked, self).__init__()
... self._network = network
... def predict(self, inputs):
... if len(inputs.shape) == 1:
... inputs = np.expand_dims(inputs, axis=0)
... result = self._network(Tensor(inputs.astype(np.float32)))
... return result.asnumpy()
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>>
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
... def __init__(self):
... super(Net, self).__init__()
... self._relu = nn.ReLU()
...
... def construct(self, inputs):
... out = self._relu(inputs)
... return out
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=False)


+12 -12  mindarmour/adv_robustness/attacks/black/salt_and_pepper_attack.py

@@ -46,12 +46,12 @@ class SaltAndPepperNoiseAttack(Attack):
>>> from mindarmour.adv_robustness.attacks import SaltAndPepperNoiseAttack
>>> from tests.ut.python.utils.mock_net import Net
>>> class ModelToBeAttacked(BlackModel):
>>> def __init__(self, network):
>>> super(ModelToBeAttacked, self).__init__()
>>> self._network = network
>>> def predict(self, inputs):
>>> result = self._network(Tensor(inputs.astype(np.float32)))
>>> return result.asnumpy()
... def __init__(self, network):
... super(ModelToBeAttacked, self).__init__()
... self._network = network
... def predict(self, inputs):
... result = self._network(Tensor(inputs.astype(np.float32)))
... return result.asnumpy()
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = SaltAndPepperNoiseAttack(model)
@@ -89,12 +89,12 @@ class SaltAndPepperNoiseAttack(Attack):
>>> from mindarmour.adv_robustness.attacks import SaltAndPepperNoiseAttack
>>> from tests.ut.python.utils.mock_net import Net
>>> class ModelToBeAttacked(BlackModel):
>>> def __init__(self, network):
>>> super(ModelToBeAttacked, self).__init__()
>>> self._network = network
>>> def predict(self, inputs):
>>> result = self._network(Tensor(inputs.astype(np.float32)))
>>> return result.asnumpy()
... def __init__(self, network):
... super(ModelToBeAttacked, self).__init__()
... self._network = network
... def predict(self, inputs):
... result = self._network(Tensor(inputs.astype(np.float32)))
... return result.asnumpy()
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = SaltAndPepperNoiseAttack(model)


+14 -18  mindarmour/adv_robustness/attacks/carlini_wagner.py

@@ -100,13 +100,13 @@ class CarliniWagnerL2Attack(Attack):
>>> from mindspore.nn import Cell
>>> from mindarmour.adv_robustness.attacks import CarliniWagnerL2Attack
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._softmax = M.Softmax()
>>>
>>> def construct(self, inputs):
>>> out = self._softmax(inputs)
>>> return out
... def __init__(self):
... super(Net, self).__init__()
... self._softmax = M.Softmax()
...
... def construct(self, inputs):
... out = self._softmax(inputs)
... return out
>>> input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
>>> label_np = np.array([3]).astype(np.int64)
>>> num_classes = input_np.shape[1]
@@ -286,23 +286,19 @@ class CarliniWagnerL2Attack(Attack):
>>> import mindspore.ops.operations as M
>>> from mindspore.nn import Cell
>>> from mindarmour.adv_robustness.attacks import CarliniWagnerL2Attack
>>>
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._softmax = M.Softmax()
>>>
>>> def construct(self, inputs):
>>> out = self._softmax(inputs)
>>> return out
>>>
... def __init__(self):
... super(Net, self).__init__()
... self._softmax = M.Softmax()
...
... def construct(self, inputs):
... out = self._softmax(inputs)
... return out
>>> input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
>>> num_classes = input_np.shape[1]
>>>
>>> label_np = np.array([3]).astype(np.int64)
>>> attack_nonTargeted = CarliniWagnerL2Attack(net, num_classes, targeted=False)
>>> advs_nonTargeted = attack_nonTargeted.generate(input_np, label_np)
>>>
>>> target_np = np.array([1]).astype(np.int64)
>>> attack_targeted = CarliniWagnerL2Attack(net, num_classes, targeted=True)
>>> advs_targeted = attack_targeted.generate(input_np, target_np)
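
For reference, a fully runnable variant of the non-targeted example might look as follows; the net = Net() line and the result comment are filled in here for illustration, since they fall outside the hunk shown above.

import numpy as np
import mindspore.ops.operations as M
from mindspore.nn import Cell
from mindarmour.adv_robustness.attacks import CarliniWagnerL2Attack

class Net(Cell):
    """Toy classifier: softmax over five input scores."""
    def __init__(self):
        super(Net, self).__init__()
        self._softmax = M.Softmax()
    def construct(self, inputs):
        return self._softmax(inputs)

net = Net()
input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
label_np = np.array([3]).astype(np.int64)
num_classes = input_np.shape[1]
attack = CarliniWagnerL2Attack(net, num_classes, targeted=False)
adv_data = attack.generate(input_np, label_np)   # adversarial samples as a numpy array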


+12 -12  mindarmour/adv_robustness/attacks/deep_fool.py

@@ -123,12 +123,12 @@ class DeepFool(Attack):
>>> from mindspore import Tensor
>>> from mindarmour.adv_robustness.attacks import DeepFool
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._softmax = P.Softmax()
>>> def construct(self, inputs):
>>> out = self._softmax(inputs)
>>> return out
... def __init__(self):
... super(Net, self).__init__()
... self._softmax = P.Softmax()
... def construct(self, inputs):
... out = self._softmax(inputs)
... return out
>>> net = Net()
>>> attack = DeepFool(net, classes, max_iters=10, norm_level=2,
... bounds=(0.0, 1.0))
@@ -185,12 +185,12 @@ class DeepFool(Attack):
>>> from mindspore import Tensor
>>> from mindarmour.adv_robustness.attacks import DeepFool
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._softmax = P.Softmax()
>>> def construct(self, inputs):
>>> out = self._softmax(inputs)
>>> return out
... def __init__(self):
... super(Net, self).__init__()
... self._softmax = P.Softmax()
... def construct(self, inputs):
... out = self._softmax(inputs)
... return out
>>> net = Net()
>>> attack = DeepFool(net, classes, max_iters=10, norm_level=2,
... bounds=(0.0, 1.0))


+43 -45  mindarmour/adv_robustness/attacks/gradient_method.py

@@ -53,13 +53,13 @@ class GradientMethod(Attack):
>>> from mindspore import Tensor
>>> from mindarmour.adv_robustness.attacks import FastGradientMethod
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>>
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
... def __init__(self):
... super(Net, self).__init__()
... self._relu = nn.ReLU()
...
... def construct(self, inputs):
... out = self._relu(inputs)
... return out
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
>>> net = Net()
@@ -174,12 +174,12 @@ class FastGradientMethod(GradientMethod):
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import FastGradientMethod
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
... def __init__(self):
... super(Net, self).__init__()
... self._relu = nn.ReLU()
... def construct(self, inputs):
... out = self._relu(inputs)
... return out
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
>>> net = Net()
@@ -254,12 +254,12 @@ class RandomFastGradientMethod(FastGradientMethod):
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import RandomFastGradientMethod
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
... def __init__(self):
... super(Net, self).__init__()
... self._relu = nn.ReLU()
... def construct(self, inputs):
... out = self._relu(inputs)
... return out
>>> net = Net()
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
@@ -308,12 +308,12 @@ class FastGradientSignMethod(GradientMethod):
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import FastGradientSignMethod
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
... def __init__(self):
... super(Net, self).__init__()
... self._relu = nn.ReLU()
... def construct(self, inputs):
... out = self._relu(inputs)
... return out
>>> net = Net()
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
@@ -383,14 +383,13 @@ class RandomFastGradientSignMethod(FastGradientSignMethod):
>>> import mindspore.nn as nn
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import RandomFastGradientSignMethod
>>>
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
... def __init__(self):
... super(Net, self).__init__()
... self._relu = nn.ReLU()
... def construct(self, inputs):
... out = self._relu(inputs)
... return out
>>> net = Net()
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
@@ -434,13 +433,12 @@ class LeastLikelyClassMethod(FastGradientSignMethod):
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import LeastLikelyClassMethod
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>>
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
... def __init__(self):
... super(Net, self).__init__()
... self._relu = nn.ReLU()
... def construct(self, inputs):
... out = self._relu(inputs)
... return out
>>> net = Net()
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
@@ -485,12 +483,12 @@ class RandomLeastLikelyClassMethod(FastGradientSignMethod):
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import RandomLeastLikelyClassMethod
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
... def __init__(self):
... super(Net, self).__init__()
... self._relu = nn.ReLU()
... def construct(self, inputs):
... out = self._relu(inputs)
... return out
>>> net = Net()
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
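
A minimal end-to-end sketch built from these docstring fragments; note that the label arrays here use three columns so they match the three-column inputs, and the eps value is an illustrative choice.

import numpy as np
import mindspore.nn as nn
from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
from mindarmour.adv_robustness.attacks import FastGradientMethod

class Net(Cell):
    """Toy network: element-wise ReLU, so the logits are relu(inputs)."""
    def __init__(self):
        super(Net, self).__init__()
        self._relu = nn.ReLU()
    def construct(self, inputs):
        return self._relu(inputs)

net = Net()
inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0.0, 0.4]]).astype(np.float32)
labels = np.array([[0, 1, 0], [0, 0, 1]]).astype(np.float32)  # one-hot, 3 classes
attack = FastGradientMethod(net, eps=0.07,
                            loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
adv_x = attack.generate(inputs, labels)   # perturbed copies of `inputs`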


+24 -24  mindarmour/adv_robustness/attacks/iterative_gradient_method.py

@@ -189,12 +189,12 @@ class BasicIterativeMethod(IterativeGradientMethod):
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import BasicIterativeMethod
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
... def __init__(self):
... super(Net, self).__init__()
... self._relu = nn.ReLU()
... def construct(self, inputs):
... out = self._relu(inputs)
... return out
>>> net = Net()
>>> attack = BasicIterativeMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
"""
@@ -232,12 +232,12 @@ class BasicIterativeMethod(IterativeGradientMethod):
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import BasicIterativeMethod
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
... def __init__(self):
... super(Net, self).__init__()
... self._relu = nn.ReLU()
... def construct(self, inputs):
... out = self._relu(inputs)
... return out
>>> net = Net()
>>> attack = BasicIterativeMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
>>> adv_x = attack.generate([[0.3, 0.2, 0.6],
@@ -333,12 +333,12 @@ class MomentumIterativeMethod(IterativeGradientMethod):
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import MomentumIterativeMethod
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
... def __init__(self):
... super(Net, self).__init__()
... self._relu = nn.ReLU()
... def construct(self, inputs):
... out = self._relu(inputs)
... return out
>>> net = Net()
>>> attack = MomentumIterativeMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
>>> adv_x = attack.generate([[0.5, 0.2, 0.6],
@@ -476,12 +476,12 @@ class ProjectedGradientDescent(BasicIterativeMethod):
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import ProjectedGradientDescent
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
... def __init__(self):
... super(Net, self).__init__()
... self._relu = nn.ReLU()
... def construct(self, inputs):
... out = self._relu(inputs)
... return out
>>> net = Net()
>>> attack = ProjectedGradientDescent(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
>>> adv_x = attack.generate([[0.6, 0.2, 0.6],


+12 -12  mindarmour/adv_robustness/attacks/jsma.py

@@ -59,12 +59,12 @@ class JSMAAttack(Attack):
>>> from mindspore.nn import Cell
>>> from mindarmour.adv_robustness.attacks import JSMAAttack
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
... def __init__(self):
... super(Net, self).__init__()
... self._relu = nn.ReLU()
... def construct(self, inputs):
... out = self._relu(inputs)
... return out
>>> net = Net()
>>> input_shape = (1, 5)
>>> batch_size, classes = input_shape
@@ -200,12 +200,12 @@ class JSMAAttack(Attack):
>>> from mindspore.nn import Cell
>>> from mindarmour.adv_robustness.attacks import JSMAAttack
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
... def __init__(self):
... super(Net, self).__init__()
... self._relu = nn.ReLU()
... def construct(self, inputs):
... out = self._relu(inputs)
... return out
>>> net = Net()
>>> input_shape = (1, 5)
>>> batch_size, classes = input_shape


+0 -1  mindarmour/adv_robustness/attacks/lbfgs.py

@@ -57,7 +57,6 @@ class LBFGS(Attack):
>>> import numpy as np
>>> from mindarmour.adv_robustness.attacks import LBFGS
>>> from tests.ut.python.utils.mock_net import Net
>>>
>>> net = Net()
>>> attack = LBFGS(net, is_targeted=True)
"""


+1 -6  mindarmour/adv_robustness/defenses/adversarial_defense.py

@@ -41,13 +41,11 @@ class AdversarialDefense(Defense):
>>> from mindarmour.adv_robustness.defenses import AdversarialDefense
>>> from mindspore import nn
>>> from tests.ut.python.utils.mock_net import Net
>>>
>>> net = Net()
>>> lr = 0.001
>>> momentum = 0.9
>>> batch_size = 32
>>> num_class = 10
>>>
>>> loss_fn = SoftmaxCrossEntropyWithLogits(sparse=False)
>>> optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum)
>>> adv_defense = AdversarialDefense(net, loss_fn, optimizer)
@@ -203,7 +201,6 @@ class EnsembleAdversarialDefense(AdversarialDefenseWithAttacks):
>>> from mindarmour.adv_robustness.defenses import EnsembleAdversarialDefense
>>> from mindspore import nn
>>> from tests.ut.python.utils.mock_net import Net
>>>
>>> net = Net()
>>> lr = 0.001
>>> momentum = 0.9
@@ -211,12 +208,10 @@ class EnsembleAdversarialDefense(AdversarialDefenseWithAttacks):
>>> num_class = 10
>>> loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
>>> optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum)
>>>
>>> fgsm = FastGradientSignMethod(net, loss_fn=loss_fn)
>>> pgd = ProjectedGradientDescent(net, loss_fn=loss_fn)
>>> ead = EnsembleAdversarialDefense(net, [fgsm, pgd], loss_fn=loss_fn,
>>> optimizer=optimizer)
>>>
... optimizer=optimizer)
>>> inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
>>> labels = np.random.randint(num_class, size=batch_size).astype(np.int32)
>>> labels = np.eye(num_class)[labels].astype(np.float32)
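
A runnable sketch of the AdversarialDefense flow above; the small network below stands in for tests.ut.python.utils.mock_net.Net (which ships with the test suite, not the package), and its 1x32x32 input shape follows the docstring.

import numpy as np
import mindspore.nn as nn
from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
from mindspore.nn.optim.momentum import Momentum
from mindarmour.adv_robustness.defenses import AdversarialDefense

class Net(Cell):
    """Stand-in classifier for 1x32x32 inputs and 10 classes."""
    def __init__(self):
        super(Net, self).__init__()
        self._flatten = nn.Flatten()
        self._fc = nn.Dense(32 * 32, 10)
    def construct(self, inputs):
        return self._fc(self._flatten(inputs))

net = Net()
loss_fn = SoftmaxCrossEntropyWithLogits(sparse=False)
optimizer = Momentum(net.trainable_params(), learning_rate=0.001, momentum=0.9)
adv_defense = AdversarialDefense(net, loss_fn, optimizer)

batch_size, num_class = 32, 10
inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
labels = np.random.randint(num_class, size=batch_size).astype(np.int32)
labels = np.eye(num_class)[labels].astype(np.float32)
adv_defense.defense(inputs, labels)   # one training step on this (possibly adversarial) batch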


+8 -8  mindarmour/adv_robustness/detectors/black/similarity_detector.py

@@ -80,14 +80,14 @@ class SimilarityDetector(Detector):
>>> from mindspore import context
>>> from mindarmour.adv_robustness.detectors import SimilarityDetector
>>> class EncoderNet(Cell):
>>> def __init__(self, encode_dim):
>>> super(EncoderNet, self).__init__()
>>> self._encode_dim = encode_dim
>>> self.add = Add()
>>> def construct(self, inputs):
>>> return self.add(inputs, inputs)
>>> def get_encode_dim(self):
>>> return self._encode_dim
... def __init__(self, encode_dim):
... super(EncoderNet, self).__init__()
... self._encode_dim = encode_dim
... self.add = Add()
... def construct(self, inputs):
... return self.add(inputs, inputs)
... def get_encode_dim(self):
... return self._encode_dim
>>> np.random.seed(5)
>>> x_train = np.random.rand(10, 32, 32, 3).astype(np.float32)
>>> perm = np.random.permutation(x_train.shape[0])


+10 -11  mindarmour/adv_robustness/detectors/ensemble_detector.py

@@ -43,18 +43,17 @@ class EnsembleDetector(Detector):
>>> from mindarmour.adv_robustness.detectors import RegionBasedDetector
>>> from mindarmour.adv_robustness.detectors import EnsembleDetector
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.add = Add()
>>> def construct(self, inputs):
>>> return self.add(inputs, inputs)
>>>
... def __init__(self):
... super(Net, self).__init__()
... self.add = Add()
... def construct(self, inputs):
... return self.add(inputs, inputs)
>>> class AutoNet(Cell):
>>> def __init__(self):
>>> super(AutoNet, self).__init__()
>>> self.add = Add()
>>> def construct(self, inputs):
>>> return self.add(inputs, inputs)
... def __init__(self):
... super(AutoNet, self).__init__()
... self.add = Add()
... def construct(self, inputs):
... return self.add(inputs, inputs)
>>> np.random.seed(6)
>>> adv = np.random.rand(4, 4).astype(np.float32)
>>> model = Model(Net())


+5 -5  mindarmour/adv_robustness/detectors/mag_net.py

@@ -55,11 +55,11 @@ class ErrorBasedDetector(Detector):
>>> from mindspore import context
>>> from mindarmour.adv_robustness.detectors import ErrorBasedDetector
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.add = Add()
>>> def construct(self, inputs):
>>> return self.add(inputs, inputs)
... def __init__(self):
... super(Net, self).__init__()
... self.add = Add()
... def construct(self, inputs):
... return self.add(inputs, inputs)
>>> np.random.seed(5)
>>> ori = np.random.rand(4, 4, 4).astype(np.float32)
>>> np.random.seed(6)


+5 -5  mindarmour/adv_robustness/detectors/region_based_detector.py

@@ -59,11 +59,11 @@ class RegionBasedDetector(Detector):
>>> from mindspore import context
>>> from mindarmour.adv_robustness.detectors import ErrorBasedDetector
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.add = Add()
>>> def construct(self, inputs):
>>> return self.add(inputs, inputs)
... def __init__(self):
... super(Net, self).__init__()
... self.add = Add()
... def construct(self, inputs):
... return self.add(inputs, inputs)
>>> np.random.seed(5)
>>> ori = np.random.rand(4, 4).astype(np.float32)
>>> labels = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0],


+6 -6  mindarmour/adv_robustness/detectors/spatial_smoothing.py

@@ -50,17 +50,17 @@ class SpatialSmoothing(Detector):

Examples:
>>> import numpy as np
>>> from mindspore.ops.operations as P
>>> import mindspore.ops.operations as P
>>> from mindspore.nn import Cell
>>> from mindspore import Model
>>> from mindspore import context
>>> from mindarmour.adv_robustness.detectors import SpatialSmoothing
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._softmax = P.Softmax()
>>> def construct(self, inputs):
>>> return self._softmax(inputs)
... def __init__(self):
... super(Net, self).__init__()
... self._softmax = P.Softmax()
... def construct(self, inputs):
... return self._softmax(inputs)
>>> input_shape = (50, 3)
>>> np.random.seed(1)
>>> input_np = np.random.randn(*input_shape).astype(np.float32)
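
A hedged end-to-end sketch of the SpatialSmoothing example; the fit()/detect() calls follow the common Detector interface, and the synthetic "adversarial" batch and the exact return format of detect() are assumptions for illustration.

import numpy as np
import mindspore.ops.operations as P
from mindspore.nn import Cell
from mindspore import Model
from mindarmour.adv_robustness.detectors import SpatialSmoothing

class Net(Cell):
    """Toy classifier: softmax over three input scores."""
    def __init__(self):
        super(Net, self).__init__()
        self._softmax = P.Softmax()
    def construct(self, inputs):
        return self._softmax(inputs)

model = Model(Net())
detector = SpatialSmoothing(model)

np.random.seed(1)
benign_np = np.random.randn(50, 3).astype(np.float32)
adv_np = benign_np + 0.1 * np.random.randn(50, 3).astype(np.float32)  # noisy stand-in batch

detector.fit(benign_np)          # calibrate the detection threshold on benign data
flags = detector.detect(adv_np)  # per-sample detection decisions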


+30 -1  mindarmour/adv_robustness/evaluations/attack_evaluation.py

@@ -47,6 +47,8 @@ class AttackEvaluate:
ValueError: If target_label is None when targeted is True.

Examples:
>>> import numpy as np
>>> from mindarmour.adv_robustness.evaluations import AttackEvaluate
>>> x = np.random.normal(size=(3, 512, 512, 3))
>>> adv_x = np.random.normal(size=(3, 512, 512, 3))
>>> y = np.array([[0.1, 0.1, 0.2, 0.6],
@@ -57,6 +59,10 @@ class AttackEvaluate:
... [0.0, 0.9, 0.1, 0.0]])
>>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y)
>>> mr = attack_eval.mis_classification_rate()
>>> acac = attack_eval.avg_conf_adv_class()
>>> l_0, l_2, l_inf = attack_eval.avg_lp_distance()
>>> ass = attack_eval.avg_ssim()
>>> nte = attack_eval.nte()
"""

def __init__(self, inputs, labels, adv_inputs, adv_preds,
@@ -97,6 +103,10 @@ class AttackEvaluate:

Returns:
float, ranges between (0, 1). The higher, the more successful the attack is.

Examples:
>>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y)
>>> mr = attack_eval.mis_classification_rate()
"""
return self._success_idxes.shape[0]*1.0 / self._inputs.shape[0]

@@ -106,6 +116,10 @@ class AttackEvaluate:

Returns:
float, ranges between (0, 1). The higher, the more successful the attack is.

Examples:
>>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y)
>>> acac = attack_eval.avg_conf_adv_class()
"""
idxes = self._success_idxes
success_num = idxes.shape[0]
@@ -121,6 +135,10 @@ class AttackEvaluate:

Returns:
float, ranges between (0, 1). The lower, the more successful the attack is.

Examples:
>>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y)
>>> actc = attack_eval.avg_conf_true_class()
"""
idxes = self._success_idxes
success_num = idxes.shape[0]
@@ -140,6 +158,10 @@ class AttackEvaluate:
the more successful the attack is.

- If return value is -1, there is no success adversarial examples.

Examples:
>>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y)
>>> l_0, l_2, l_inf = attack_eval.avg_lp_distance()
"""
idxes = self._success_idxes
success_num = idxes.shape[0]
@@ -168,6 +190,10 @@ class AttackEvaluate:
successful the attack is.

- If return value is -1: there is no success adversarial examples.

Examples:
>>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y)
>>> ass = attack_eval.avg_ssim()
"""
success_num = self._success_idxes.shape[0]
if success_num == 0:
@@ -186,10 +212,13 @@ class AttackEvaluate:
References: `Towards Imperceptible and Robust Adversarial Example Attacks
against Neural Networks <https://arxiv.org/abs/1801.04693>`_


Returns:
float, ranges between (0, 1). The higher, the more successful the
attack is.

Examples:
>>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y)
>>> nte = attack_eval.nte()
"""
idxes = self._success_idxes
success_num = idxes.shape[0]


+10 -5  mindarmour/adv_robustness/evaluations/black/defense_evaluation.py

@@ -57,12 +57,14 @@ class BlackDefenseEvaluate:
max_queries (int): Attack budget, the maximum number of queries.
Examples:
>>> import numpy as np
>>> from mindarmour.adv_robustness.evaluations import BlackDefenseEvaluate
>>> raw_preds = np.array([[0.1, 0.1, 0.2, 0.6],
>>> [0.1, 0.7, 0.0, 0.2],
>>> [0.8, 0.1, 0.0, 0.1]])
... [0.1, 0.7, 0.0, 0.2],
... [0.8, 0.1, 0.0, 0.1]])
>>> def_preds = np.array([[0.1, 0.1, 0.1, 0.7],
>>> [0.1, 0.6, 0.2, 0.1],
>>> [0.1, 0.2, 0.1, 0.6]])
... [0.1, 0.6, 0.2, 0.1],
... [0.1, 0.2, 0.1, 0.6]])
>>> raw_query_counts = np.array([0,20,10])
>>> def_query_counts = np.array([0,50,60])
>>> raw_query_time = np.array([0.1, 2, 1])
@@ -79,7 +81,10 @@ class BlackDefenseEvaluate:
... def_detection_counts,
... true_labels,
... max_queries)
>>> def_eval.qcv()
>>> qcv = def_eval.qcv()
>>> asv = def_eval.asv()
>>> fpr = def_eval.fpr()
>>> qrv = def_eval.qrv()
"""
def __init__(self, raw_preds, def_preds, raw_query_counts, def_query_counts,


+15 -0  mindarmour/adv_robustness/evaluations/defense_evaluation.py

@@ -39,6 +39,9 @@ class DefenseEvaluate:
one-dimension array whose size is raw_preds.shape[0].

Examples:
>>> import numpy as np
>>> from mindarmour.adv_robustness.evaluations import DefenseEvaluate

>>> raw_preds = np.array([[0.1, 0.1, 0.2, 0.6],
... [0.1, 0.7, 0.0, 0.2],
... [0.8, 0.1, 0.0, 0.1]])
@@ -65,6 +68,9 @@ class DefenseEvaluate:

Returns:
float, the higher, the more successful the defense is.

Examples:
>>> def_eval.cav()
"""
def_succ_num = np.sum(np.argmax(self._def_preds, axis=1)
== self._true_labels)
@@ -79,6 +85,9 @@ class DefenseEvaluate:

Returns:
float, the higher, the more successful the defense is.

Examples:
>>> def_eval.crr()
"""
cond1 = np.argmax(self._def_preds, axis=1) == self._true_labels
cond2 = np.argmax(self._raw_preds, axis=1) != self._true_labels
@@ -107,6 +116,9 @@ class DefenseEvaluate:
- float, the lower, the more successful the defense is.

- If return value == -1, len(idxes) == 0.

Examples:
>>> def_eval.ccv()
"""
idxes = np.arange(self._num_samples)
cond1 = np.argmax(self._def_preds, axis=1) == self._true_labels
@@ -133,6 +145,9 @@ class DefenseEvaluate:
more successful the defense.

- If return value == -1, idxes == 0.

Examples:
>>> def_eval.cos()
"""
idxes = np.arange(self._num_samples)
cond1 = np.argmax(self._def_preds, axis=1) == self._true_labels
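
A compact, runnable version of the DefenseEvaluate example spread across the hunks above; the def_preds and true_labels values are illustrative, chosen by analogy with the BlackDefenseEvaluate example.

import numpy as np
from mindarmour.adv_robustness.evaluations import DefenseEvaluate

raw_preds = np.array([[0.1, 0.1, 0.2, 0.6],
                      [0.1, 0.7, 0.0, 0.2],
                      [0.8, 0.1, 0.0, 0.1]])
def_preds = np.array([[0.1, 0.1, 0.1, 0.7],
                      [0.1, 0.6, 0.2, 0.1],
                      [0.1, 0.2, 0.1, 0.6]])
true_labels = np.array([3, 1, 0])

def_eval = DefenseEvaluate(raw_preds, def_preds, true_labels)
cav = def_eval.cav()   # classification accuracy variance
crr = def_eval.crr()   # classification rectify ratio
ccv = def_eval.ccv()   # classification confidence variance
cos = def_eval.cos()   # classification output stability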


+3 -1  mindarmour/adv_robustness/evaluations/visual_metrics.py

@@ -46,10 +46,12 @@ class RadarMetric:
ValueError: If scale not in ['hide', 'norm', 'sparse', 'dense'].
Examples:
>>> import numpy as np
>>> from mindarmour.adv_robustness.evaluations import RadarMetric
>>> metrics_name = ['MR', 'ACAC', 'ASS', 'NTE', 'ACTC']
>>> def_metrics = [0.9, 0.85, 0.6, 0.7, 0.8]
>>> raw_metrics = [0.5, 0.3, 0.55, 0.65, 0.7]
>>> metrics_data = [def_metrics, raw_metrics]
>>> metrics_data = np.array([def_metrics, raw_metrics])
>>> metrics_labels = ['before', 'after']
>>> rm = RadarMetric(metrics_name,
... metrics_data,
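
The constructor call above is cut off by the hunk boundary; a complete sketch might read as follows, where the title and scale arguments and the final show() call are standard usage but sit outside the hunk, so treat them as illustrative assumptions.

import numpy as np
from mindarmour.adv_robustness.evaluations import RadarMetric

metrics_name = ['MR', 'ACAC', 'ASS', 'NTE', 'ACTC']
def_metrics = [0.9, 0.85, 0.6, 0.7, 0.8]
raw_metrics = [0.5, 0.3, 0.55, 0.65, 0.7]
metrics_data = np.array([def_metrics, raw_metrics])
metrics_labels = ['before', 'after']

rm = RadarMetric(metrics_name, metrics_data, metrics_labels,
                 title='Defense Comparison', scale='sparse')
rm.show()   # renders the radar chart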

