Browse Source

Fix API Doc Example issues

tags/v1.6.0
shu-kun-zhang 3 years ago
parent
commit
5cfdb96002
32 changed files with 272 additions and 248 deletions
  1. +0
    -4
      mindarmour/adv_robustness/attacks/black/genetic_attack.py
  2. +16
    -4
      mindarmour/adv_robustness/attacks/black/hop_skip_jump_attack.py
  3. +14
    -3
      mindarmour/adv_robustness/attacks/black/natural_evolutionary_strategy.py
  4. +12
    -2
      mindarmour/adv_robustness/attacks/black/pointwise_attack.py
  5. +23
    -14
      mindarmour/adv_robustness/attacks/black/pso_attack.py
  6. +12
    -2
      mindarmour/adv_robustness/attacks/black/salt_and_pepper_attack.py
  7. +0
    -2
      mindarmour/adv_robustness/attacks/carlini_wagner.py
  8. +23
    -10
      mindarmour/adv_robustness/attacks/deep_fool.py
  9. +1
    -19
      mindarmour/adv_robustness/attacks/gradient_method.py
  10. +14
    -21
      mindarmour/adv_robustness/attacks/iterative_gradient_method.py
  11. +13
    -4
      mindarmour/adv_robustness/attacks/jsma.py
  12. +4
    -0
      mindarmour/adv_robustness/attacks/lbfgs.py
  13. +1
    -4
      mindarmour/adv_robustness/defenses/adversarial_defense.py
  14. +0
    -4
      mindarmour/adv_robustness/defenses/natural_adversarial_defense.py
  15. +0
    -4
      mindarmour/adv_robustness/defenses/projected_adversarial_defense.py
  16. +0
    -2
      mindarmour/adv_robustness/detectors/black/similarity_detector.py
  17. +0
    -2
      mindarmour/adv_robustness/detectors/ensemble_detector.py
  18. +0
    -4
      mindarmour/adv_robustness/detectors/mag_net.py
  19. +1
    -3
      mindarmour/adv_robustness/detectors/region_based_detector.py
  20. +0
    -2
      mindarmour/adv_robustness/detectors/spatial_smoothing.py
  21. +4
    -4
      mindarmour/adv_robustness/evaluations/attack_evaluation.py
  22. +8
    -8
      mindarmour/adv_robustness/evaluations/black/defense_evaluation.py
  23. +6
    -6
      mindarmour/adv_robustness/evaluations/defense_evaluation.py
  24. +4
    -4
      mindarmour/adv_robustness/evaluations/visual_metrics.py
  25. +8
    -8
      mindarmour/fuzz_testing/fuzzing.py
  26. +22
    -22
      mindarmour/privacy/diff_privacy/mechanisms/mechanisms.py
  27. +2
    -2
      mindarmour/privacy/diff_privacy/optimizer/optimizer.py
  28. +15
    -15
      mindarmour/privacy/diff_privacy/train/model.py
  29. +16
    -16
      mindarmour/privacy/sup_privacy/mask_monitor/masker.py
  30. +34
    -34
      mindarmour/privacy/sup_privacy/sup_ctrl/conctrl.py
  31. +16
    -16
      mindarmour/privacy/sup_privacy/train/model.py
  32. +3
    -3
      mindarmour/reliability/concept_drift/concept_drift_check_time_series.py

+ 0
- 4
mindarmour/adv_robustness/attacks/black/genetic_attack.py View File

@@ -75,7 +75,6 @@ class GeneticAttack(Attack):
>>> from mindspore.nn import Cell
>>> from mindarmour import BlackModel
>>> from mindarmour.adv_robustness.attacks import GeneticAttack
>>>
>>> class ModelToBeAttacked(BlackModel):
>>> def __init__(self, network):
>>> super(ModelToBeAttacked, self).__init__()
@@ -83,16 +82,13 @@ class GeneticAttack(Attack):
>>> def predict(self, inputs):
>>> result = self._network(Tensor(inputs.astype(np.float32)))
>>> return result.asnumpy()
>>>
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._softmax = M.Softmax()
>>>
>>> def construct(self, inputs):
>>> out = self._softmax(inputs)
>>> return out
>>>
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = GeneticAttack(model, sparse=False)


+ 16
- 4
mindarmour/adv_robustness/attacks/black/hop_skip_jump_attack.py View File

@@ -80,7 +80,6 @@ class HopSkipJumpAttack(Attack):
>>> from mindarmour import BlackModel
>>> from mindarmour.adv_robustness.attacks import HopSkipJumpAttack
>>> from tests.ut.python.utils.mock_net import Net
>>>
>>> class ModelToBeAttacked(BlackModel):
>>> def __init__(self, network):
>>> super(ModelToBeAttacked, self).__init__()
@@ -90,8 +89,6 @@ class HopSkipJumpAttack(Attack):
>>> inputs = inputs[np.newaxis, :]
>>> result = self._network(Tensor(inputs.astype(np.float32)))
>>> return result.asnumpy()
>>>
>>>
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = HopSkipJumpAttack(model)
@@ -188,12 +185,27 @@ class HopSkipJumpAttack(Attack):
- numpy.ndarray, query times for each sample.

Examples:
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindarmour import BlackModel
>>> from mindarmour.adv_robustness.attacks import HopSkipJumpAttack
>>> from tests.ut.python.utils.mock_net import Net
>>> class ModelToBeAttacked(BlackModel):
>>> def __init__(self, network):
>>> super(ModelToBeAttacked, self).__init__()
>>> self._network = network
>>> def predict(self, inputs):
>>> if len(inputs.shape) == 3:
>>> inputs = inputs[np.newaxis, :]
>>> result = self._network(Tensor(inputs.astype(np.float32)))
>>> return result.asnumpy()
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = HopSkipJumpAttack(model)
>>> n, c, h, w = 1, 1, 32, 32
>>> class_num = 3
>>> x_test = np.asarray(np.random.random((n,c,h,w)), np.float32)
>>> y_test = np.random.randint(0, class_num, size=n)
>>>
>>> _, adv_x, _= attack.generate(x_test, y_test)
"""
if labels is not None:


+ 14
- 3
mindarmour/adv_robustness/attacks/black/natural_evolutionary_strategy.py View File

@@ -84,7 +84,6 @@ class NES(Attack):
>>> from mindarmour import BlackModel
>>> from mindarmour.adv_robustness.attacks import NES
>>> from tests.ut.python.utils.mock_net import Net
>>>
>>> class ModelToBeAttacked(BlackModel):
>>> def __init__(self, network):
>>> super(ModelToBeAttacked, self).__init__()
@@ -94,7 +93,6 @@ class NES(Attack):
>>> inputs = inputs[np.newaxis, :]
>>> result = self._network(Tensor(inputs.astype(np.float32)))
>>> return result.asnumpy()
>>>
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> SCENE = 'Query_Limit'
@@ -157,12 +155,25 @@ class NES(Attack):
ValueError: If scene is not in ['Label_Only', 'Partial_Info', 'Query_Limit']

Examples:
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindarmour import BlackModel
>>> from mindarmour.adv_robustness.attacks import NES
>>> from tests.ut.python.utils.mock_net import Net
>>> class ModelToBeAttacked(BlackModel):
>>> def __init__(self, network):
>>> super(ModelToBeAttacked, self).__init__()
>>> self._network = network
>>> def predict(self, inputs):
>>> if len(inputs.shape) == 3:
>>> inputs = inputs[np.newaxis, :]
>>> result = self._network(Tensor(inputs.astype(np.float32)))
>>> return result.asnumpy()
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> SCENE = 'Query_Limit'
>>> TOP_K = -1
>>> attack = NES(model, SCENE, top_k=TOP_K)
>>>
>>> num_class = 5
>>> x_test = np.asarray(np.random.random((32, 32)), np.float32)
>>> target_image = np.asarray(np.random.random((32, 32)), np.float32)


+ 12
- 2
mindarmour/adv_robustness/attacks/black/pointwise_attack.py View File

@@ -52,7 +52,6 @@ class PointWiseAttack(Attack):
>>> from mindarmour import BlackModel
>>> from mindarmour.adv_robustness.attacks import PointWiseAttack
>>> from tests.ut.python.utils.mock_net import Net
>>>
>>> class ModelToBeAttacked(BlackModel):
>>> def __init__(self, network):
>>> super(ModelToBeAttacked, self).__init__()
@@ -60,7 +59,6 @@ class PointWiseAttack(Attack):
>>> def predict(self, inputs):
>>> result = self._network(Tensor(inputs.astype(np.float32)))
>>> return result.asnumpy()
>>>
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = PointWiseAttack(model)
@@ -95,6 +93,18 @@ class PointWiseAttack(Attack):
- numpy.ndarray, query times for each sample.

Examples:
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindarmour import BlackModel
>>> from mindarmour.adv_robustness.attacks import PointWiseAttack
>>> from tests.ut.python.utils.mock_net import Net
>>> class ModelToBeAttacked(BlackModel):
>>> def __init__(self, network):
>>> super(ModelToBeAttacked, self).__init__()
>>> self._network = network
>>> def predict(self, inputs):
>>> result = self._network(Tensor(inputs.astype(np.float32)))
>>> return result.asnumpy()
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = PointWiseAttack(model)


+ 23
- 14
mindarmour/adv_robustness/attacks/black/pso_attack.py View File

@@ -70,7 +70,6 @@ class PSOAttack(Attack):
>>> from mindspore.nn import Cell
>>> from mindarmour import BlackModel
>>> from mindarmour.adv_robustness.attacks import PSOAttack
>>>
>>> class ModelToBeAttacked(BlackModel):
>>> def __init__(self, network):
>>> super(ModelToBeAttacked, self).__init__()
@@ -80,7 +79,6 @@ class PSOAttack(Attack):
>>> inputs = np.expand_dims(inputs, axis=0)
>>> result = self._network(Tensor(inputs.astype(np.float32)))
>>> return result.asnumpy()
>>>
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
@@ -89,7 +87,6 @@ class PSOAttack(Attack):
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
>>>
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=False)
@@ -381,17 +378,6 @@ class PSOAttack(Attack):
- numpy.ndarray, generated adversarial examples.

- numpy.ndarray, query times for each sample.

Examples:
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=False)
>>> batch_size = 6
>>> x_test = np.random.rand(batch_size, 10)
>>> y_test = np.random.randint(low=0, high=10, size=batch_size)
>>> y_test = np.eye(10)[y_test]
>>> y_test = y_test.astype(np.float32)
>>> _, adv_data, _ = attack.generate(x_test, y_test)
"""
# inputs check
images, auxiliary_inputs, gt_boxes, gt_labels = check_detection_inputs(inputs, labels)
@@ -523,6 +509,29 @@ class PSOAttack(Attack):
- numpy.ndarray, query times for each sample.

Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor
>>> from mindspore.nn import Cell
>>> from mindarmour import BlackModel
>>> from mindarmour.adv_robustness.attacks import PSOAttack
>>> class ModelToBeAttacked(BlackModel):
>>> def __init__(self, network):
>>> super(ModelToBeAttacked, self).__init__()
>>> self._network = network
>>> def predict(self, inputs):
>>> if len(inputs.shape) == 1:
>>> inputs = np.expand_dims(inputs, axis=0)
>>> result = self._network(Tensor(inputs.astype(np.float32)))
>>> return result.asnumpy()
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>>
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=False)


+ 12
- 2
mindarmour/adv_robustness/attacks/black/salt_and_pepper_attack.py View File

@@ -45,7 +45,6 @@ class SaltAndPepperNoiseAttack(Attack):
>>> from mindarmour import BlackModel
>>> from mindarmour.adv_robustness.attacks import SaltAndPepperNoiseAttack
>>> from tests.ut.python.utils.mock_net import Net
>>>
>>> class ModelToBeAttacked(BlackModel):
>>> def __init__(self, network):
>>> super(ModelToBeAttacked, self).__init__()
@@ -53,7 +52,6 @@ class SaltAndPepperNoiseAttack(Attack):
>>> def predict(self, inputs):
>>> result = self._network(Tensor(inputs.astype(np.float32)))
>>> return result.asnumpy()
>>>
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = SaltAndPepperNoiseAttack(model)
@@ -85,6 +83,18 @@ class SaltAndPepperNoiseAttack(Attack):
- numpy.ndarray, query times for each sample.

Examples:
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindarmour import BlackModel
>>> from mindarmour.adv_robustness.attacks import SaltAndPepperNoiseAttack
>>> from tests.ut.python.utils.mock_net import Net
>>> class ModelToBeAttacked(BlackModel):
>>> def __init__(self, network):
>>> super(ModelToBeAttacked, self).__init__()
>>> self._network = network
>>> def predict(self, inputs):
>>> result = self._network(Tensor(inputs.astype(np.float32)))
>>> return result.asnumpy()
>>> net = Net()
>>> model = ModelToBeAttacked(net)
>>> attack = SaltAndPepperNoiseAttack(model)


+ 0
- 2
mindarmour/adv_robustness/attacks/carlini_wagner.py View File

@@ -99,7 +99,6 @@ class CarliniWagnerL2Attack(Attack):
>>> import mindspore.ops.operations as M
>>> from mindspore.nn import Cell
>>> from mindarmour.adv_robustness.attacks import CarliniWagnerL2Attack
>>>
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
@@ -108,7 +107,6 @@ class CarliniWagnerL2Attack(Attack):
>>> def construct(self, inputs):
>>> out = self._softmax(inputs)
>>> return out
>>>
>>> input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
>>> label_np = np.array([3]).astype(np.int64)
>>> num_classes = input_np.shape[1]


+ 23
- 10
mindarmour/adv_robustness/attacks/deep_fool.py View File

@@ -122,18 +122,16 @@ class DeepFool(Attack):
>>> from mindspore.nn import Cell
>>> from mindspore import Tensor
>>> from mindarmour.adv_robustness.attacks import DeepFool
>>>
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._softmax = P.Softmax()
>>>
>>> def construct(self, inputs):
>>> out = self._softmax(inputs)
>>> return out
>>>
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._softmax = P.Softmax()
>>> def construct(self, inputs):
>>> out = self._softmax(inputs)
>>> return out
>>> net = Net()
>>> attack = DeepFool(net, classes, max_iters=10, norm_level=2,
bounds=(0.0, 1.0))
... bounds=(0.0, 1.0))
"""

def __init__(self, network, num_classes, model_type='classification',
@@ -181,6 +179,21 @@ class DeepFool(Attack):
NotImplementedError: If norm_level is not in [2, np.inf, '2', 'inf'].

Examples:
>>> import numpy as np
>>> import mindspore.ops.operations as P
>>> from mindspore.nn import Cell
>>> from mindspore import Tensor
>>> from mindarmour.adv_robustness.attacks import DeepFool
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._softmax = P.Softmax()
>>> def construct(self, inputs):
>>> out = self._softmax(inputs)
>>> return out
>>> net = Net()
>>> attack = DeepFool(net, classes, max_iters=10, norm_level=2,
... bounds=(0.0, 1.0))
>>> input_shape = (1, 5)
>>> _, classes = input_shape
>>> input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)


+ 1
- 19
mindarmour/adv_robustness/attacks/gradient_method.py View File

@@ -51,8 +51,7 @@ class GradientMethod(Attack):
>>> import mindspore.nn as nn
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindspore import Tensor
>>> from mindarmour.adv_robustness.attacksimport FastGradientMethod
>>>
>>> from mindarmour.adv_robustness.attacks import FastGradientMethod
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
@@ -61,7 +60,6 @@ class GradientMethod(Attack):
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
>>>
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
>>> net = Net()
@@ -175,16 +173,13 @@ class FastGradientMethod(GradientMethod):
>>> import mindspore.nn as nn
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import FastGradientMethod
>>>
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>>
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
>>>
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
>>> net = Net()
@@ -258,16 +253,13 @@ class RandomFastGradientMethod(FastGradientMethod):
>>> import mindspore.nn as nn
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import RandomFastGradientMethod
>>>
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>>
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
>>>
>>> net = Net()
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
@@ -315,16 +307,13 @@ class FastGradientSignMethod(GradientMethod):
>>> import mindspore.nn as nn
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import FastGradientSignMethod
>>>
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>>
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
>>>
>>> net = Net()
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
@@ -399,11 +388,9 @@ class RandomFastGradientSignMethod(FastGradientSignMethod):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>>
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
>>>
>>> net = Net()
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
@@ -446,7 +433,6 @@ class LeastLikelyClassMethod(FastGradientSignMethod):
>>> import mindspore.nn as nn
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import LeastLikelyClassMethod
>>>
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
@@ -455,7 +441,6 @@ class LeastLikelyClassMethod(FastGradientSignMethod):
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
>>>
>>> net = Net()
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
@@ -499,16 +484,13 @@ class RandomLeastLikelyClassMethod(FastGradientSignMethod):
>>> import mindspore.nn as nn
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import RandomLeastLikelyClassMethod
>>>
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>>
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
>>>
>>> net = Net()
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])


+ 14
- 21
mindarmour/adv_robustness/attacks/iterative_gradient_method.py View File

@@ -188,16 +188,13 @@ class BasicIterativeMethod(IterativeGradientMethod):
>>> import mindspore.nn as nn
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import BasicIterativeMethod
>>>
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>>
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
>>>
>>> net = Net()
>>> attack = BasicIterativeMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
"""
@@ -230,21 +227,23 @@ class BasicIterativeMethod(IterativeGradientMethod):
numpy.ndarray, generated adversarial examples.

Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import BasicIterativeMethod
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>>
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
>>>
>>> net = Net()
>>> attack = BasicIterativeMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
>>> adv_x = attack.generate([[0.3, 0.2, 0.6],
>>> [0.3, 0.2, 0.4]],
>>> [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
>>> [0, 0, 0, 0, 0, 0, 1, 0, 0, 0]])
... [0.3, 0.2, 0.4]],
... [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 1, 0, 0, 0]])
"""
inputs_image, inputs, labels = check_inputs_labels(inputs, labels)
arr_x = inputs_image
@@ -333,22 +332,19 @@ class MomentumIterativeMethod(IterativeGradientMethod):
>>> import mindspore.nn as nn
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import MomentumIterativeMethod
>>>
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>>
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
>>>
>>> net = Net()
>>> attack = MomentumIterativeMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
>>> adv_x = attack.generate([[0.5, 0.2, 0.6],
>>> [0.3, 0, 0.2]],
>>> [[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
>>> [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]])
... [0.3, 0, 0.2]],
... [[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
... [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]])
"""
inputs_image, inputs, labels = check_inputs_labels(inputs, labels)
arr_x = inputs_image
@@ -406,7 +402,7 @@ class MomentumIterativeMethod(IterativeGradientMethod):

Examples:
>>> grad = self._gradient([[0.5, 0.3, 0.4]],
>>> [[0, 0, 0, 1, 0, 0, 0, 0, 0, 0])
... [[0, 0, 0, 1, 0, 0, 0, 0, 0, 0])
"""
# get grad of loss over x
inputs_tensor = to_tensor_tuple(inputs)
@@ -479,22 +475,19 @@ class ProjectedGradientDescent(BasicIterativeMethod):
>>> import mindspore.nn as nn
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import ProjectedGradientDescent
>>>
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>>
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
>>>
>>> net = Net()
>>> attack = ProjectedGradientDescent(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
>>> adv_x = attack.generate([[0.6, 0.2, 0.6],
>>> [0.3, 0.3, 0.4]],
>>> [[0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
>>> [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
... [0.3, 0.3, 0.4]],
... [[0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
... [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
"""
inputs_image, inputs, labels = check_inputs_labels(inputs, labels)
arr_x = inputs_image


+ 13
- 4
mindarmour/adv_robustness/attacks/jsma.py View File

@@ -62,11 +62,9 @@ class JSMAAttack(Attack):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>>
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
>>>
>>> net = Net()
>>> input_shape = (1, 5)
>>> batch_size, classes = input_shape
@@ -197,11 +195,22 @@ class JSMAAttack(Attack):
numpy.ndarray, adversarial samples.

Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore.nn import Cell
>>> from mindarmour.adv_robustness.attacks import JSMAAttack
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._relu = nn.ReLU()
>>> def construct(self, inputs):
>>> out = self._relu(inputs)
>>> return out
>>> net = Net()
>>> input_shape = (1, 5)
>>> batch_size, classes = input_shape
>>> input_np = np.random.random(input_shape).astype(np.float32)
>>> label_np = np.random.randint(classes, size=batch_size)
>>>
>>> attack = JSMAAttack(net, classes, max_iteration=5)
>>> advs = attack.generate(input_np, label_np)
"""


+ 4
- 0
mindarmour/adv_robustness/attacks/lbfgs.py View File

@@ -99,6 +99,10 @@ class LBFGS(Attack):
numpy.ndarray, generated adversarial examples.

Examples:
>>> import numpy as np
>>> from mindarmour.adv_robustness.attacks import LBFGS
>>> from tests.ut.python.utils.mock_net import Net
>>> net = Net()
>>> attack = LBFGS(net, is_targeted=True)
>>> adv = attack.generate([[0.1, 0.2, 0.6], [0.3, 0, 0.4]], [2, 2])
"""


+ 1
- 4
mindarmour/adv_robustness/defenses/adversarial_defense.py View File

@@ -115,7 +115,6 @@ class AdversarialDefenseWithAttacks(AdversarialDefense):
>>> from mindarmour.adv_robustness.defenses import AdversarialDefense
>>> from mindspore import nn
>>> from tests.ut.python.utils.mock_net import Net
>>>
>>> net = Net()
>>> lr = 0.001
>>> momentum = 0.9
@@ -123,12 +122,10 @@ class AdversarialDefenseWithAttacks(AdversarialDefense):
>>> num_class = 10
>>> loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
>>> optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum)
>>>
>>> fgsm = FastGradientSignMethod(net, loss_fn=loss_fn)
>>> pgd = ProjectedGradientDescent(net, loss_fn=loss_fn)
>>> ead = AdversarialDefenseWithAttacks(net, [fgsm, pgd], loss_fn=loss_fn,
>>> optimizer=optimizer)
>>>
... optimizer=optimizer)
>>> inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
>>> labels = np.random.randint(num_class, size=batch_size).astype(np.int32)
>>> labels = np.eye(num_class)[labels].astype(np.float32)


+ 0
- 4
mindarmour/adv_robustness/defenses/natural_adversarial_defense.py View File

@@ -41,18 +41,14 @@ class NaturalAdversarialDefense(AdversarialDefenseWithAttacks):
>>> from mindarmour.adv_robustness.defenses import NaturalAdversarialDefense
>>> from mindspore import nn
>>> from tests.ut.python.utils.mock_net import Net
>>>
>>> net = Net()
>>> lr = 0.001
>>> momentum = 0.9
>>> batch_size = 32
>>> num_class = 10
>>>
>>> loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
>>> optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum)
>>>
>>> nad = NaturalAdversarialDefense(net, loss_fn=loss_fn, optimizer=optimizer)
>>>
>>> inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
>>> labels = np.random.randint(num_class, size=batch_size).astype(np.int32)
>>> labels = np.eye(num_class)[labels].astype(np.float32)


+ 0
- 4
mindarmour/adv_robustness/defenses/projected_adversarial_defense.py View File

@@ -46,18 +46,14 @@ class ProjectedAdversarialDefense(AdversarialDefenseWithAttacks):
>>> from mindarmour.adv_robustness.defenses import ProjectedAdversarialDefense
>>> from mindspore import nn
>>> from tests.ut.python.utils.mock_net import Net
>>>
>>> net = Net()
>>> lr = 0.001
>>> momentum = 0.9
>>> batch_size = 32
>>> num_class = 10
>>>
>>> loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
>>> optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum)
>>>
>>> pad = ProjectedAdversarialDefense(net, loss_fn=loss_fn, optimizer=optimizer)
>>>
>>> inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32)
>>> labels = np.random.randint(num_class, size=batch_size).astype(np.int32)
>>> labels = np.eye(num_class)[labels].astype(np.float32)


+ 0
- 2
mindarmour/adv_robustness/detectors/black/similarity_detector.py View File

@@ -79,7 +79,6 @@ class SimilarityDetector(Detector):
>>> from mindspore import Model
>>> from mindspore import context
>>> from mindarmour.adv_robustness.detectors import SimilarityDetector
>>>
>>> class EncoderNet(Cell):
>>> def __init__(self, encode_dim):
>>> super(EncoderNet, self).__init__()
@@ -89,7 +88,6 @@ class SimilarityDetector(Detector):
>>> return self.add(inputs, inputs)
>>> def get_encode_dim(self):
>>> return self._encode_dim
>>>
>>> np.random.seed(5)
>>> x_train = np.random.rand(10, 32, 32, 3).astype(np.float32)
>>> perm = np.random.permutation(x_train.shape[0])


+ 0
- 2
mindarmour/adv_robustness/detectors/ensemble_detector.py View File

@@ -42,7 +42,6 @@ class EnsembleDetector(Detector):
>>> from mindarmour.adv_robustness.detectors import ErrorBasedDetector
>>> from mindarmour.adv_robustness.detectors import RegionBasedDetector
>>> from mindarmour.adv_robustness.detectors import EnsembleDetector
>>>
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
@@ -56,7 +55,6 @@ class EnsembleDetector(Detector):
>>> self.add = Add()
>>> def construct(self, inputs):
>>> return self.add(inputs, inputs)
>>>
>>> np.random.seed(6)
>>> adv = np.random.rand(4, 4).astype(np.float32)
>>> model = Model(Net())


+ 0
- 4
mindarmour/adv_robustness/detectors/mag_net.py View File

@@ -58,10 +58,8 @@ class ErrorBasedDetector(Detector):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.add = Add()
>>>
>>> def construct(self, inputs):
>>> return self.add(inputs, inputs)
>>>
>>> np.random.seed(5)
>>> ori = np.random.rand(4, 4, 4).astype(np.float32)
>>> np.random.seed(6)
@@ -195,10 +193,8 @@ class DivergenceBasedDetector(ErrorBasedDetector):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.add = Add()
>>>
>>> def construct(self, inputs):
>>> return self.add(inputs, inputs)
>>>
>>> np.random.seed(5)
>>> ori = np.random.rand(4, 4, 4).astype(np.float32)
>>> np.random.seed(6)


+ 1
- 3
mindarmour/adv_robustness/detectors/region_based_detector.py View File

@@ -62,14 +62,12 @@ class RegionBasedDetector(Detector):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.add = Add()
>>>
>>> def construct(self, inputs):
>>> return self.add(inputs, inputs)
>>>
>>> np.random.seed(5)
>>> ori = np.random.rand(4, 4).astype(np.float32)
>>> labels = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0],
[0, 1, 0, 0]]).astype(np.int32)
... [0, 1, 0, 0]]).astype(np.int32)
>>> np.random.seed(6)
>>> adv = np.random.rand(4, 4).astype(np.float32)
>>> model = Model(Net())


+ 0
- 2
mindarmour/adv_robustness/detectors/spatial_smoothing.py View File

@@ -59,10 +59,8 @@ class SpatialSmoothing(Detector):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self._softmax = P.Softmax()
>>>
>>> def construct(self, inputs):
>>> return self._softmax(inputs)
>>>
>>> input_shape = (50, 3)
>>> np.random.seed(1)
>>> input_np = np.random.randn(*input_shape).astype(np.float32)


+ 4
- 4
mindarmour/adv_robustness/evaluations/attack_evaluation.py View File

@@ -50,11 +50,11 @@ class AttackEvaluate:
>>> x = np.random.normal(size=(3, 512, 512, 3))
>>> adv_x = np.random.normal(size=(3, 512, 512, 3))
>>> y = np.array([[0.1, 0.1, 0.2, 0.6],
>>> [0.1, 0.7, 0.0, 0.2],
>>> [0.8, 0.1, 0.0, 0.1]])
... [0.1, 0.7, 0.0, 0.2],
... [0.8, 0.1, 0.0, 0.1]])
>>> adv_y = np.array([[0.1, 0.1, 0.2, 0.6],
>>> [0.1, 0.0, 0.8, 0.1],
>>> [0.0, 0.9, 0.1, 0.0]])
... [0.1, 0.0, 0.8, 0.1],
... [0.0, 0.9, 0.1, 0.0]])
>>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y)
>>> mr = attack_eval.mis_classification_rate()
"""


+ 8
- 8
mindarmour/adv_robustness/evaluations/black/defense_evaluation.py View File

@@ -71,14 +71,14 @@ class BlackDefenseEvaluate:
>>> true_labels = np.array([3, 1, 0])
>>> max_queries = 100
>>> def_eval = BlackDefenseEvaluate(raw_preds,
>>> def_preds,
>>> raw_query_counts,
>>> def_query_counts,
>>> raw_query_time,
>>> def_query_time,
>>> def_detection_counts,
>>> true_labels,
>>> max_queries)
... def_preds,
... raw_query_counts,
... def_query_counts,
... raw_query_time,
... def_query_time,
... def_detection_counts,
... true_labels,
... max_queries)
>>> def_eval.qcv()
"""


+ 6
- 6
mindarmour/adv_robustness/evaluations/defense_evaluation.py View File

@@ -40,15 +40,15 @@ class DefenseEvaluate:

Examples:
>>> raw_preds = np.array([[0.1, 0.1, 0.2, 0.6],
>>> [0.1, 0.7, 0.0, 0.2],
>>> [0.8, 0.1, 0.0, 0.1]])
... [0.1, 0.7, 0.0, 0.2],
... [0.8, 0.1, 0.0, 0.1]])
>>> def_preds = np.array([[0.1, 0.1, 0.1, 0.7],
>>> [0.1, 0.6, 0.2, 0.1],
>>> [0.1, 0.2, 0.1, 0.6]])
... [0.1, 0.6, 0.2, 0.1],
... [0.1, 0.2, 0.1, 0.6]])
>>> true_labels = np.array([3, 1, 0])
>>> def_eval = DefenseEvaluate(raw_preds,
>>> def_preds,
>>> true_labels)
... def_preds,
... true_labels)
>>> def_eval.cav()
"""
def __init__(self, raw_preds, def_preds, true_labels):


+ 4
- 4
mindarmour/adv_robustness/evaluations/visual_metrics.py View File

@@ -52,10 +52,10 @@ class RadarMetric:
>>> metrics_data = [def_metrics, raw_metrics]
>>> metrics_labels = ['before', 'after']
>>> rm = RadarMetric(metrics_name,
>>> metrics_data,
>>> metrics_labels,
>>> title='',
>>> scale='sparse')
... metrics_data,
... metrics_labels,
... title='',
... scale='sparse')
>>> rm.show()
"""


+ 8
- 8
mindarmour/fuzz_testing/fuzzing.py View File

@@ -107,17 +107,17 @@ class Fuzzer:
>>> net = Net()
>>> model = Model(net)
>>> mutate_config = [{'method': 'Blur',
>>> 'params': {'auto_param': [True]}},
>>> {'method': 'Contrast',
>>> 'params': {'factor': [2]}},
>>> {'method': 'Translate',
>>> 'params': {'x_bias': [0.1, 0.2], 'y_bias': [0.2]}},
>>> {'method': 'FGSM',
>>> 'params': {'eps': [0.1, 0.2, 0.3], 'alpha': [0.1]}}]
... 'params': {'auto_param': [True]}},
... {'method': 'Contrast',
... 'params': {'factor': [2]}},
... {'method': 'Translate',
... 'params': {'x_bias': [0.1, 0.2], 'y_bias': [0.2]}},
... {'method': 'FGSM',
... 'params': {'eps': [0.1, 0.2, 0.3], 'alpha': [0.1]}}]
>>> nc = KMultisectionNeuronCoverage(model, train_images, segmented_num=100)
>>> model_fuzz_test = Fuzzer(model)
>>> samples, gt_labels, preds, strategies, metrics = model_fuzz_test.fuzzing(mutate_config, initial_seeds,
>>> nc, max_iters=100)
... nc, max_iters=100)
"""

def __init__(self, target_model):


+ 22
- 22
mindarmour/privacy/diff_privacy/mechanisms/mechanisms.py View File

@@ -71,10 +71,10 @@ class ClipMechanismsFactory:
>>> target_unclipped_quantile = 0.9
>>> clip_mechanism = ClipMechanismsFactory()
>>> ada_clip = clip_mechanism.create('Gaussian',
>>> decay_policy=decay_policy,
>>> learning_rate=learning_rate,
>>> target_unclipped_quantile=target_unclipped_quantile,
>>> fraction_stddev=beta_stddev)
... decay_policy=decay_policy,
... learning_rate=learning_rate,
... target_unclipped_quantile=target_unclipped_quantile,
... fraction_stddev=beta_stddev)
>>> next_norm_bound = ada_clip(beta, norm_bound)

"""
@@ -125,25 +125,25 @@ class NoiseMechanismsFactory:
>>> epochs = 1
>>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
>>> noise_mech = NoiseMechanismsFactory().create('Gaussian',
>>> norm_bound=norm_bound,
>>> initial_noise_multiplier=initial_noise_multiplier)
... norm_bound=norm_bound,
... initial_noise_multiplier=initial_noise_multiplier)
>>> clip_mech = ClipMechanismsFactory().create('Gaussian',
>>> decay_policy='Linear',
>>> learning_rate=0.001,
>>> target_unclipped_quantile=0.9,
>>> fraction_stddev=0.01)
... decay_policy='Linear',
... learning_rate=0.001,
... target_unclipped_quantile=0.9,
... fraction_stddev=0.01)
>>> net_opt = nn.Momentum(network.trainable_params(), learning_rate=0.1,
>>> momentum=0.9)
... momentum=0.9)
>>> model = DPModel(micro_batches=2,
>>> clip_mech=clip_mech,
>>> norm_bound=norm_bound,
>>> noise_mech=noise_mech,
>>> network=network,
>>> loss_fn=loss,
>>> optimizer=net_opt,
>>> metrics=None)
... clip_mech=clip_mech,
... norm_bound=norm_bound,
... noise_mech=noise_mech,
... network=network,
... loss_fn=loss,
... optimizer=net_opt,
... metrics=None)
>>> ms_ds = ds.GeneratorDataset(dataset_generator,
>>> ['data', 'label'])
... ['data', 'label'])
>>> model.train(epochs, ms_ds, dataset_sink_mode=False)
"""
if mech_name == 'Gaussian':
@@ -386,9 +386,9 @@ class AdaClippingWithGaussianRandom(Cell):
>>> learning_rate = 0.001
>>> target_unclipped_quantile = 0.9
>>> ada_clip = AdaClippingWithGaussianRandom(decay_policy=decay_policy,
>>> learning_rate=learning_rate,
>>> target_unclipped_quantile=target_unclipped_quantile,
>>> fraction_stddev=beta_stddev)
... learning_rate=learning_rate,
... target_unclipped_quantile=target_unclipped_quantile,
... fraction_stddev=beta_stddev)
>>> next_norm_bound = ada_clip(beta, norm_bound)

"""


+ 2
- 2
mindarmour/privacy/diff_privacy/optimizer/optimizer.py View File

@@ -65,8 +65,8 @@ class DPOptimizerClassFactory:
>>> GaussianSGD = DPOptimizerClassFactory(micro_batches=2)
>>> GaussianSGD.set_mechanisms('Gaussian', norm_bound=1.0, initial_noise_multiplier=1.5)
>>> net_opt = GaussianSGD.create('Momentum')(params=network.trainable_params(),
>>> learning_rate=0.001,
>>> momentum=0.9)
... learning_rate=0.001,
... momentum=0.9)
"""

def __init__(self, micro_batches=2):


+ 15
- 15
mindarmour/privacy/diff_privacy/train/model.py View File

@@ -94,25 +94,25 @@ class DPModel(Model):
>>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
>>> factory_opt = DPOptimizerClassFactory(micro_batches=micro_batches)
>>> factory_opt.set_mechanisms('Gaussian',
>>> norm_bound=norm_bound,
>>> initial_noise_multiplier=initial_noise_multiplier)
... norm_bound=norm_bound,
... initial_noise_multiplier=initial_noise_multiplier)
>>> net_opt = factory_opt.create('Momentum')(network.trainable_params(),
>>> learning_rate=0.1, momentum=0.9)
... learning_rate=0.1, momentum=0.9)
>>> clip_mech = ClipMechanismsFactory().create('Gaussian',
>>> decay_policy='Linear',
>>> learning_rate=0.01,
>>> target_unclipped_quantile=0.9,
>>> fraction_stddev=0.01)
... decay_policy='Linear',
... learning_rate=0.01,
... target_unclipped_quantile=0.9,
... fraction_stddev=0.01)
>>> model = DPModel(micro_batches=micro_batches,
>>> norm_bound=norm_bound,
>>> clip_mech=clip_mech,
>>> noise_mech=None,
>>> network=network,
>>> loss_fn=loss,
>>> optimizer=net_opt,
>>> metrics=None)
... norm_bound=norm_bound,
... clip_mech=clip_mech,
... noise_mech=None,
... network=network,
... loss_fn=loss,
... optimizer=net_opt,
... metrics=None)
>>> ms_ds = ds.GeneratorDataset(dataset_generator,
>>> ['data', 'label'])
... ['data', 'label'])
>>> model.train(epochs, ms_ds, dataset_sink_mode=False)
"""



+ 16
- 16
mindarmour/privacy/sup_privacy/mask_monitor/masker.py View File

@@ -34,30 +34,30 @@ class SuppressMasker(Callback):
>>> masklayers = []
>>> masklayers.append(MaskLayerDes("conv1.weight", 0, False, True, 10))
>>> suppress_ctrl_instance = SuppressPrivacyFactory().create(networks=networks_l5,
>>> mask_layers=masklayers,
>>> policy="local_train",
>>> end_epoch=10,
>>> batch_num=(int)(10000/cfg.batch_size),
>>> start_epoch=3,
>>> mask_times=1000,
>>> lr=lr,
>>> sparse_end=0.90,
>>> sparse_start=0.0)
... mask_layers=masklayers,
... policy="local_train",
... end_epoch=10,
... batch_num=(int)(10000/cfg.batch_size),
... start_epoch=3,
... mask_times=1000,
... lr=lr,
... sparse_end=0.90,
... sparse_start=0.0)
>>> net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
>>> net_opt = nn.Momentum(params=networks_l5.trainable_params(), learning_rate=lr, momentum=0.0)
>>> config_ck = CheckpointConfig(save_checkpoint_steps=(int)(samples/cfg.batch_size), keep_checkpoint_max=10)
>>> model_instance = SuppressModel(network=networks_l5,
>>> loss_fn=net_loss,
>>> optimizer=net_opt,
>>> metrics={"Accuracy": Accuracy()})
... loss_fn=net_loss,
... optimizer=net_opt,
... metrics={"Accuracy": Accuracy()})
>>> model_instance.link_suppress_ctrl(suppress_ctrl_instance)
>>> ds_train = generate_mnist_dataset("./MNIST_unzip/train",
>>> batch_size=cfg.batch_size, repeat_size=1, samples=samples)
... batch_size=cfg.batch_size, repeat_size=1, samples=samples)
>>> ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet",
>>> directory="./trained_ckpt_file/",
>>> config=config_ck)
... directory="./trained_ckpt_file/",
... config=config_ck)
>>> model_instance.train(epoch_size, ds_train, callbacks=[ckpoint_cb, LossMonitor(), suppress_masker],
>>> dataset_sink_mode=False)
... dataset_sink_mode=False)
"""

def __init__(self, model, suppress_ctrl):


+ 34
- 34
mindarmour/privacy/sup_privacy/sup_ctrl/conctrl.py View File

@@ -61,31 +61,31 @@ class SuppressPrivacyFactory:
>>> mask_layers = []
>>> mask_layers.append(MaskLayerDes("conv1.weight", 0, False, True, 10))
>>> suppress_ctrl_instance = SuppressPrivacyFactory().create(networks=networks_l5,
>>> mask_layers=mask_layers,
>>> policy="local_train",
>>> end_epoch=10,
>>> batch_num=(int)(10000/cfg.batch_size),
>>> start_epoch=3,
>>> mask_times=1000,
>>> lr=lr,
>>> sparse_end=0.90,
>>> sparse_start=0.0)
... mask_layers=mask_layers,
... policy="local_train",
... end_epoch=10,
... batch_num=(int)(10000/cfg.batch_size),
... start_epoch=3,
... mask_times=1000,
... lr=lr,
... sparse_end=0.90,
... sparse_start=0.0)
>>> net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
>>> net_opt = nn.Momentum(params=networks_l5.trainable_params(), learning_rate=lr, momentum=0.0)
>>> config_ck = CheckpointConfig(save_checkpoint_steps=(int)(samples/cfg.batch_size),
>>> keep_checkpoint_max=10)
... keep_checkpoint_max=10)
>>> model_instance = SuppressModel(network=networks_l5,
>>> loss_fn=net_loss,
>>> optimizer=net_opt,
>>> metrics={"Accuracy": Accuracy()})
... loss_fn=net_loss,
... optimizer=net_opt,
... metrics={"Accuracy": Accuracy()})
>>> model_instance.link_suppress_ctrl(suppress_ctrl_instance)
>>> ds_train = generate_mnist_dataset("./MNIST_unzip/train",
>>> batch_size=cfg.batch_size, repeat_size=1, samples=samples)
... batch_size=cfg.batch_size, repeat_size=1, samples=samples)
>>> ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet",
>>> directory="./trained_ckpt_file/",
>>> config=config_ck)
... directory="./trained_ckpt_file/",
... config=config_ck)
>>> model_instance.train(epoch_size, ds_train, callbacks=[ckpoint_cb, LossMonitor(), suppress_masker],
>>> dataset_sink_mode=False)
... dataset_sink_mode=False)
"""
check_param_type('policy', policy, str)
if policy == "local_train":
@@ -113,31 +113,31 @@ class SuppressCtrl(Cell):
>>> masklayers = []
>>> masklayers.append(MaskLayerDes("conv1.weight", 0, False, True, 10))
>>> suppress_ctrl_instance = SuppressPrivacyFactory().create(networks=networks_l5,
>>> mask_layers=masklayers,
>>> policy="local_train",
>>> end_epoch=10,
>>> batch_num=(int)(10000/cfg.batch_size),
>>> start_epoch=3,
>>> mask_times=1000,
>>> lr=lr,
>>> sparse_end=0.90,
>>> sparse_start=0.0)
... mask_layers=masklayers,
... policy="local_train",
... end_epoch=10,
... batch_num=(int)(10000/cfg.batch_size),
... start_epoch=3,
... mask_times=1000,
... lr=lr,
... sparse_end=0.90,
... sparse_start=0.0)
>>> net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
>>> net_opt = nn.Momentum(params=networks_l5.trainable_params(), learning_rate=lr, momentum=0.0)
>>> config_ck = CheckpointConfig(save_checkpoint_steps=(int)(samples/cfg.batch_size),
>>> keep_checkpoint_max=10)
... keep_checkpoint_max=10)
>>> model_instance = SuppressModel(network=networks_l5,
>>> loss_fn=net_loss,
>>> optimizer=net_opt,
>>> metrics={"Accuracy": Accuracy()})
... loss_fn=net_loss,
... optimizer=net_opt,
... metrics={"Accuracy": Accuracy()})
>>> model_instance.link_suppress_ctrl(suppress_ctrl_instance)
>>> ds_train = generate_mnist_dataset("./MNIST_unzip/train",
>>> batch_size=cfg.batch_size, repeat_size=1, samples=samples)
... batch_size=cfg.batch_size, repeat_size=1, samples=samples)
>>> ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet",
>>> directory="./trained_ckpt_file/",
>>> config=config_ck)
... directory="./trained_ckpt_file/",
... config=config_ck)
>>> model_instance.train(epoch_size, ds_train, callbacks=[ckpoint_cb, LossMonitor(), suppress_masker],
>>> dataset_sink_mode=False)
... dataset_sink_mode=False)
"""
def __init__(self, networks, mask_layers, end_epoch, batch_num, start_epoch, mask_times, lr,
sparse_end, sparse_start):


+ 16
- 16
mindarmour/privacy/sup_privacy/train/model.py View File

@@ -69,30 +69,30 @@ class SuppressModel(Model):
>>> mask_layers = []
>>> mask_layers.append(MaskLayerDes("conv1.weight", 0, False, True, 10))
>>> suppress_ctrl_instance = SuppressPrivacyFactory().create(networks=networks_l5,
>>> mask_layers=mask_layers,
>>> policy="local_train",
>>> end_epoch=10,
>>> batch_num=(int)(10000/cfg.batch_size),
>>> start_epoch=3,
>>> mask_times=1000,
>>> lr=lr,
>>> sparse_end=0.90,
>>> sparse_start=0.0)
... mask_layers=mask_layers,
... policy="local_train",
... end_epoch=10,
... batch_num=(int)(10000/cfg.batch_size),
... start_epoch=3,
... mask_times=1000,
... lr=lr,
... sparse_end=0.90,
... sparse_start=0.0)
>>> net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
>>> net_opt = nn.Momentum(params=networks_l5.trainable_params(), learning_rate=lr, momentum=0.0)
>>> config_ck = CheckpointConfig(save_checkpoint_steps=(int)(samples/cfg.batch_size), keep_checkpoint_max=10)
>>> model_instance = SuppressModel(network=networks_l5,
>>> loss_fn=net_loss,
>>> optimizer=net_opt,
>>> metrics={"Accuracy": Accuracy()})
... loss_fn=net_loss,
... optimizer=net_opt,
... metrics={"Accuracy": Accuracy()})
>>> model_instance.link_suppress_ctrl(suppress_ctrl_instance)
>>> ds_train = generate_mnist_dataset("./MNIST_unzip/train",
>>> batch_size=cfg.batch_size, repeat_size=1, samples=samples)
... batch_size=cfg.batch_size, repeat_size=1, samples=samples)
>>> ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet",
>>> directory="./trained_ckpt_file/",
>>> config=config_ck)
... directory="./trained_ckpt_file/",
... config=config_ck)
>>> model_instance.train(epoch_size, ds_train, callbacks=[ckpoint_cb, LossMonitor(), suppress_masker],
>>> dataset_sink_mode=False)
... dataset_sink_mode=False)
"""

def __init__(self,


+ 3
- 3
mindarmour/reliability/concept_drift/concept_drift_check_time_series.py View File

@@ -23,7 +23,7 @@ from mindarmour.utils._check_param import check_param_type, check_param_in_range


class ConceptDriftCheckTimeSeries:
"""
r"""
ConceptDriftCheckTimeSeries is used for example series distribution change detection.

Args:
@@ -39,7 +39,7 @@ class ConceptDriftCheckTimeSeries:

Examples:
>>> concept = ConceptDriftCheckTimeSeries(window_size=100, rolling_window=10,
>>> step=10, threshold_index=1.5, need_label=False)
... step=10, threshold_index=1.5, need_label=False)
>>> data_example = 5*np.random.rand(1000)
>>> data_example[200: 800] = 20*np.random.rand(600)
>>> score, threshold, concept_drift_location = concept.concept_check(data_example)
@@ -161,7 +161,7 @@ class ConceptDriftCheckTimeSeries:

Examples:
>>> concept = ConceptDriftCheckTimeSeries(window_size=100, rolling_window=10,
>>> step=10, threshold_index=1.5, need_label=False)
... step=10, threshold_index=1.5, need_label=False)
>>> data_example = 5*np.random.rand(1000)
>>> data_example[200: 800] = 20*np.random.rand(600)
>>> score, drift_threshold, drift_location = concept.concept_check(data_example)


Loading…
Cancel
Save