diff --git a/mindarmour/adv_robustness/attacks/black/genetic_attack.py b/mindarmour/adv_robustness/attacks/black/genetic_attack.py index 4263f0b..f7e499b 100644 --- a/mindarmour/adv_robustness/attacks/black/genetic_attack.py +++ b/mindarmour/adv_robustness/attacks/black/genetic_attack.py @@ -75,7 +75,6 @@ class GeneticAttack(Attack): >>> from mindspore.nn import Cell >>> from mindarmour import BlackModel >>> from mindarmour.adv_robustness.attacks import GeneticAttack - >>> >>> class ModelToBeAttacked(BlackModel): >>> def __init__(self, network): >>> super(ModelToBeAttacked, self).__init__() @@ -83,16 +82,13 @@ class GeneticAttack(Attack): >>> def predict(self, inputs): >>> result = self._network(Tensor(inputs.astype(np.float32))) >>> return result.asnumpy() - >>> >>> class Net(Cell): >>> def __init__(self): >>> super(Net, self).__init__() >>> self._softmax = M.Softmax() - >>> >>> def construct(self, inputs): >>> out = self._softmax(inputs) >>> return out - >>> >>> net = Net() >>> model = ModelToBeAttacked(net) >>> attack = GeneticAttack(model, sparse=False) diff --git a/mindarmour/adv_robustness/attacks/black/hop_skip_jump_attack.py b/mindarmour/adv_robustness/attacks/black/hop_skip_jump_attack.py index 98b8c34..67fc5b0 100644 --- a/mindarmour/adv_robustness/attacks/black/hop_skip_jump_attack.py +++ b/mindarmour/adv_robustness/attacks/black/hop_skip_jump_attack.py @@ -80,7 +80,6 @@ class HopSkipJumpAttack(Attack): >>> from mindarmour import BlackModel >>> from mindarmour.adv_robustness.attacks import HopSkipJumpAttack >>> from tests.ut.python.utils.mock_net import Net - >>> >>> class ModelToBeAttacked(BlackModel): >>> def __init__(self, network): >>> super(ModelToBeAttacked, self).__init__() @@ -90,8 +89,6 @@ class HopSkipJumpAttack(Attack): >>> inputs = inputs[np.newaxis, :] >>> result = self._network(Tensor(inputs.astype(np.float32))) >>> return result.asnumpy() - >>> - >>> >>> net = Net() >>> model = ModelToBeAttacked(net) >>> attack = HopSkipJumpAttack(model) @@ -188,12 +185,27 @@ class HopSkipJumpAttack(Attack): - numpy.ndarray, query times for each sample. 
Examples: + >>> import numpy as np + >>> from mindspore import Tensor + >>> from mindarmour import BlackModel + >>> from mindarmour.adv_robustness.attacks import HopSkipJumpAttack + >>> from tests.ut.python.utils.mock_net import Net + >>> class ModelToBeAttacked(BlackModel): + >>> def __init__(self, network): + >>> super(ModelToBeAttacked, self).__init__() + >>> self._network = network + >>> def predict(self, inputs): + >>> if len(inputs.shape) == 3: + >>> inputs = inputs[np.newaxis, :] + >>> result = self._network(Tensor(inputs.astype(np.float32))) + >>> return result.asnumpy() + >>> net = Net() + >>> model = ModelToBeAttacked(net) >>> attack = HopSkipJumpAttack(model) >>> n, c, h, w = 1, 1, 32, 32 >>> class_num = 3 >>> x_test = np.asarray(np.random.random((n,c,h,w)), np.float32) >>> y_test = np.random.randint(0, class_num, size=n) - >>> >>> _, adv_x, _= attack.generate(x_test, y_test) """ if labels is not None: diff --git a/mindarmour/adv_robustness/attacks/black/natural_evolutionary_strategy.py b/mindarmour/adv_robustness/attacks/black/natural_evolutionary_strategy.py index 60ae91e..a3be5bc 100644 --- a/mindarmour/adv_robustness/attacks/black/natural_evolutionary_strategy.py +++ b/mindarmour/adv_robustness/attacks/black/natural_evolutionary_strategy.py @@ -84,7 +84,6 @@ class NES(Attack): >>> from mindarmour import BlackModel >>> from mindarmour.adv_robustness.attacks import NES >>> from tests.ut.python.utils.mock_net import Net - >>> >>> class ModelToBeAttacked(BlackModel): >>> def __init__(self, network): >>> super(ModelToBeAttacked, self).__init__() @@ -94,7 +93,6 @@ class NES(Attack): >>> inputs = inputs[np.newaxis, :] >>> result = self._network(Tensor(inputs.astype(np.float32))) >>> return result.asnumpy() - >>> >>> net = Net() >>> model = ModelToBeAttacked(net) >>> SCENE = 'Query_Limit' @@ -157,12 +155,25 @@ class NES(Attack): ValueError: If scene is not in ['Label_Only', 'Partial_Info', 'Query_Limit'] Examples: + >>> import numpy as np + >>> from mindspore import Tensor + >>> from mindarmour import BlackModel + >>> from mindarmour.adv_robustness.attacks import NES + >>> from tests.ut.python.utils.mock_net import Net + >>> class ModelToBeAttacked(BlackModel): + >>> def __init__(self, network): + >>> super(ModelToBeAttacked, self).__init__() + >>> self._network = network + >>> def predict(self, inputs): + >>> if len(inputs.shape) == 3: + >>> inputs = inputs[np.newaxis, :] + >>> result = self._network(Tensor(inputs.astype(np.float32))) + >>> return result.asnumpy() >>> net = Net() >>> model = ModelToBeAttacked(net) >>> SCENE = 'Query_Limit' >>> TOP_K = -1 >>> attack= NES(model, SCENE, top_k=TOP_K) - >>> >>> num_class = 5 >>> x_test = np.asarray(np.random.random((32, 32)), np.float32) >>> target_image = np.asarray(np.random.random((32, 32)), np.float32) diff --git a/mindarmour/adv_robustness/attacks/black/pointwise_attack.py b/mindarmour/adv_robustness/attacks/black/pointwise_attack.py index c4d824a..695a2e2 100644 --- a/mindarmour/adv_robustness/attacks/black/pointwise_attack.py +++ b/mindarmour/adv_robustness/attacks/black/pointwise_attack.py @@ -52,7 +52,6 @@ class PointWiseAttack(Attack): >>> from mindarmour import BlackModel >>> from mindarmour.adv_robustness.attacks import PointWiseAttack >>> from tests.ut.python.utils.mock_net import Net - >>> >>> class ModelToBeAttacked(BlackModel): >>> def __init__(self, network): >>> super(ModelToBeAttacked, self).__init__() @@ -60,7 +59,6 @@ class PointWiseAttack(Attack): >>> def predict(self, inputs): >>> result = 
self._network(Tensor(inputs.astype(np.float32))) >>> return result.asnumpy() - >>> >>> net = Net() >>> model = ModelToBeAttacked(net) >>> attack = PointWiseAttack(model) @@ -95,6 +93,18 @@ class PointWiseAttack(Attack): - numpy.ndarray, query times for each sample. Examples: + >>> import numpy as np + >>> from mindspore import Tensor + >>> from mindarmour import BlackModel + >>> from mindarmour.adv_robustness.attacks import PointWiseAttack + >>> from tests.ut.python.utils.mock_net import Net + >>> class ModelToBeAttacked(BlackModel): + >>> def __init__(self, network): + >>> super(ModelToBeAttacked, self).__init__() + >>> self._network = network + >>> def predict(self, inputs): + >>> result = self._network(Tensor(inputs.astype(np.float32))) + >>> return result.asnumpy() >>> net = Net() >>> model = ModelToBeAttacked(net) >>> attack = PointWiseAttack(model) diff --git a/mindarmour/adv_robustness/attacks/black/pso_attack.py b/mindarmour/adv_robustness/attacks/black/pso_attack.py index 0ec5ad5..2d43808 100644 --- a/mindarmour/adv_robustness/attacks/black/pso_attack.py +++ b/mindarmour/adv_robustness/attacks/black/pso_attack.py @@ -70,7 +70,6 @@ class PSOAttack(Attack): >>> from mindspore.nn import Cell >>> from mindarmour import BlackModel >>> from mindarmour.adv_robustness.attacks import PSOAttack - >>> >>> class ModelToBeAttacked(BlackModel): >>> def __init__(self, network): >>> super(ModelToBeAttacked, self).__init__() @@ -80,7 +79,6 @@ class PSOAttack(Attack): >>> inputs = np.expand_dims(inputs, axis=0) >>> result = self._network(Tensor(inputs.astype(np.float32))) >>> return result.asnumpy() - >>> >>> class Net(Cell): >>> def __init__(self): >>> super(Net, self).__init__() @@ -89,7 +87,6 @@ class PSOAttack(Attack): >>> def construct(self, inputs): >>> out = self._relu(inputs) >>> return out - >>> >>> net = Net() >>> model = ModelToBeAttacked(net) >>> attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=False) @@ -381,17 +378,6 @@ class PSOAttack(Attack): - numpy.ndarray, generated adversarial examples. - numpy.ndarray, query times for each sample. - - Examples: - >>> net = Net() - >>> model = ModelToBeAttacked(net) - >>> attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=False) - >>> batch_size = 6 - >>> x_test = np.random.rand(batch_size, 10) - >>> y_test = np.random.randint(low=0, high=10, size=batch_size) - >>> y_test = np.eye(10)[y_test] - >>> y_test = y_test.astype(np.float32) - >>> _, adv_data, _ = attack.generate(x_test, y_test) """ # inputs check images, auxiliary_inputs, gt_boxes, gt_labels = check_detection_inputs(inputs, labels) @@ -523,6 +509,29 @@ class PSOAttack(Attack): - numpy.ndarray, query times for each sample. 
Examples: + >>> import numpy as np + >>> import mindspore.nn as nn + >>> from mindspore import Tensor + >>> from mindspore.nn import Cell + >>> from mindarmour import BlackModel + >>> from mindarmour.adv_robustness.attacks import PSOAttack + >>> class ModelToBeAttacked(BlackModel): + >>> def __init__(self, network): + >>> super(ModelToBeAttacked, self).__init__() + >>> self._network = network + >>> def predict(self, inputs): + >>> if len(inputs.shape) == 1: + >>> inputs = np.expand_dims(inputs, axis=0) + >>> result = self._network(Tensor(inputs.astype(np.float32))) + >>> return result.asnumpy() + >>> class Net(Cell): + >>> def __init__(self): + >>> super(Net, self).__init__() + >>> self._relu = nn.ReLU() + >>> + >>> def construct(self, inputs): + >>> out = self._relu(inputs) + >>> return out >>> net = Net() >>> model = ModelToBeAttacked(net) >>> attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=False) diff --git a/mindarmour/adv_robustness/attacks/black/salt_and_pepper_attack.py b/mindarmour/adv_robustness/attacks/black/salt_and_pepper_attack.py index 786ab69..57354fe 100644 --- a/mindarmour/adv_robustness/attacks/black/salt_and_pepper_attack.py +++ b/mindarmour/adv_robustness/attacks/black/salt_and_pepper_attack.py @@ -45,7 +45,6 @@ class SaltAndPepperNoiseAttack(Attack): >>> from mindarmour import BlackModel >>> from mindarmour.adv_robustness.attacks import SaltAndPepperNoiseAttack >>> from tests.ut.python.utils.mock_net import Net - >>> >>> class ModelToBeAttacked(BlackModel): >>> def __init__(self, network): >>> super(ModelToBeAttacked, self).__init__() @@ -53,7 +52,6 @@ class SaltAndPepperNoiseAttack(Attack): >>> def predict(self, inputs): >>> result = self._network(Tensor(inputs.astype(np.float32))) >>> return result.asnumpy() - >>> >>> net = Net() >>> model = ModelToBeAttacked(net) >>> attack = SaltAndPepperNoiseAttack(model) @@ -85,6 +83,18 @@ class SaltAndPepperNoiseAttack(Attack): - numpy.ndarray, query times for each sample. 
Examples: + >>> import numpy as np + >>> from mindspore import Tensor + >>> from mindarmour import BlackModel + >>> from mindarmour.adv_robustness.attacks import SaltAndPepperNoiseAttack + >>> from tests.ut.python.utils.mock_net import Net + >>> class ModelToBeAttacked(BlackModel): + >>> def __init__(self, network): + >>> super(ModelToBeAttacked, self).__init__() + >>> self._network = network + >>> def predict(self, inputs): + >>> result = self._network(Tensor(inputs.astype(np.float32))) + >>> return result.asnumpy() >>> net = Net() >>> model = ModelToBeAttacked(net) >>> attack = PointWiseAttack(model) diff --git a/mindarmour/adv_robustness/attacks/carlini_wagner.py b/mindarmour/adv_robustness/attacks/carlini_wagner.py index a65532d..475e3c6 100644 --- a/mindarmour/adv_robustness/attacks/carlini_wagner.py +++ b/mindarmour/adv_robustness/attacks/carlini_wagner.py @@ -99,7 +99,6 @@ class CarliniWagnerL2Attack(Attack): >>> import mindspore.ops.operations as M >>> from mindspore.nn import Cell >>> from mindarmour.adv_robustness.attacks import CarliniWagnerL2Attack - >>> >>> class Net(Cell): >>> def __init__(self): >>> super(Net, self).__init__() @@ -108,7 +107,6 @@ class CarliniWagnerL2Attack(Attack): >>> def construct(self, inputs): >>> out = self._softmax(inputs) >>> return out - >>> >>> input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32) >>> label_np = np.array([3]).astype(np.int64) >>> num_classes = input_np.shape[1] diff --git a/mindarmour/adv_robustness/attacks/deep_fool.py b/mindarmour/adv_robustness/attacks/deep_fool.py index 2d20327..bd0d5b8 100644 --- a/mindarmour/adv_robustness/attacks/deep_fool.py +++ b/mindarmour/adv_robustness/attacks/deep_fool.py @@ -122,18 +122,16 @@ class DeepFool(Attack): >>> from mindspore.nn import Cell >>> from mindspore import Tensor >>> from mindarmour.adv_robustness.attacks import DeepFool - >>> - >>> def __init__(self): - >>> super(Net, self).__init__() - >>> self._softmax = P.Softmax() - >>> - >>> def construct(self, inputs): - >>> out = self._softmax(inputs) - >>> return out - >>> + >>> class Net(Cell): + >>> def __init__(self): + >>> super(Net, self).__init__() + >>> self._softmax = P.Softmax() + >>> def construct(self, inputs): + >>> out = self._softmax(inputs) + >>> return out >>> net = Net() >>> attack = DeepFool(net, classes, max_iters=10, norm_level=2, - bounds=(0.0, 1.0)) + ... bounds=(0.0, 1.0)) """ def __init__(self, network, num_classes, model_type='classification', @@ -181,6 +179,21 @@ class DeepFool(Attack): NotImplementedError: If norm_level is not in [2, np.inf, '2', 'inf']. Examples: + >>> import numpy as np + >>> import mindspore.ops.operations as P + >>> from mindspore.nn import Cell + >>> from mindspore import Tensor + >>> from mindarmour.adv_robustness.attacks import DeepFool + >>> class Net(Cell): + >>> def __init__(self): + >>> super(Net, self).__init__() + >>> self._softmax = P.Softmax() + >>> def construct(self, inputs): + >>> out = self._softmax(inputs) + >>> return out + >>> net = Net() + >>> attack = DeepFool(net, classes, max_iters=10, norm_level=2, + ... 
bounds=(0.0, 1.0)) >>> input_shape = (1, 5) >>> _, classes = input_shape >>> input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32) diff --git a/mindarmour/adv_robustness/attacks/gradient_method.py b/mindarmour/adv_robustness/attacks/gradient_method.py index e2e7bb8..52d4436 100644 --- a/mindarmour/adv_robustness/attacks/gradient_method.py +++ b/mindarmour/adv_robustness/attacks/gradient_method.py @@ -51,8 +51,7 @@ class GradientMethod(Attack): >>> import mindspore.nn as nn >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits >>> from mindspore import Tensor - >>> from mindarmour.adv_robustness.attacksimport FastGradientMethod - >>> + >>> from mindarmour.adv_robustness.attacks import FastGradientMethod >>> class Net(Cell): >>> def __init__(self): >>> super(Net, self).__init__() @@ -61,7 +60,6 @@ class GradientMethod(Attack): >>> def construct(self, inputs): >>> out = self._relu(inputs) >>> return out - >>> >>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]]) >>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]) >>> net = Net() @@ -175,16 +173,13 @@ class FastGradientMethod(GradientMethod): >>> import mindspore.nn as nn >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits >>> from mindarmour.adv_robustness.attacks import FastGradientMethod - >>> >>> class Net(Cell): >>> def __init__(self): >>> super(Net, self).__init__() >>> self._relu = nn.ReLU() - >>> >>> def construct(self, inputs): >>> out = self._relu(inputs) >>> return out - >>> >>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]]) >>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]) >>> net = Net() @@ -258,16 +253,13 @@ class RandomFastGradientMethod(FastGradientMethod): >>> import mindspore.nn as nn >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits >>> from mindarmour.adv_robustness.attacks import RandomFastGradientMethod - >>> >>> class Net(Cell): >>> def __init__(self): >>> super(Net, self).__init__() >>> self._relu = nn.ReLU() - >>> >>> def construct(self, inputs): >>> out = self._relu(inputs) >>> return out - >>> >>> net = Net() >>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]]) >>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]) @@ -315,16 +307,13 @@ class FastGradientSignMethod(GradientMethod): >>> import mindspore.nn as nn >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits >>> from mindarmour.adv_robustness.attacks import FastGradientSignMethod - >>> >>> class Net(Cell): >>> def __init__(self): >>> super(Net, self).__init__() >>> self._relu = nn.ReLU() - >>> >>> def construct(self, inputs): >>> out = self._relu(inputs) >>> return out - >>> >>> net = Net() >>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]]) >>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]) @@ -399,11 +388,9 @@ class RandomFastGradientSignMethod(FastGradientSignMethod): >>> def __init__(self): >>> super(Net, self).__init__() >>> self._relu = nn.ReLU() - >>> >>> def construct(self, inputs): >>> out = self._relu(inputs) >>> return out - >>> >>> net = Net() >>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]]) >>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]) @@ -446,7 +433,6 @@ class LeastLikelyClassMethod(FastGradientSignMethod): >>> import mindspore.nn as nn >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits >>> from mindarmour.adv_robustness.attacks import LeastLikelyClassMethod - >>> >>> class Net(Cell): >>> def __init__(self): >>> super(Net, self).__init__() @@ -455,7 +441,6 @@ class LeastLikelyClassMethod(FastGradientSignMethod): 
>>> def construct(self, inputs): >>> out = self._relu(inputs) >>> return out - >>> >>> net = Net() >>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]]) >>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]) @@ -499,16 +484,13 @@ class RandomLeastLikelyClassMethod(FastGradientSignMethod): >>> import mindspore.nn as nn >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits >>> from mindarmour.adv_robustness.attacks import RandomLeastLikelyClassMethod - >>> >>> class Net(Cell): >>> def __init__(self): >>> super(Net, self).__init__() >>> self._relu = nn.ReLU() - >>> >>> def construct(self, inputs): >>> out = self._relu(inputs) >>> return out - >>> >>> net = Net() >>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]]) >>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]) diff --git a/mindarmour/adv_robustness/attacks/iterative_gradient_method.py b/mindarmour/adv_robustness/attacks/iterative_gradient_method.py index ae0e909..86a9ba5 100644 --- a/mindarmour/adv_robustness/attacks/iterative_gradient_method.py +++ b/mindarmour/adv_robustness/attacks/iterative_gradient_method.py @@ -188,16 +188,13 @@ class BasicIterativeMethod(IterativeGradientMethod): >>> import mindspore.nn as nn >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits >>> from mindarmour.adv_robustness.attacks import BasicIterativeMethod - >>> >>> class Net(Cell): >>> def __init__(self): >>> super(Net, self).__init__() >>> self._relu = nn.ReLU() - >>> >>> def construct(self, inputs): >>> out = self._relu(inputs) >>> return out - >>> >>> net = Net() >>> attack = BasicIterativeMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False)) """ @@ -230,21 +227,23 @@ class BasicIterativeMethod(IterativeGradientMethod): numpy.ndarray, generated adversarial examples. Examples: + >>> import numpy as np + >>> import mindspore.nn as nn + >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits + >>> from mindarmour.adv_robustness.attacks import BasicIterativeMethod >>> class Net(Cell): >>> def __init__(self): >>> super(Net, self).__init__() >>> self._relu = nn.ReLU() - >>> >>> def construct(self, inputs): >>> out = self._relu(inputs) >>> return out - >>> >>> net = Net() >>> attack = BasicIterativeMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False)) >>> adv_x = attack.generate([[0.3, 0.2, 0.6], - >>> [0.3, 0.2, 0.4]], - >>> [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0], - >>> [0, 0, 0, 0, 0, 0, 1, 0, 0, 0]]) + ... [0.3, 0.2, 0.4]], + ... [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0], + ... [0, 0, 0, 0, 0, 0, 1, 0, 0, 0]]) """ inputs_image, inputs, labels = check_inputs_labels(inputs, labels) arr_x = inputs_image @@ -333,22 +332,19 @@ class MomentumIterativeMethod(IterativeGradientMethod): >>> import mindspore.nn as nn >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits >>> from mindarmour.adv_robustness.attacks import MomentumIterativeMethod - >>> >>> class Net(Cell): >>> def __init__(self): >>> super(Net, self).__init__() >>> self._relu = nn.ReLU() - >>> >>> def construct(self, inputs): >>> out = self._relu(inputs) >>> return out - >>> >>> net = Net() >>> attack = MomentumIterativeMethod(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False)) >>> adv_x = attack.generate([[0.5, 0.2, 0.6], - >>> [0.3, 0, 0.2]], - >>> [[0, 0, 0, 0, 0, 0, 0, 0, 1, 0], - >>> [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]]) + ... [0.3, 0, 0.2]], + ... [[0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + ... 
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0]]) """ inputs_image, inputs, labels = check_inputs_labels(inputs, labels) arr_x = inputs_image @@ -406,7 +402,7 @@ class MomentumIterativeMethod(IterativeGradientMethod): Examples: >>> grad = self._gradient([[0.5, 0.3, 0.4]], - >>> [[0, 0, 0, 1, 0, 0, 0, 0, 0, 0]) + ... [[0, 0, 0, 1, 0, 0, 0, 0, 0, 0]]) """ # get grad of loss over x inputs_tensor = to_tensor_tuple(inputs) @@ -479,22 +475,19 @@ class ProjectedGradientDescent(BasicIterativeMethod): >>> import mindspore.nn as nn >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits >>> from mindarmour.adv_robustness.attacks import ProjectedGradientDescent - >>> >>> class Net(Cell): >>> def __init__(self): >>> super(Net, self).__init__() >>> self._relu = nn.ReLU() - >>> >>> def construct(self, inputs): >>> out = self._relu(inputs) >>> return out - >>> >>> net = Net() >>> attack = ProjectedGradientDescent(net, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False)) >>> adv_x = attack.generate([[0.6, 0.2, 0.6], - >>> [0.3, 0.3, 0.4]], - >>> [[0, 0, 0, 0, 0, 0, 0, 0, 0, 1], - >>> [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]]) + ... [0.3, 0.3, 0.4]], + ... [[0, 0, 0, 0, 0, 0, 0, 0, 0, 1], + ... [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]]) """ inputs_image, inputs, labels = check_inputs_labels(inputs, labels) arr_x = inputs_image diff --git a/mindarmour/adv_robustness/attacks/jsma.py b/mindarmour/adv_robustness/attacks/jsma.py index 560a5a6..ff4bf73 100644 --- a/mindarmour/adv_robustness/attacks/jsma.py +++ b/mindarmour/adv_robustness/attacks/jsma.py @@ -62,11 +62,9 @@ class JSMAAttack(Attack): >>> def __init__(self): >>> super(Net, self).__init__() >>> self._relu = nn.ReLU() - >>> >>> def construct(self, inputs): >>> out = self._relu(inputs) >>> return out - >>> >>> net = Net() >>> input_shape = (1, 5) >>> batch_size, classes = input_shape @@ -197,11 +195,22 @@ class JSMAAttack(Attack): numpy.ndarray, adversarial samples. Examples: + >>> import numpy as np + >>> import mindspore.nn as nn + >>> from mindspore.nn import Cell + >>> from mindarmour.adv_robustness.attacks import JSMAAttack + >>> class Net(Cell): + >>> def __init__(self): + >>> super(Net, self).__init__() + >>> self._relu = nn.ReLU() + >>> def construct(self, inputs): + >>> out = self._relu(inputs) + >>> return out + >>> net = Net() >>> input_shape = (1, 5) + >>> batch_size, classes = input_shape >>> input_np = np.random.random(input_shape).astype(np.float32) >>> label_np = np.random.randint(classes, size=batch_size) - >>> batch_size, classes = input_shape - >>> >>> attack = JSMAAttack(net, classes, max_iteration=5) >>> advs = attack.generate(input_np, label_np) """ diff --git a/mindarmour/adv_robustness/attacks/lbfgs.py b/mindarmour/adv_robustness/attacks/lbfgs.py index dd7912c..d701e37 100644 --- a/mindarmour/adv_robustness/attacks/lbfgs.py +++ b/mindarmour/adv_robustness/attacks/lbfgs.py @@ -99,6 +99,10 @@ class LBFGS(Attack): numpy.ndarray, generated adversarial examples.
Examples: + >>> import numpy as np + >>> from mindarmour.adv_robustness.attacks import LBFGS + >>> from tests.ut.python.utils.mock_net import Net + >>> net = Net() >>> attack = LBFGS(net, is_targeted=True) >>> adv = attack.generate([[0.1, 0.2, 0.6], [0.3, 0, 0.4]], [2, 2]) """ diff --git a/mindarmour/adv_robustness/defenses/adversarial_defense.py b/mindarmour/adv_robustness/defenses/adversarial_defense.py index b0a10a5..49e9eef 100644 --- a/mindarmour/adv_robustness/defenses/adversarial_defense.py +++ b/mindarmour/adv_robustness/defenses/adversarial_defense.py @@ -115,7 +115,6 @@ class AdversarialDefenseWithAttacks(AdversarialDefense): >>> from mindarmour.adv_robustness.defenses import AdversarialDefense >>> from mindspore import nn >>> from tests.ut.python.utils.mock_net import Net - >>> >>> net = Net() >>> lr = 0.001 >>> momentum = 0.9 @@ -123,12 +122,10 @@ class AdversarialDefenseWithAttacks(AdversarialDefense): >>> num_class = 10 >>> loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False) >>> optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum) - >>> >>> fgsm = FastGradientSignMethod(net, loss_fn=loss_fn) >>> pgd = ProjectedGradientDescent(net, loss_fn=loss_fn) >>> ead = AdversarialDefenseWithAttack(net, [fgsm, pgd], loss_fn=loss_fn, - >>> optimizer=optimizer) - >>> + ... optimizer=optimizer) >>> inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32) >>> labels = np.random.randint(num_class, size=batch_size).astype(np.int32) >>> labels = np.eye(num_classes)[labels].astype(np.float32) diff --git a/mindarmour/adv_robustness/defenses/natural_adversarial_defense.py b/mindarmour/adv_robustness/defenses/natural_adversarial_defense.py index c4372f6..fca27d7 100644 --- a/mindarmour/adv_robustness/defenses/natural_adversarial_defense.py +++ b/mindarmour/adv_robustness/defenses/natural_adversarial_defense.py @@ -41,18 +41,14 @@ class NaturalAdversarialDefense(AdversarialDefenseWithAttacks): >>> from mindarmour.adv_robustness.defenses import NaturalAdversarialDefense >>> from mindspore import nn >>> from tests.ut.python.utils.mock_net import Net - >>> >>> net = Net() >>> lr = 0.001 >>> momentum = 0.9 >>> batch_size = 32 >>> num_class = 10 - >>> >>> loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False) >>> optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum) - >>> >>> nad = NaturalAdversarialDefense(net, loss_fn=loss_fn, optimizer=optimizer) - >>> >>> inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32) >>> labels = np.random.randint(num_class, size=batch_size).astype(np.int32) >>> labels = np.eye(num_classes)[labels].astype(np.float32) diff --git a/mindarmour/adv_robustness/defenses/projected_adversarial_defense.py b/mindarmour/adv_robustness/defenses/projected_adversarial_defense.py index 74d4817..97ff3a0 100644 --- a/mindarmour/adv_robustness/defenses/projected_adversarial_defense.py +++ b/mindarmour/adv_robustness/defenses/projected_adversarial_defense.py @@ -46,18 +46,14 @@ class ProjectedAdversarialDefense(AdversarialDefenseWithAttacks): >>> from mindarmour.adv_robustness.defenses import ProjectedAdversarialDefense >>> from mindspore import nn >>> from tests.ut.python.utils.mock_net import Net - >>> >>> net = Net() >>> lr = 0.001 >>> momentum = 0.9 >>> batch_size = 32 >>> num_class = 10 - >>> >>> loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False) >>> optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum) - >>> >>> pad = ProjectedAdversarialDefense(net, loss_fn=loss_fn, 
optimizer=optimizer) - >>> >>> inputs = np.random.rand(batch_size, 1, 32, 32).astype(np.float32) >>> labels = np.random.randint(num_class, size=batch_size).astype(np.int32) >>> labels = np.eye(num_classes)[labels].astype(np.float32) diff --git a/mindarmour/adv_robustness/detectors/black/similarity_detector.py b/mindarmour/adv_robustness/detectors/black/similarity_detector.py index 453a1ed..7eee082 100644 --- a/mindarmour/adv_robustness/detectors/black/similarity_detector.py +++ b/mindarmour/adv_robustness/detectors/black/similarity_detector.py @@ -79,7 +79,6 @@ class SimilarityDetector(Detector): >>> from mindspore import Model >>> from mindspore import context >>> from mindarmour.adv_robustness.detectors import SimilarityDetector - >>> >>> class EncoderNet(Cell): >>> def __init__(self, encode_dim): >>> super(EncoderNet, self).__init__() @@ -89,7 +88,6 @@ class SimilarityDetector(Detector): >>> return self.add(inputs, inputs) >>> def get_encode_dim(self): >>> return self._encode_dim - >>> >>> np.random.seed(5) >>> x_train = np.random.rand(10, 32, 32, 3).astype(np.float32) >>> perm = np.random.permutation(x_train.shape[0]) diff --git a/mindarmour/adv_robustness/detectors/ensemble_detector.py b/mindarmour/adv_robustness/detectors/ensemble_detector.py index bb34698..2c91a72 100644 --- a/mindarmour/adv_robustness/detectors/ensemble_detector.py +++ b/mindarmour/adv_robustness/detectors/ensemble_detector.py @@ -42,7 +42,6 @@ class EnsembleDetector(Detector): >>> from mindarmour.adv_robustness.detectors import ErrorBasedDetector >>> from mindarmour.adv_robustness.detectors import RegionBasedDetector >>> from mindarmour.adv_robustness.detectors import EnsembleDetector - >>> >>> class Net(Cell): >>> def __init__(self): >>> super(Net, self).__init__() @@ -56,7 +55,6 @@ class EnsembleDetector(Detector): >>> self.add = Add() >>> def construct(self, inputs): >>> return self.add(inputs, inputs) - >>> >>> np.random.seed(6) >>> adv = np.random.rand(4, 4).astype(np.float32) >>> model = Model(Net()) diff --git a/mindarmour/adv_robustness/detectors/mag_net.py b/mindarmour/adv_robustness/detectors/mag_net.py index 71748aa..087b706 100644 --- a/mindarmour/adv_robustness/detectors/mag_net.py +++ b/mindarmour/adv_robustness/detectors/mag_net.py @@ -58,10 +58,8 @@ class ErrorBasedDetector(Detector): >>> def __init__(self): >>> super(Net, self).__init__() >>> self.add = Add() - >>> >>> def construct(self, inputs): >>> return self.add(inputs, inputs) - >>> >>> np.random.seed(5) >>> ori = np.random.rand(4, 4, 4).astype(np.float32) >>> np.random.seed(6) @@ -195,10 +193,8 @@ class DivergenceBasedDetector(ErrorBasedDetector): >>> def __init__(self): >>> super(Net, self).__init__() >>> self.add = Add() - >>> >>> def construct(self, inputs): >>> return self.add(inputs, inputs) - >>> >>> np.random.seed(5) >>> ori = np.random.rand(4, 4, 4).astype(np.float32) >>> np.random.seed(6) diff --git a/mindarmour/adv_robustness/detectors/region_based_detector.py b/mindarmour/adv_robustness/detectors/region_based_detector.py index e32a6aa..d11d37b 100644 --- a/mindarmour/adv_robustness/detectors/region_based_detector.py +++ b/mindarmour/adv_robustness/detectors/region_based_detector.py @@ -62,14 +62,12 @@ class RegionBasedDetector(Detector): >>> def __init__(self): >>> super(Net, self).__init__() >>> self.add = Add() - >>> >>> def construct(self, inputs): >>> return self.add(inputs, inputs) - >>> >>> np.random.seed(5) >>> ori = np.random.rand(4, 4).astype(np.float32) >>> labels = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0], - 
[0, 1, 0, 0]]).astype(np.int32) + ... [0, 1, 0, 0]]).astype(np.int32) >>> np.random.seed(6) >>> adv = np.random.rand(4, 4).astype(np.float32) >>> model = Model(Net()) diff --git a/mindarmour/adv_robustness/detectors/spatial_smoothing.py b/mindarmour/adv_robustness/detectors/spatial_smoothing.py index 54ab1c3..cf9e4d1 100644 --- a/mindarmour/adv_robustness/detectors/spatial_smoothing.py +++ b/mindarmour/adv_robustness/detectors/spatial_smoothing.py @@ -59,10 +59,8 @@ class SpatialSmoothing(Detector): >>> def __init__(self): >>> super(Net, self).__init__() >>> self._softmax = P.Softmax() - >>> >>> def construct(self, inputs): >>> return self._softmax(inputs) - >>> >>> input_shape = (50, 3) >>> np.random.seed(1) >>> input_np = np.random.randn(*input_shape).astype(np.float32) diff --git a/mindarmour/adv_robustness/evaluations/attack_evaluation.py b/mindarmour/adv_robustness/evaluations/attack_evaluation.py index d101ffa..1e6bd9f 100644 --- a/mindarmour/adv_robustness/evaluations/attack_evaluation.py +++ b/mindarmour/adv_robustness/evaluations/attack_evaluation.py @@ -50,11 +50,11 @@ class AttackEvaluate: >>> x = np.random.normal(size=(3, 512, 512, 3)) >>> adv_x = np.random.normal(size=(3, 512, 512, 3)) >>> y = np.array([[0.1, 0.1, 0.2, 0.6], - >>> [0.1, 0.7, 0.0, 0.2], - >>> [0.8, 0.1, 0.0, 0.1]]) + ... [0.1, 0.7, 0.0, 0.2], + ... [0.8, 0.1, 0.0, 0.1]]) >>> adv_y = np.array([[0.1, 0.1, 0.2, 0.6], - >>> [0.1, 0.0, 0.8, 0.1], - >>> [0.0, 0.9, 0.1, 0.0]]) + ... [0.1, 0.0, 0.8, 0.1], + ... [0.0, 0.9, 0.1, 0.0]]) >>> attack_eval = AttackEvaluate(x, y, adv_x, adv_y) >>> mr = attack_eval.mis_classification_rate() """ diff --git a/mindarmour/adv_robustness/evaluations/black/defense_evaluation.py b/mindarmour/adv_robustness/evaluations/black/defense_evaluation.py index 7c5c2ea..5733701 100644 --- a/mindarmour/adv_robustness/evaluations/black/defense_evaluation.py +++ b/mindarmour/adv_robustness/evaluations/black/defense_evaluation.py @@ -71,14 +71,14 @@ class BlackDefenseEvaluate: >>> true_labels = np.array([3, 1, 0]) >>> max_queries = 100 >>> def_eval = BlackDefenseEvaluate(raw_preds, - >>> def_preds, - >>> raw_query_counts, - >>> def_query_counts, - >>> raw_query_time, - >>> def_query_time, - >>> def_detection_counts, - >>> true_labels, - >>> max_queries) + ... def_preds, + ... raw_query_counts, + ... def_query_counts, + ... raw_query_time, + ... def_query_time, + ... def_detection_counts, + ... true_labels, + ... max_queries) >>> def_eval.qcv() """ diff --git a/mindarmour/adv_robustness/evaluations/defense_evaluation.py b/mindarmour/adv_robustness/evaluations/defense_evaluation.py index 8f783ef..4b222b2 100644 --- a/mindarmour/adv_robustness/evaluations/defense_evaluation.py +++ b/mindarmour/adv_robustness/evaluations/defense_evaluation.py @@ -40,15 +40,15 @@ class DefenseEvaluate: Examples: >>> raw_preds = np.array([[0.1, 0.1, 0.2, 0.6], - >>> [0.1, 0.7, 0.0, 0.2], - >>> [0.8, 0.1, 0.0, 0.1]]) + ... [0.1, 0.7, 0.0, 0.2], + ... [0.8, 0.1, 0.0, 0.1]]) >>> def_preds = np.array([[0.1, 0.1, 0.1, 0.7], - >>> [0.1, 0.6, 0.2, 0.1], - >>> [0.1, 0.2, 0.1, 0.6]]) + ... [0.1, 0.6, 0.2, 0.1], + ... [0.1, 0.2, 0.1, 0.6]]) >>> true_labels = np.array([3, 1, 0]) >>> def_eval = DefenseEvaluate(raw_preds, - >>> def_preds, - >>> true_labels) + ... def_preds, + ... 
true_labels) >>> def_eval.cav() """ def __init__(self, raw_preds, def_preds, true_labels): diff --git a/mindarmour/adv_robustness/evaluations/visual_metrics.py b/mindarmour/adv_robustness/evaluations/visual_metrics.py index 8f5e02e..83eddab 100644 --- a/mindarmour/adv_robustness/evaluations/visual_metrics.py +++ b/mindarmour/adv_robustness/evaluations/visual_metrics.py @@ -52,10 +52,10 @@ class RadarMetric: >>> metrics_data = [def_metrics, raw_metrics] >>> metrics_labels = ['before', 'after'] >>> rm = RadarMetric(metrics_name, - >>> metrics_data, - >>> metrics_labels, - >>> title='', - >>> scale='sparse') + ... metrics_data, + ... metrics_labels, + ... title='', + ... scale='sparse') >>> rm.show() """ diff --git a/mindarmour/fuzz_testing/fuzzing.py b/mindarmour/fuzz_testing/fuzzing.py index 1ecdc84..93dafce 100644 --- a/mindarmour/fuzz_testing/fuzzing.py +++ b/mindarmour/fuzz_testing/fuzzing.py @@ -107,17 +107,17 @@ class Fuzzer: >>> net = Net() >>> model = Model(net) >>> mutate_config = [{'method': 'Blur', - >>> 'params': {'auto_param': [True]}}, - >>> {'method': 'Contrast', - >>> 'params': {'factor': [2]}}, - >>> {'method': 'Translate', - >>> 'params': {'x_bias': [0.1, 0.2], 'y_bias': [0.2]}}, - >>> {'method': 'FGSM', - >>> 'params': {'eps': [0.1, 0.2, 0.3], 'alpha': [0.1]}}] + ... 'params': {'auto_param': [True]}}, + ... {'method': 'Contrast', + ... 'params': {'factor': [2]}}, + ... {'method': 'Translate', + ... 'params': {'x_bias': [0.1, 0.2], 'y_bias': [0.2]}}, + ... {'method': 'FGSM', + ... 'params': {'eps': [0.1, 0.2, 0.3], 'alpha': [0.1]}}] >>> nc = KMultisectionNeuronCoverage(model, train_images, segmented_num=100) >>> model_fuzz_test = Fuzzer(model) >>> samples, gt_labels, preds, strategies, metrics = model_fuzz_test.fuzzing(mutate_config, initial_seeds, - >>> nc, max_iters=100) + ... nc, max_iters=100) """ def __init__(self, target_model): diff --git a/mindarmour/privacy/diff_privacy/mechanisms/mechanisms.py b/mindarmour/privacy/diff_privacy/mechanisms/mechanisms.py index b987372..e4e5ecf 100644 --- a/mindarmour/privacy/diff_privacy/mechanisms/mechanisms.py +++ b/mindarmour/privacy/diff_privacy/mechanisms/mechanisms.py @@ -71,10 +71,10 @@ class ClipMechanismsFactory: >>> target_unclipped_quantile = 0.9 >>> clip_mechanism = ClipMechanismsFactory() >>> ada_clip = clip_mechanism.create('Gaussian', - >>> decay_policy=decay_policy, - >>> learning_rate=learning_rate, - >>> target_unclipped_quantile=target_unclipped_quantile, - >>> fraction_stddev=beta_stddev) + ... decay_policy=decay_policy, + ... learning_rate=learning_rate, + ... target_unclipped_quantile=target_unclipped_quantile, + ... fraction_stddev=beta_stddev) >>> next_norm_bound = ada_clip(beta, norm_bound) """ @@ -125,25 +125,25 @@ class NoiseMechanismsFactory: >>> epochs = 1 >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True) >>> noise_mech = NoiseMechanismsFactory().create('Gaussian', - >>> norm_bound=norm_bound, - >>> initial_noise_multiplier=initial_noise_multiplier) + ... norm_bound=norm_bound, + ... initial_noise_multiplier=initial_noise_multiplier) >>> clip_mech = ClipMechanismsFactory().create('Gaussian', - >>> decay_policy='Linear', - >>> learning_rate=0.001, - >>> target_unclipped_quantile=0.9, - >>> fraction_stddev=0.01) + ... decay_policy='Linear', + ... learning_rate=0.001, + ... target_unclipped_quantile=0.9, + ... fraction_stddev=0.01) >>> net_opt = nn.Momentum(network.trainable_params(), learning_rate=0.1, - >>> momentum=0.9) + ... 
momentum=0.9) >>> model = DPModel(micro_batches=2, - >>> clip_mech=clip_mech, - >>> norm_bound=norm_bound, - >>> noise_mech=noise_mech, - >>> network=network, - >>> loss_fn=loss, - >>> optimizer=net_opt, - >>> metrics=None) + ... clip_mech=clip_mech, + ... norm_bound=norm_bound, + ... noise_mech=noise_mech, + ... network=network, + ... loss_fn=loss, + ... optimizer=net_opt, + ... metrics=None) >>> ms_ds = ds.GeneratorDataset(dataset_generator, - >>> ['data', 'label']) + ... ['data', 'label']) >>> model.train(epochs, ms_ds, dataset_sink_mode=False) """ if mech_name == 'Gaussian': @@ -386,9 +386,9 @@ class AdaClippingWithGaussianRandom(Cell): >>> learning_rate = 0.001 >>> target_unclipped_quantile = 0.9 >>> ada_clip = AdaClippingWithGaussianRandom(decay_policy=decay_policy, - >>> learning_rate=learning_rate, - >>> target_unclipped_quantile=target_unclipped_quantile, - >>> fraction_stddev=beta_stddev) + ... learning_rate=learning_rate, + ... target_unclipped_quantile=target_unclipped_quantile, + ... fraction_stddev=beta_stddev) >>> next_norm_bound = ada_clip(beta, norm_bound) """ diff --git a/mindarmour/privacy/diff_privacy/optimizer/optimizer.py b/mindarmour/privacy/diff_privacy/optimizer/optimizer.py index 238988e..8927a5f 100644 --- a/mindarmour/privacy/diff_privacy/optimizer/optimizer.py +++ b/mindarmour/privacy/diff_privacy/optimizer/optimizer.py @@ -65,8 +65,8 @@ class DPOptimizerClassFactory: >>> GaussianSGD = DPOptimizerClassFactory(micro_batches=2) >>> GaussianSGD.set_mechanisms('Gaussian', norm_bound=1.0, initial_noise_multiplier=1.5) >>> net_opt = GaussianSGD.create('Momentum')(params=network.trainable_params(), - >>> learning_rate=0.001, - >>> momentum=0.9) + ... learning_rate=0.001, + ... momentum=0.9) """ def __init__(self, micro_batches=2): diff --git a/mindarmour/privacy/diff_privacy/train/model.py b/mindarmour/privacy/diff_privacy/train/model.py index 93fc952..b145b32 100644 --- a/mindarmour/privacy/diff_privacy/train/model.py +++ b/mindarmour/privacy/diff_privacy/train/model.py @@ -94,25 +94,25 @@ class DPModel(Model): >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True) >>> factory_opt = DPOptimizerClassFactory(micro_batches=micro_batches) >>> factory_opt.set_mechanisms('Gaussian', - >>> norm_bound=norm_bound, - >>> initial_noise_multiplier=initial_noise_multiplier) + ... norm_bound=norm_bound, + ... initial_noise_multiplier=initial_noise_multiplier) >>> net_opt = factory_opt.create('Momentum')(network.trainable_params(), - >>> learning_rate=0.1, momentum=0.9) + ... learning_rate=0.1, momentum=0.9) >>> clip_mech = ClipMechanismsFactory().create('Gaussian', - >>> decay_policy='Linear', - >>> learning_rate=0.01, - >>> target_unclipped_quantile=0.9, - >>> fraction_stddev=0.01) + ... decay_policy='Linear', + ... learning_rate=0.01, + ... target_unclipped_quantile=0.9, + ... fraction_stddev=0.01) >>> model = DPModel(micro_batches=micro_batches, - >>> norm_bound=norm_bound, - >>> clip_mech=clip_mech, - >>> noise_mech=None, - >>> network=network, - >>> loss_fn=loss, - >>> optimizer=net_opt, - >>> metrics=None) + ... norm_bound=norm_bound, + ... clip_mech=clip_mech, + ... noise_mech=None, + ... network=network, + ... loss_fn=loss, + ... optimizer=net_opt, + ... metrics=None) >>> ms_ds = ds.GeneratorDataset(dataset_generator, - >>> ['data', 'label']) + ... 
['data', 'label']) >>> model.train(epochs, ms_ds, dataset_sink_mode=False) """ diff --git a/mindarmour/privacy/sup_privacy/mask_monitor/masker.py b/mindarmour/privacy/sup_privacy/mask_monitor/masker.py index 0f67b12..4b9e629 100644 --- a/mindarmour/privacy/sup_privacy/mask_monitor/masker.py +++ b/mindarmour/privacy/sup_privacy/mask_monitor/masker.py @@ -34,30 +34,30 @@ class SuppressMasker(Callback): >>> masklayers = [] >>> masklayers.append(MaskLayerDes("conv1.weight", 0, False, True, 10)) >>> suppress_ctrl_instance = SuppressPrivacyFactory().create(networks=networks_l5, - >>> mask_layers=masklayers, - >>> policy="local_train", - >>> end_epoch=10, - >>> batch_num=(int)(10000/cfg.batch_size), - >>> start_epoch=3, - >>> mask_times=1000, - >>> lr=lr, - >>> sparse_end=0.90, - >>> sparse_start=0.0) + ... mask_layers=masklayers, + ... policy="local_train", + ... end_epoch=10, + ... batch_num=(int)(10000/cfg.batch_size), + ... start_epoch=3, + ... mask_times=1000, + ... lr=lr, + ... sparse_end=0.90, + ... sparse_start=0.0) >>> net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean") >>> net_opt = nn.Momentum(params=networks_l5.trainable_params(), learning_rate=lr, momentum=0.0) >>> config_ck = CheckpointConfig(save_checkpoint_steps=(int)(samples/cfg.batch_size), keep_checkpoint_max=10) >>> model_instance = SuppressModel(network=networks_l5, - >>> loss_fn=net_loss, - >>> optimizer=net_opt, - >>> metrics={"Accuracy": Accuracy()}) + ... loss_fn=net_loss, + ... optimizer=net_opt, + ... metrics={"Accuracy": Accuracy()}) >>> model_instance.link_suppress_ctrl(suppress_ctrl_instance) >>> ds_train = generate_mnist_dataset("./MNIST_unzip/train", - >>> batch_size=cfg.batch_size, repeat_size=1, samples=samples) + ... batch_size=cfg.batch_size, repeat_size=1, samples=samples) >>> ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", - >>> directory="./trained_ckpt_file/", - >>> config=config_ck) + ... directory="./trained_ckpt_file/", + ... config=config_ck) >>> model_instance.train(epoch_size, ds_train, callbacks=[ckpoint_cb, LossMonitor(), suppress_masker], - >>> dataset_sink_mode=False) + ... dataset_sink_mode=False) """ def __init__(self, model, suppress_ctrl): diff --git a/mindarmour/privacy/sup_privacy/sup_ctrl/conctrl.py b/mindarmour/privacy/sup_privacy/sup_ctrl/conctrl.py index 2e6407b..94ab25a 100644 --- a/mindarmour/privacy/sup_privacy/sup_ctrl/conctrl.py +++ b/mindarmour/privacy/sup_privacy/sup_ctrl/conctrl.py @@ -61,31 +61,31 @@ class SuppressPrivacyFactory: >>> mask_layers = [] >>> mask_layers.append(MaskLayerDes("conv1.weight", 0, False, True, 10)) >>> suppress_ctrl_instance = SuppressPrivacyFactory().create(networks=networks_l5, - >>> mask_layers=mask_layers, - >>> policy="local_train", - >>> end_epoch=10, - >>> batch_num=(int)(10000/cfg.batch_size), - >>> start_epoch=3, - >>> mask_times=1000, - >>> lr=lr, - >>> sparse_end=0.90, - >>> sparse_start=0.0) + ... mask_layers=mask_layers, + ... policy="local_train", + ... end_epoch=10, + ... batch_num=(int)(10000/cfg.batch_size), + ... start_epoch=3, + ... mask_times=1000, + ... lr=lr, + ... sparse_end=0.90, + ... sparse_start=0.0) >>> net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean") >>> net_opt = nn.Momentum(params=networks_l5.trainable_params(), learning_rate=lr, momentum=0.0) >>> config_ck = CheckpointConfig(save_checkpoint_steps=(int)(samples/cfg.batch_size), - >>> keep_checkpoint_max=10) + ... 
keep_checkpoint_max=10) >>> model_instance = SuppressModel(network=networks_l5, - >>> loss_fn=net_loss, - >>> optimizer=net_opt, - >>> metrics={"Accuracy": Accuracy()}) + ... loss_fn=net_loss, + ... optimizer=net_opt, + ... metrics={"Accuracy": Accuracy()}) >>> model_instance.link_suppress_ctrl(suppress_ctrl_instance) >>> ds_train = generate_mnist_dataset("./MNIST_unzip/train", - >>> batch_size=cfg.batch_size, repeat_size=1, samples=samples) + ... batch_size=cfg.batch_size, repeat_size=1, samples=samples) >>> ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", - >>> directory="./trained_ckpt_file/", - >>> config=config_ck) + ... directory="./trained_ckpt_file/", + ... config=config_ck) >>> model_instance.train(epoch_size, ds_train, callbacks=[ckpoint_cb, LossMonitor(), suppress_masker], - >>> dataset_sink_mode=False) + ... dataset_sink_mode=False) """ check_param_type('policy', policy, str) if policy == "local_train": @@ -113,31 +113,31 @@ class SuppressCtrl(Cell): >>> masklayers = [] >>> masklayers.append(MaskLayerDes("conv1.weight", 0, False, True, 10)) >>> suppress_ctrl_instance = SuppressPrivacyFactory().create(networks=networks_l5, - >>> mask_layers=masklayers, - >>> policy="local_train", - >>> end_epoch=10, - >>> batch_num=(int)(10000/cfg.batch_size), - >>> start_epoch=3, - >>> mask_times=1000, - >>> lr=lr, - >>> sparse_end=0.90, - >>> sparse_start=0.0) + ... mask_layers=masklayers, + ... policy="local_train", + ... end_epoch=10, + ... batch_num=(int)(10000/cfg.batch_size), + ... start_epoch=3, + ... mask_times=1000, + ... lr=lr, + ... sparse_end=0.90, + ... sparse_start=0.0) >>> net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean") >>> net_opt = nn.Momentum(params=networks_l5.trainable_params(), learning_rate=lr, momentum=0.0) >>> config_ck = CheckpointConfig(save_checkpoint_steps=(int)(samples/cfg.batch_size), - >>> keep_checkpoint_max=10) + ... keep_checkpoint_max=10) >>> model_instance = SuppressModel(network=networks_l5, - >>> loss_fn=net_loss, - >>> optimizer=net_opt, - >>> metrics={"Accuracy": Accuracy()}) + ... loss_fn=net_loss, + ... optimizer=net_opt, + ... metrics={"Accuracy": Accuracy()}) >>> model_instance.link_suppress_ctrl(suppress_ctrl_instance) >>> ds_train = generate_mnist_dataset("./MNIST_unzip/train", - >>> batch_size=cfg.batch_size, repeat_size=1, samples=samples) + ... batch_size=cfg.batch_size, repeat_size=1, samples=samples) >>> ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", - >>> directory="./trained_ckpt_file/", - >>> config=config_ck) + ... directory="./trained_ckpt_file/", + ... config=config_ck) >>> model_instance.train(epoch_size, ds_train, callbacks=[ckpoint_cb, LossMonitor(), suppress_masker], - >>> dataset_sink_mode=False) + ... 
dataset_sink_mode=False) """ def __init__(self, networks, mask_layers, end_epoch, batch_num, start_epoch, mask_times, lr, sparse_end, sparse_start): diff --git a/mindarmour/privacy/sup_privacy/train/model.py b/mindarmour/privacy/sup_privacy/train/model.py index b9b49bd..0e08de9 100644 --- a/mindarmour/privacy/sup_privacy/train/model.py +++ b/mindarmour/privacy/sup_privacy/train/model.py @@ -69,30 +69,30 @@ class SuppressModel(Model): >>> mask_layers = [] >>> mask_layers.append(MaskLayerDes("conv1.weight", 0, False, True, 10)) >>> suppress_ctrl_instance = SuppressPrivacyFactory().create(networks=networks_l5, - >>> mask_layers=mask_layers, - >>> policy="local_train", - >>> end_epoch=10, - >>> batch_num=(int)(10000/cfg.batch_size), - >>> start_epoch=3, - >>> mask_times=1000, - >>> lr=lr, - >>> sparse_end=0.90, - >>> sparse_start=0.0) + ... mask_layers=mask_layers, + ... policy="local_train", + ... end_epoch=10, + ... batch_num=(int)(10000/cfg.batch_size), + ... start_epoch=3, + ... mask_times=1000, + ... lr=lr, + ... sparse_end=0.90, + ... sparse_start=0.0) >>> net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean") >>> net_opt = nn.Momentum(params=networks_l5.trainable_params(), learning_rate=lr, momentum=0.0) >>> config_ck = CheckpointConfig(save_checkpoint_steps=(int)(samples/cfg.batch_size), keep_checkpoint_max=10) >>> model_instance = SuppressModel(network=networks_l5, - >>> loss_fn=net_loss, - >>> optimizer=net_opt, - >>> metrics={"Accuracy": Accuracy()}) + ... loss_fn=net_loss, + ... optimizer=net_opt, + ... metrics={"Accuracy": Accuracy()}) >>> model_instance.link_suppress_ctrl(suppress_ctrl_instance) >>> ds_train = generate_mnist_dataset("./MNIST_unzip/train", - >>> batch_size=cfg.batch_size, repeat_size=1, samples=samples) + ... batch_size=cfg.batch_size, repeat_size=1, samples=samples) >>> ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", - >>> directory="./trained_ckpt_file/", - >>> config=config_ck) + ... directory="./trained_ckpt_file/", + ... config=config_ck) >>> model_instance.train(epoch_size, ds_train, callbacks=[ckpoint_cb, LossMonitor(), suppress_masker], - >>> dataset_sink_mode=False) + ... dataset_sink_mode=False) """ def __init__(self, diff --git a/mindarmour/reliability/concept_drift/concept_drift_check_time_series.py b/mindarmour/reliability/concept_drift/concept_drift_check_time_series.py index b086a3a..61e553f 100644 --- a/mindarmour/reliability/concept_drift/concept_drift_check_time_series.py +++ b/mindarmour/reliability/concept_drift/concept_drift_check_time_series.py @@ -23,7 +23,7 @@ from mindarmour.utils._check_param import check_param_type, check_param_in_range class ConceptDriftCheckTimeSeries: - """ + r""" ConceptDriftCheckTimeSeries is used for example series distribution change detection. Args: @@ -39,7 +39,7 @@ class ConceptDriftCheckTimeSeries: Examples: >>> concept = ConceptDriftCheckTimeSeries(window_size=100, rolling_window=10, - >>> step=10, threshold_index=1.5, need_label=False) + ... step=10, threshold_index=1.5, need_label=False) >>> data_example = 5*np.random.rand(1000) >>> data_example[200: 800] = 20*np.random.rand(600) >>> score, threshold, concept_drift_location = concept.concept_check(data_example) @@ -161,7 +161,7 @@ class ConceptDriftCheckTimeSeries: Examples: >>> concept = ConceptDriftCheckTimeSeries(window_size=100, rolling_window=10, - >>> step=10, threshold_index=1.5, need_label=False) + ... 
step=10, threshold_index=1.5, need_label=False) >>> data_example = 5*np.random.rand(1000) >>> data_example[200: 800] = 20*np.random.rand(600) >>> score, drift_threshold, drift_location = concept.concept_check(data_example)