diff --git a/mindarmour/adv_robustness/attacks/attack.py b/mindarmour/adv_robustness/attacks/attack.py index e138407..8a05bf1 100644 --- a/mindarmour/adv_robustness/attacks/attack.py +++ b/mindarmour/adv_robustness/attacks/attack.py @@ -49,11 +49,6 @@ class Attack: Returns: numpy.ndarray, generated adversarial examples - - Examples: - >>> inputs = np.array([[0.2, 0.4, 0.5, 0.2], [0.7, 0.2, 0.4, 0.3]]) - >>> labels = np.array([3, 0]) - >>> advs = attack.batch_generate(inputs, labels, batch_size=2) """ inputs_image, inputs, labels = check_inputs_labels(inputs, labels) arr_x = inputs diff --git a/mindarmour/adv_robustness/attacks/black/genetic_attack.py b/mindarmour/adv_robustness/attacks/black/genetic_attack.py index 69d7b0c..087d463 100644 --- a/mindarmour/adv_robustness/attacks/black/genetic_attack.py +++ b/mindarmour/adv_robustness/attacks/black/genetic_attack.py @@ -69,7 +69,6 @@ class GeneticAttack(Attack): c (Union[int, float]): Weight of perturbation loss. Default: 0.1. Examples: - >>> import numpy as np >>> import mindspore.ops.operations as M >>> from mindspore import Tensor >>> from mindspore.nn import Cell diff --git a/mindarmour/adv_robustness/attacks/black/hop_skip_jump_attack.py b/mindarmour/adv_robustness/attacks/black/hop_skip_jump_attack.py index b0038a4..22e41e1 100644 --- a/mindarmour/adv_robustness/attacks/black/hop_skip_jump_attack.py +++ b/mindarmour/adv_robustness/attacks/black/hop_skip_jump_attack.py @@ -75,7 +75,6 @@ class HopSkipJumpAttack(Attack): ValueError: If constraint not in ['l2', 'linf'] Examples: - >>> import numpy as np >>> from mindspore import Tensor >>> from mindarmour import BlackModel >>> from mindarmour.adv_robustness.attacks import HopSkipJumpAttack diff --git a/mindarmour/adv_robustness/attacks/black/natural_evolutionary_strategy.py b/mindarmour/adv_robustness/attacks/black/natural_evolutionary_strategy.py index bcb0596..45e89b4 100644 --- a/mindarmour/adv_robustness/attacks/black/natural_evolutionary_strategy.py +++ b/mindarmour/adv_robustness/attacks/black/natural_evolutionary_strategy.py @@ -79,7 +79,6 @@ class NES(Attack): input labels are one-hot-encoded. Default: True. Examples: - >>> import numpy as np >>> from mindspore import Tensor >>> from mindarmour import BlackModel >>> from mindarmour.adv_robustness.attacks import NES diff --git a/mindarmour/adv_robustness/attacks/black/pointwise_attack.py b/mindarmour/adv_robustness/attacks/black/pointwise_attack.py index 4c9ccc5..1f33a6e 100644 --- a/mindarmour/adv_robustness/attacks/black/pointwise_attack.py +++ b/mindarmour/adv_robustness/attacks/black/pointwise_attack.py @@ -47,7 +47,6 @@ class PointWiseAttack(Attack): Default: True. Examples: - >>> import numpy as np >>> from mindspore import Tensor >>> from mindarmour import BlackModel >>> from mindarmour.adv_robustness.attacks import PointWiseAttack diff --git a/mindarmour/adv_robustness/attacks/black/pso_attack.py b/mindarmour/adv_robustness/attacks/black/pso_attack.py index b5ed105..a88fc4e 100644 --- a/mindarmour/adv_robustness/attacks/black/pso_attack.py +++ b/mindarmour/adv_robustness/attacks/black/pso_attack.py @@ -64,7 +64,6 @@ class PSOAttack(Attack): specifically for model_type='detection'. Reserve_ratio should be in the range of (0, 1). Default: 0.3. 
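Note: the batch_generate example removed from attack.py above refers to an undefined `attack`; on a concrete subclass such as FastGradientMethod the equivalent call would look roughly as follows (a sketch only: the toy softmax Net, the explicit loss_fn and the one-hot labels are illustrative assumptions, not part of this change):
>>> import numpy as np
>>> import mindspore.ops.operations as P
>>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits
>>> from mindarmour.adv_robustness.attacks import FastGradientMethod
>>> class Net(Cell):
...     def __init__(self):
...         super(Net, self).__init__()
...         self._softmax = P.Softmax()   # toy network: softmax over 4 logits
...     def construct(self, inputs):
...         return self._softmax(inputs)
>>> attack = FastGradientMethod(Net(), loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))
>>> inputs = np.array([[0.2, 0.4, 0.5, 0.2], [0.7, 0.2, 0.4, 0.3]]).astype(np.float32)
>>> labels = np.eye(4)[[3, 0]].astype(np.float32)   # one-hot labels, matching sparse=False
>>> advs = attack.batch_generate(inputs, labels, batch_size=2)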
Examples: - >>> import numpy as np >>> import mindspore.nn as nn >>> from mindspore import Tensor >>> from mindspore.nn import Cell diff --git a/mindarmour/adv_robustness/attacks/black/salt_and_pepper_attack.py b/mindarmour/adv_robustness/attacks/black/salt_and_pepper_attack.py index 003f638..d6e1b1f 100644 --- a/mindarmour/adv_robustness/attacks/black/salt_and_pepper_attack.py +++ b/mindarmour/adv_robustness/attacks/black/salt_and_pepper_attack.py @@ -40,7 +40,6 @@ class SaltAndPepperNoiseAttack(Attack): Default: True. Examples: - >>> import numpy as np >>> from mindspore import Tensor >>> from mindarmour import BlackModel >>> from mindarmour.adv_robustness.attacks import SaltAndPepperNoiseAttack diff --git a/mindarmour/adv_robustness/attacks/carlini_wagner.py b/mindarmour/adv_robustness/attacks/carlini_wagner.py index 67c3a70..9843505 100644 --- a/mindarmour/adv_robustness/attacks/carlini_wagner.py +++ b/mindarmour/adv_robustness/attacks/carlini_wagner.py @@ -95,7 +95,6 @@ class CarliniWagnerL2Attack(Attack): input labels are onehot-coded. Default: True. Examples: - >>> import numpy as np >>> import mindspore.ops.operations as M >>> from mindspore.nn import Cell >>> from mindarmour.adv_robustness.attacks import CarliniWagnerL2Attack diff --git a/mindarmour/adv_robustness/attacks/deep_fool.py b/mindarmour/adv_robustness/attacks/deep_fool.py index 819a20d..9de6380 100644 --- a/mindarmour/adv_robustness/attacks/deep_fool.py +++ b/mindarmour/adv_robustness/attacks/deep_fool.py @@ -117,7 +117,6 @@ class DeepFool(Attack): input labels are onehot-coded. Default: True. Examples: - >>> import numpy as np >>> import mindspore.ops.operations as P >>> from mindspore.nn import Cell >>> from mindspore import Tensor diff --git a/mindarmour/adv_robustness/attacks/gradient_method.py b/mindarmour/adv_robustness/attacks/gradient_method.py index 5f033e1..685b02f 100644 --- a/mindarmour/adv_robustness/attacks/gradient_method.py +++ b/mindarmour/adv_robustness/attacks/gradient_method.py @@ -149,8 +149,6 @@ class FastGradientMethod(GradientMethod): is already equipped with loss function. Default: None. Examples: - >>> import numpy as np - >>> import mindspore.nn as nn >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits >>> from mindarmour.adv_robustness.attacks import FastGradientMethod >>> class Net(Cell): @@ -230,8 +228,6 @@ class RandomFastGradientMethod(FastGradientMethod): ValueError: eps is smaller than alpha! Examples: - >>> import numpy as np - >>> import mindspore.nn as nn >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits >>> from mindarmour.adv_robustness.attacks import RandomFastGradientMethod >>> class Net(Cell): @@ -285,8 +281,6 @@ class FastGradientSignMethod(GradientMethod): is already equipped with loss function. Default: None. Examples: - >>> import numpy as np - >>> import mindspore.nn as nn >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits >>> from mindarmour.adv_robustness.attacks import FastGradientSignMethod >>> class Net(Cell): @@ -362,8 +356,6 @@ class RandomFastGradientSignMethod(FastGradientSignMethod): ValueError: eps is smaller than alpha! Examples: - >>> import numpy as np - >>> import mindspore.nn as nn >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits >>> from mindarmour.adv_robustness.attacks import RandomFastGradientSignMethod >>> class Net(Cell): @@ -412,8 +404,6 @@ class LeastLikelyClassMethod(FastGradientSignMethod): is already equipped with loss function. Default: None. 
Examples: - >>> import numpy as np - >>> import mindspore.nn as nn >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits >>> from mindarmour.adv_robustness.attacks import LeastLikelyClassMethod >>> class Net(Cell): @@ -463,8 +453,6 @@ class RandomLeastLikelyClassMethod(FastGradientSignMethod): ValueError: eps is smaller than alpha! Examples: - >>> import numpy as np - >>> import mindspore.nn as nn >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits >>> from mindarmour.adv_robustness.attacks import RandomLeastLikelyClassMethod >>> class Net(Cell): diff --git a/mindarmour/adv_robustness/attacks/iterative_gradient_method.py b/mindarmour/adv_robustness/attacks/iterative_gradient_method.py index d95e5a8..c3c15aa 100644 --- a/mindarmour/adv_robustness/attacks/iterative_gradient_method.py +++ b/mindarmour/adv_robustness/attacks/iterative_gradient_method.py @@ -178,8 +178,6 @@ class BasicIterativeMethod(IterativeGradientMethod): is already equipped with loss function. Default: None. Examples: - >>> import numpy as np - >>> import mindspore.nn as nn >>> from mindspore.ops import operations as P >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits >>> from mindarmour.adv_robustness.attacks import BasicIterativeMethod @@ -282,8 +280,6 @@ class MomentumIterativeMethod(IterativeGradientMethod): is already equipped with loss function. Default: None. Examples: - >>> import numpy as np - >>> import mindspore.nn as nn >>> from mindspore.ops import operations as P >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits >>> from mindarmour.adv_robustness.attacks import MomentumIterativeMethod @@ -428,8 +424,6 @@ class ProjectedGradientDescent(BasicIterativeMethod): is already equipped with loss function. Default: None. Examples: - >>> import numpy as np - >>> import mindspore.nn as nn >>> from mindspore.ops import operations as P >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits >>> from mindarmour.adv_robustness.attacks import ProjectedGradientDescent @@ -526,8 +520,6 @@ class DiverseInputIterativeMethod(BasicIterativeMethod): is already equipped with loss function. Default: None. Examples: - >>> import numpy as np - >>> import mindspore.nn as nn >>> from mindspore.ops import operations as P >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits >>> from mindarmour.adv_robustness.attacks import DiverseInputIterativeMethod @@ -583,8 +575,6 @@ class MomentumDiverseInputIterativeMethod(MomentumIterativeMethod): is already equipped with loss function. Default: None. Examples: - >>> import numpy as np - >>> import mindspore.nn as nn >>> from mindspore.ops import operations as P >>> from mindspore.nn import Cell, SoftmaxCrossEntropyWithLogits >>> from mindarmour.adv_robustness.attacks import MomentumDiverseInputIterativeMethod diff --git a/mindarmour/adv_robustness/attacks/jsma.py b/mindarmour/adv_robustness/attacks/jsma.py index a91750b..3101a3a 100644 --- a/mindarmour/adv_robustness/attacks/jsma.py +++ b/mindarmour/adv_robustness/attacks/jsma.py @@ -54,8 +54,6 @@ class JSMAAttack(Attack): input labels are onehot-coded. Default: True. 
Examples: - >>> import numpy as np - >>> import mindspore.nn as nn >>> from mindspore.nn import Cell >>> from mindarmour.adv_robustness.attacks import JSMAAttack >>> class Net(Cell): diff --git a/mindarmour/adv_robustness/attacks/lbfgs.py b/mindarmour/adv_robustness/attacks/lbfgs.py index 75783da..da8d5fd 100644 --- a/mindarmour/adv_robustness/attacks/lbfgs.py +++ b/mindarmour/adv_robustness/attacks/lbfgs.py @@ -54,7 +54,6 @@ class LBFGS(Attack): input labels are onehot-coded. Default: False. Examples: - >>> import numpy as np >>> from mindarmour.adv_robustness.attacks import LBFGS >>> from tests.ut.python.utils.mock_net import Net >>> net = Net() diff --git a/mindarmour/adv_robustness/defenses/adversarial_defense.py b/mindarmour/adv_robustness/defenses/adversarial_defense.py index a5dc204..1dbfc34 100644 --- a/mindarmour/adv_robustness/defenses/adversarial_defense.py +++ b/mindarmour/adv_robustness/defenses/adversarial_defense.py @@ -36,10 +36,8 @@ class AdversarialDefense(Defense): optimizer (Cell): Optimizer used to train the network. Default: None. Examples: - >>> import numpy as np >>> from mindspore.nn.optim.momentum import Momentum >>> from mindarmour.adv_robustness.defenses import AdversarialDefense - >>> from mindspore import nn >>> from tests.ut.python.utils.mock_net import Net >>> net = Net() >>> lr = 0.001 @@ -109,12 +107,10 @@ class AdversarialDefenseWithAttacks(AdversarialDefense): ValueError: If replace_ratio is not between 0 and 1. Examples: - >>> import numpy as np >>> from mindspore.nn.optim.momentum import Momentum >>> from mindarmour.adv_robustness.attacks import FastGradientSignMethod >>> from mindarmour.adv_robustness.attacks import ProjectedGradientDescent >>> from mindarmour.adv_robustness.defenses import AdversarialDefenseWithAttacks - >>> from mindspore import nn >>> from tests.ut.python.utils.mock_net import Net >>> net = Net() >>> lr = 0.001 @@ -157,9 +153,6 @@ class AdversarialDefenseWithAttacks(AdversarialDefense): Returns: numpy.ndarray, loss of adversarial defense operation. - - Examples: - >>> adv_defense.defense(inputs, labels) """ inputs, labels = check_pair_numpy_param('inputs', inputs, 'labels', labels) @@ -200,12 +193,10 @@ class EnsembleAdversarialDefense(AdversarialDefenseWithAttacks): ValueError: If replace_ratio is not between 0 and 1. Examples: - >>> import numpy as np >>> from mindspore.nn.optim.momentum import Momentum >>> from mindarmour.adv_robustness.attacks import FastGradientSignMethod >>> from mindarmour.adv_robustness.attacks import ProjectedGradientDescent >>> from mindarmour.adv_robustness.defenses import EnsembleAdversarialDefense - >>> from mindspore import nn >>> from tests.ut.python.utils.mock_net import Net >>> net = Net() >>> lr = 0.001 diff --git a/mindarmour/adv_robustness/defenses/natural_adversarial_defense.py b/mindarmour/adv_robustness/defenses/natural_adversarial_defense.py index 2f666c0..111ddf6 100644 --- a/mindarmour/adv_robustness/defenses/natural_adversarial_defense.py +++ b/mindarmour/adv_robustness/defenses/natural_adversarial_defense.py @@ -36,10 +36,8 @@ class NaturalAdversarialDefense(AdversarialDefenseWithAttacks): eps (float): Step size of the attack method(FGSM). Default: 0.1. 
Examples: - >>> import numpy as np >>> from mindspore.nn.optim.momentum import Momentum >>> from mindarmour.adv_robustness.defenses import NaturalAdversarialDefense - >>> from mindspore import nn >>> from tests.ut.python.utils.mock_net import Net >>> net = Net() >>> lr = 0.001 diff --git a/mindarmour/adv_robustness/defenses/projected_adversarial_defense.py b/mindarmour/adv_robustness/defenses/projected_adversarial_defense.py index 878b91e..19f7e39 100644 --- a/mindarmour/adv_robustness/defenses/projected_adversarial_defense.py +++ b/mindarmour/adv_robustness/defenses/projected_adversarial_defense.py @@ -41,10 +41,8 @@ class ProjectedAdversarialDefense(AdversarialDefenseWithAttacks): norm_level (str): Norm type. 'inf' or 'l2'. Default: 'inf'. Examples: - >>> import numpy as np >>> from mindspore.nn.optim.momentum import Momentum >>> from mindarmour.adv_robustness.defenses import ProjectedAdversarialDefense - >>> from mindspore import nn >>> from tests.ut.python.utils.mock_net import Net >>> net = Net() >>> lr = 0.001 diff --git a/mindarmour/adv_robustness/detectors/black/similarity_detector.py b/mindarmour/adv_robustness/detectors/black/similarity_detector.py index 93c4d8d..4fed46f 100644 --- a/mindarmour/adv_robustness/detectors/black/similarity_detector.py +++ b/mindarmour/adv_robustness/detectors/black/similarity_detector.py @@ -73,11 +73,9 @@ class SimilarityDetector(Detector): Default: 0.001 Examples: - >>> import numpy as np >>> from mindspore.ops.operations import Add >>> from mindspore.nn import Cell >>> from mindspore import Model - >>> from mindspore import context >>> from mindarmour.adv_robustness.detectors import SimilarityDetector >>> class EncoderNet(Cell): ... def __init__(self, encode_dim): diff --git a/mindarmour/adv_robustness/detectors/ensemble_detector.py b/mindarmour/adv_robustness/detectors/ensemble_detector.py index 6b607ed..4fb95be 100644 --- a/mindarmour/adv_robustness/detectors/ensemble_detector.py +++ b/mindarmour/adv_robustness/detectors/ensemble_detector.py @@ -34,11 +34,9 @@ class EnsembleDetector(Detector): policy (str): Decision policy, could be 'vote', 'all' or 'any'. Default: 'vote' Examples: - >>> import numpy as np >>> from mindspore.ops.operations import Add >>> from mindspore.nn import Cell >>> from mindspore import Model - >>> from mindspore import context >>> from mindarmour.adv_robustness.detectors import ErrorBasedDetector >>> from mindarmour.adv_robustness.detectors import RegionBasedDetector >>> from mindarmour.adv_robustness.detectors import EnsembleDetector diff --git a/mindarmour/adv_robustness/detectors/mag_net.py b/mindarmour/adv_robustness/detectors/mag_net.py index 436879e..2944cae 100644 --- a/mindarmour/adv_robustness/detectors/mag_net.py +++ b/mindarmour/adv_robustness/detectors/mag_net.py @@ -48,11 +48,9 @@ class ErrorBasedDetector(Detector): bounds (tuple): (clip_min, clip_max). Default: (0.0, 1.0). Examples: - >>> import numpy as np >>> from mindspore.ops.operations import Add >>> from mindspore.nn import Cell >>> from mindspore import Model - >>> from mindspore import context >>> from mindarmour.adv_robustness.detectors import ErrorBasedDetector >>> class Net(Cell): ... def __init__(self): @@ -183,11 +181,9 @@ class DivergenceBasedDetector(ErrorBasedDetector): In form of (clip_min, clip_max). Default: (0.0, 1.0). 
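Whichever detector these examples construct, the calling pattern is the same; a minimal sketch (the constructed `detector` and the numpy arrays `benign_x` / `suspect_x` are assumed to exist already):
>>> detector.fit(benign_x)               # calibrate the detection threshold on benign samples
>>> flags = detector.detect(suspect_x)   # per-sample flags marking suspected adversarial inputs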
Examples: - >>> import numpy as np >>> import mindspore.ops.operations as P >>> from mindspore.nn import Cell >>> from mindspore import Model - >>> from mindspore import context >>> from mindarmour.adv_robustness.detectors import DivergenceBasedDetector >>> class PredNet(Cell): ... def __init__(self): diff --git a/mindarmour/adv_robustness/detectors/region_based_detector.py b/mindarmour/adv_robustness/detectors/region_based_detector.py index 20cb21f..a1bc603 100644 --- a/mindarmour/adv_robustness/detectors/region_based_detector.py +++ b/mindarmour/adv_robustness/detectors/region_based_detector.py @@ -52,11 +52,9 @@ class RegionBasedDetector(Detector): input labels are one-hot-encoded. Default: False. Examples: - >>> import numpy as np >>> from mindspore.ops.operations import Add >>> from mindspore.nn import Cell >>> from mindspore import Model - >>> from mindspore import context >>> from mindarmour.adv_robustness.detectors import RegionBasedDetector >>> class Net(Cell): ... def __init__(self): diff --git a/mindarmour/adv_robustness/detectors/spatial_smoothing.py b/mindarmour/adv_robustness/detectors/spatial_smoothing.py index bc43a0c..91a788d 100644 --- a/mindarmour/adv_robustness/detectors/spatial_smoothing.py +++ b/mindarmour/adv_robustness/detectors/spatial_smoothing.py @@ -49,11 +49,9 @@ class SpatialSmoothing(Detector): benign samples. Default: 0.05. Examples: - >>> import numpy as np >>> import mindspore.ops.operations as P >>> from mindspore.nn import Cell >>> from mindspore import Model - >>> from mindspore import context >>> from mindarmour.adv_robustness.detectors import SpatialSmoothing >>> class Net(Cell): ... def __init__(self): diff --git a/mindarmour/adv_robustness/evaluations/attack_evaluation.py b/mindarmour/adv_robustness/evaluations/attack_evaluation.py index ebf449f..5085b2c 100644 --- a/mindarmour/adv_robustness/evaluations/attack_evaluation.py +++ b/mindarmour/adv_robustness/evaluations/attack_evaluation.py @@ -47,7 +47,6 @@ class AttackEvaluate: ValueError: If target_label is None when targeted is True. Examples: - >>> import numpy as np >>> from mindarmour.adv_robustness.evaluations import AttackEvaluate >>> x = np.random.normal(size=(3, 512, 512, 3)) >>> adv_x = np.random.normal(size=(3, 512, 512, 3)) diff --git a/mindarmour/adv_robustness/evaluations/black/defense_evaluation.py b/mindarmour/adv_robustness/evaluations/black/defense_evaluation.py index 84e6cf9..1b73bae 100644 --- a/mindarmour/adv_robustness/evaluations/black/defense_evaluation.py +++ b/mindarmour/adv_robustness/evaluations/black/defense_evaluation.py @@ -57,7 +57,6 @@ class BlackDefenseEvaluate: max_queries (int): Attack budget, the maximum number of queries. Examples: - >>> import numpy as np >>> from mindarmour.adv_robustness.evaluations import BlackDefenseEvaluate >>> raw_preds = np.array([[0.1, 0.1, 0.2, 0.6], ... [0.1, 0.7, 0.0, 0.2], diff --git a/mindarmour/adv_robustness/evaluations/defense_evaluation.py b/mindarmour/adv_robustness/evaluations/defense_evaluation.py index e0b5839..cf2085b 100644 --- a/mindarmour/adv_robustness/evaluations/defense_evaluation.py +++ b/mindarmour/adv_robustness/evaluations/defense_evaluation.py @@ -39,9 +39,7 @@ class DefenseEvaluate: one-dimension array whose size is raw_preds.shape[0]. Examples: - >>> import numpy as np >>> from mindarmour.adv_robustness.evaluations import DefenseEvaluate - >>> raw_preds = np.array([[0.1, 0.1, 0.2, 0.6], ... [0.1, 0.7, 0.0, 0.2], ... 
[0.8, 0.1, 0.0, 0.1]]) diff --git a/mindarmour/adv_robustness/evaluations/visual_metrics.py b/mindarmour/adv_robustness/evaluations/visual_metrics.py index 639b57b..ac8ae01 100644 --- a/mindarmour/adv_robustness/evaluations/visual_metrics.py +++ b/mindarmour/adv_robustness/evaluations/visual_metrics.py @@ -46,7 +46,6 @@ class RadarMetric: ValueError: If scale not in ['hide', 'norm', 'sparse', 'dense']. Examples: - >>> import numpy as np >>> from mindarmour.adv_robustness.evaluations import RadarMetric >>> metrics_name = ['MR', 'ACAC', 'ASS', 'NTE', 'ACTC'] >>> def_metrics = [0.9, 0.85, 0.6, 0.7, 0.8] diff --git a/mindarmour/privacy/diff_privacy/mechanisms/mechanisms.py b/mindarmour/privacy/diff_privacy/mechanisms/mechanisms.py index e4e5ecf..4d302c4 100644 --- a/mindarmour/privacy/diff_privacy/mechanisms/mechanisms.py +++ b/mindarmour/privacy/diff_privacy/mechanisms/mechanisms.py @@ -34,7 +34,12 @@ TAG = 'NoiseMechanism' class ClipMechanismsFactory: - """ Factory class of clip mechanisms""" + """ + Factory class of clip mechanisms + + For details, please check `Tutorial `_ + + """ def __init__(self): pass @@ -63,6 +68,9 @@ class ClipMechanismsFactory: Mechanisms, class of noise generated Mechanism. Examples: + >>> from mindspore import Tensor + >>> from mindspore.common import dtype as mstype + >>> from mindarmour.privacy.diff_privacy import ClipMechanismsFactory >>> decay_policy = 'Linear' >>> beta = Tensor(0.5, mstype.float32) >>> norm_bound = Tensor(1.0, mstype.float32) @@ -71,10 +79,10 @@ class ClipMechanismsFactory: >>> target_unclipped_quantile = 0.9 >>> clip_mechanism = ClipMechanismsFactory() >>> ada_clip = clip_mechanism.create('Gaussian', - ... decay_policy=decay_policy, - ... learning_rate=learning_rate, - ... target_unclipped_quantile=target_unclipped_quantile, - ... fraction_stddev=beta_stddev) + ... decay_policy=decay_policy, + ... learning_rate=learning_rate, + ... target_unclipped_quantile=target_unclipped_quantile, + ... fraction_stddev=beta_stddev) >>> next_norm_bound = ada_clip(beta, norm_bound) """ @@ -86,8 +94,11 @@ class ClipMechanismsFactory: class NoiseMechanismsFactory: - """ Factory class of noise mechanisms""" + """ Factory class of noise mechanisms + + For details, please check `Tutorial `_ + """ def __init__(self): pass @@ -117,34 +128,13 @@ class NoiseMechanismsFactory: Mechanisms, class of noise generated Mechanism. Examples: + >>> from mindarmour.privacy.diff_privacy import NoiseMechanismsFactory >>> norm_bound = 1.0 >>> initial_noise_multiplier = 1.0 - >>> network = LeNet5() - >>> batch_size = 32 - >>> batches = 128 - >>> epochs = 1 - >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True) - >>> noise_mech = NoiseMechanismsFactory().create('Gaussian', - ... norm_bound=norm_bound, - ... initial_noise_multiplier=initial_noise_multiplier) - >>> clip_mech = ClipMechanismsFactory().create('Gaussian', - ... decay_policy='Linear', - ... learning_rate=0.001, - ... target_unclipped_quantile=0.9, - ... fraction_stddev=0.01) - >>> net_opt = nn.Momentum(network.trainable_params(), learning_rate=0.1, - ... momentum=0.9) - >>> model = DPModel(micro_batches=2, - ... clip_mech=clip_mech, - ... norm_bound=norm_bound, - ... noise_mech=noise_mech, - ... network=network, - ... loss_fn=loss, - ... optimizer=net_opt, - ... metrics=None) - >>> ms_ds = ds.GeneratorDataset(dataset_generator, - ... ['data', 'label']) - >>> model.train(epochs, ms_ds, dataset_sink_mode=False) + >>> noise_mechanism = NoiseMechanismsFactory() + >>> clip = noise_mechanism.create('Gaussian', + ... 
norm_bound=norm_bound, + ... initial_noise_multiplier=initial_noise_multiplier) """ if mech_name == 'Gaussian': return NoiseGaussianRandom(norm_bound=norm_bound, @@ -192,6 +182,9 @@ class NoiseGaussianRandom(_Mechanisms): Tensor, generated noise with shape like given gradients. Examples: + >>> from mindspore import Tensor + >>> from mindspore.common import dtype as mstype + >>> from mindarmour.privacy.diff_privacy import NoiseGaussianRandom >>> gradients = Tensor([0.2, 0.9], mstype.float32) >>> norm_bound = 0.1 >>> initial_noise_multiplier = 1.0 @@ -199,7 +192,6 @@ class NoiseGaussianRandom(_Mechanisms): >>> decay_policy = None >>> net = NoiseGaussianRandom(norm_bound, initial_noise_multiplier, seed, decay_policy) >>> res = net(gradients) - >>> print(res) """ def __init__(self, norm_bound=1.0, initial_noise_multiplier=1.0, seed=0, decay_policy=None): @@ -259,6 +251,9 @@ class NoiseAdaGaussianRandom(NoiseGaussianRandom): Tensor, generated noise with shape like given gradients. Examples: + >>> from mindspore import Tensor + >>> from mindspore.common import dtype as mstype + >>> from mindarmour.privacy.diff_privacy import NoiseAdaGaussianRandom >>> gradients = Tensor([0.2, 0.9], mstype.float32) >>> norm_bound = 1.0 >>> initial_noise_multiplier = 1.0 @@ -267,7 +262,6 @@ class NoiseAdaGaussianRandom(NoiseGaussianRandom): >>> decay_policy = "Exp" >>> net = NoiseAdaGaussianRandom(norm_bound, initial_noise_multiplier, seed, noise_decay_rate, decay_policy) >>> res = net(gradients) - >>> print(res) """ def __init__(self, norm_bound=1.0, initial_noise_multiplier=1.0, seed=0, noise_decay_rate=6e-6, decay_policy='Exp'): @@ -379,6 +373,9 @@ class AdaClippingWithGaussianRandom(Cell): Tensor, undated norm clip . Examples: + >>> from mindspore import Tensor + >>> from mindspore.common import dtype as mstype + >>> from mindarmour.privacy.diff_privacy import AdaClippingWithGaussianRandom >>> decay_policy = 'Linear' >>> beta = Tensor(0.5, mstype.float32) >>> norm_bound = Tensor(1.0, mstype.float32) diff --git a/mindarmour/privacy/diff_privacy/monitor/monitor.py b/mindarmour/privacy/diff_privacy/monitor/monitor.py index 62449c1..1be43a0 100644 --- a/mindarmour/privacy/diff_privacy/monitor/monitor.py +++ b/mindarmour/privacy/diff_privacy/monitor/monitor.py @@ -38,6 +38,8 @@ class PrivacyMonitorFactory: """ Create a privacy monitor class. + For details, please check `Tutorial `_ + Args: policy (str): Monitor policy, 'rdp' and 'zcdp' are supported by now. If policy is 'rdp', the monitor will compute the @@ -55,8 +57,8 @@ class PrivacyMonitorFactory: Callback, a privacy monitor. Examples: - >>> rdp = PrivacyMonitorFactory.create(policy='rdp', - >>> num_samples=60000, batch_size=32) + >>> from mindarmour.privacy.diff_privacy import PrivacyMonitorFactory + >>> rdp = PrivacyMonitorFactory.create(policy='rdp', num_samples=60000, batch_size=32) """ if policy == 'rdp': return RDPMonitor(*args, **kwargs) @@ -72,6 +74,8 @@ class RDPMonitor(Callback): mechanism is said to have ε'-Renyi differential privacy of order α, it also satisfies conventional differential privacy (ε, δ) as below: + For details, please check `Tutorial `_ + .. math:: (ε'+\frac{log(1/δ)}{α-1}, δ) @@ -114,20 +118,8 @@ class RDPMonitor(Callback): to device after each step training. Default: False. 
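In training code the monitor is attached as a Model callback; a condensed sketch, mirroring the longer example shown in the removed lines above (the compiled `model`, the dataset `ds` and `epochs` are assumed to be prepared):
>>> from mindarmour.privacy.diff_privacy import PrivacyMonitorFactory
>>> rdp = PrivacyMonitorFactory.create(policy='rdp', num_samples=60000, batch_size=256,
...                                    initial_noise_multiplier=1.5)
>>> model.train(epochs, ds, callbacks=[rdp], dataset_sink_mode=False)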
Examples: - >>> network = Net() - >>> net_loss = nn.SoftmaxCrossEntropyWithLogits() - >>> epochs = 2 - >>> norm_clip = 1.0 - >>> initial_noise_multiplier = 1.5 - >>> mech = NoiseMechanismsFactory().create('AdaGaussian', - >>> norm_bound=norm_clip, initial_noise_multiplier=initial_noise_multiplier) - >>> net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9) - >>> model = DPModel(micro_batches=2, norm_clip=norm_clip, - >>> mech=mech, network=network, loss_fn=loss, optimizer=net_opt, metrics=None) - >>> rdp = PrivacyMonitorFactory.create(policy='rdp', - >>> num_samples=60000, batch_size=256, - >>> initial_noise_multiplier=initial_noise_multiplier) - >>> model.train(epochs, ds, callbacks=[rdp], dataset_sink_mode=False) + >>> from mindarmour.privacy.diff_privacy import PrivacyMonitorFactory + >>> rdp = PrivacyMonitorFactory.create(policy='rdp', num_samples=100, batch_size=32) """ def __init__(self, num_samples, batch_size, initial_noise_multiplier=1.5, @@ -206,8 +198,7 @@ class RDPMonitor(Callback): int, the recommended maximum training epochs. Examples: - >>> rdp = PrivacyMonitorFactory.create(policy='rdp', - >>> num_samples=60000, batch_size=32) + >>> rdp = PrivacyMonitorFactory.create(policy='rdp', num_samples=100, batch_size=32) >>> suggest_epoch = rdp.max_epoch_suggest() """ if self._target_delta is not None and self._max_eps is None: @@ -376,6 +367,8 @@ class ZCDPMonitor(Callback): if a randomized mechanism is said to have ρ-zCDP, it also satisfies conventional differential privacy (ε, δ) as below: + For details, please check `Tutorial `_ + .. math:: (ρ+2\sqrt{ρ*log(1/δ)}, δ) @@ -407,20 +400,11 @@ class ZCDPMonitor(Callback): to device after each step training. Default: False. Examples: - >>> network = Net() - >>> net_loss = nn.SoftmaxCrossEntropyWithLogits() - >>> epochs = 2 - >>> norm_clip = 1.0 - >>> initial_noise_multiplier = 1.5 - >>> mech = NoiseMechanismsFactory().create('AdaGaussian', - >>> norm_bound=norm_clip, initial_noise_multiplier=initial_noise_multiplier) - >>> net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9) - >>> model = DPModel(micro_batches=2, norm_clip=norm_clip, - >>> mech=mech, network=network, loss_fn=loss, optimizer=net_opt, metrics=None) + >>> from mindarmour.privacy.diff_privacy import PrivacyMonitorFactory >>> zcdp = PrivacyMonitorFactory.create(policy='zcdp', - >>> num_samples=60000, batch_size=256, - >>> initial_noise_multiplier=initial_noise_multiplier) - >>> model.train(epochs, ds, callbacks=[zcdp], dataset_sink_mode=False) + ... num_samples=100, + ... batch_size=32, + ... initial_noise_multiplier=1.5) """ def __init__(self, num_samples, batch_size, initial_noise_multiplier=1.5, diff --git a/mindarmour/privacy/diff_privacy/optimizer/optimizer.py b/mindarmour/privacy/diff_privacy/optimizer/optimizer.py index 8927a5f..a2b6d39 100644 --- a/mindarmour/privacy/diff_privacy/optimizer/optimizer.py +++ b/mindarmour/privacy/diff_privacy/optimizer/optimizer.py @@ -62,6 +62,9 @@ class DPOptimizerClassFactory: Optimizer, Optimizer class. 
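The optimizer class created here is what DPModel consumes in place of a standard optimizer (in which case noise_mech must stay None); a condensed sketch, based on the longer DPModel example removed from model.py further down, with `network`, `loss`, `clip_mech`, `epochs` and the dataset `ms_ds` assumed to be prepared and `GaussianSGD` built as in the Examples block that follows:
>>> from mindarmour.privacy.diff_privacy import DPModel
>>> net_opt = GaussianSGD.create('Momentum')(params=network.trainable_params(),
...                                          learning_rate=0.1, momentum=0.9)
>>> model = DPModel(micro_batches=2, norm_bound=1.0, noise_mech=None, clip_mech=clip_mech,
...                 network=network, loss_fn=loss, optimizer=net_opt, metrics=None)
>>> model.train(epochs, ms_ds, dataset_sink_mode=False)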
Examples: + >>> from mindarmour.privacy.diff_privacy import DPOptimizerClassFactory + >>> from tests.ut.python.utils.mock_net import Net + >>> network = Net() >>> GaussianSGD = DPOptimizerClassFactory(micro_batches=2) >>> GaussianSGD.set_mechanisms('Gaussian', norm_bound=1.0, initial_noise_multiplier=1.5) >>> net_opt = GaussianSGD.create('Momentum')(params=network.trainable_params(), diff --git a/mindarmour/privacy/diff_privacy/train/model.py b/mindarmour/privacy/diff_privacy/train/model.py index b145b32..f304553 100644 --- a/mindarmour/privacy/diff_privacy/train/model.py +++ b/mindarmour/privacy/diff_privacy/train/model.py @@ -69,6 +69,8 @@ class DPModel(Model): """ This class is overload mindspore.train.model.Model. + For details, please check `Tutorial `_ + Args: micro_batches (int): The number of small batches split from an original batch. Default: 2. @@ -83,37 +85,6 @@ class DPModel(Model): ValueError: If DPOptimizer and noise_mecn are both None or not None. ValueError: If noise_mech or DPOtimizer's mech method is adaptive while clip_mech is not None. - Examples: - >>> norm_bound = 1.0 - >>> initial_noise_multiplier = 0.01 - >>> network = LeNet5() - >>> batch_size = 32 - >>> batches = 128 - >>> epochs = 1 - >>> micro_batches = 2 - >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True) - >>> factory_opt = DPOptimizerClassFactory(micro_batches=micro_batches) - >>> factory_opt.set_mechanisms('Gaussian', - ... norm_bound=norm_bound, - ... initial_noise_multiplier=initial_noise_multiplier) - >>> net_opt = factory_opt.create('Momentum')(network.trainable_params(), - ... learning_rate=0.1, momentum=0.9) - >>> clip_mech = ClipMechanismsFactory().create('Gaussian', - ... decay_policy='Linear', - ... learning_rate=0.01, - ... target_unclipped_quantile=0.9, - ... fraction_stddev=0.01) - >>> model = DPModel(micro_batches=micro_batches, - ... norm_bound=norm_bound, - ... clip_mech=clip_mech, - ... noise_mech=None, - ... network=network, - ... loss_fn=loss, - ... optimizer=net_opt, - ... metrics=None) - >>> ms_ds = ds.GeneratorDataset(dataset_generator, - ... ['data', 'label']) - >>> model.train(epochs, ms_ds, dataset_sink_mode=False) """ def __init__(self, micro_batches=2, norm_bound=1.0, noise_mech=None,