From 60a6e6cbaa10cbdf128de4afe6473cfee88a66a3 Mon Sep 17 00:00:00 2001 From: shu-kun-zhang Date: Thu, 24 Mar 2022 11:47:52 +0800 Subject: [PATCH] Fix API Example and Class Description Issues --- .jenkins/test/config/dependent_packages.yaml | 2 +- mindarmour/adv_robustness/attacks/attack.py | 1 + .../attacks/black/natural_evolutionary_strategy.py | 2 +- .../adv_robustness/attacks/carlini_wagner.py | 4 +++- .../adv_robustness/attacks/gradient_method.py | 15 ++++++++++--- .../attacks/iterative_gradient_method.py | 14 +++++++++--- mindarmour/adv_robustness/attacks/jsma.py | 6 +++-- mindarmour/adv_robustness/attacks/lbfgs.py | 3 ++- .../adv_robustness/defenses/adversarial_defense.py | 6 +++-- .../detectors/black/similarity_detector.py | 2 -- .../adv_robustness/detectors/ensemble_detector.py | 3 ++- mindarmour/adv_robustness/detectors/mag_net.py | 3 ++- .../detectors/region_based_detector.py | 4 +++- .../adv_robustness/detectors/spatial_smoothing.py | 4 ++++ mindarmour/fuzz_testing/fuzzing.py | 2 +- .../privacy/diff_privacy/mechanisms/mechanisms.py | 26 ++++++++++------------ mindarmour/privacy/diff_privacy/monitor/monitor.py | 13 ++++++----- mindarmour/privacy/diff_privacy/train/model.py | 3 ++- mindarmour/privacy/evaluation/attacker.py | 2 +- .../privacy/evaluation/membership_inference.py | 7 ++++-- .../privacy/sup_privacy/mask_monitor/masker.py | 2 +- mindarmour/privacy/sup_privacy/sup_ctrl/conctrl.py | 5 ++++- mindarmour/privacy/sup_privacy/train/model.py | 4 +++- .../concept_drift/concept_drift_check_images.py | 18 ++++++++++----- .../model_fault_injection/fault_injection.py | 7 +++--- mindarmour/utils/logger.py | 2 ++ mindarmour/utils/util.py | 1 + 27 files changed, 106 insertions(+), 55 deletions(-) diff --git a/.jenkins/test/config/dependent_packages.yaml b/.jenkins/test/config/dependent_packages.yaml index dc434a6..1b6f403 100644 --- a/.jenkins/test/config/dependent_packages.yaml +++ b/.jenkins/test/config/dependent_packages.yaml @@ -1,2 +1,2 @@ mindspore: - 'mindspore/mindspore/daily/202203/20220320/master_20220320041531_3e442945369de2d9dd20e9e2e9d3c7524a128ee7_newest/' + 'mindspore/mindspore/version/202203/20220323/master_20220323111039_5c9f2e6d5dd9c7da631461b5003bbf5920d5f792/' diff --git a/mindarmour/adv_robustness/attacks/attack.py b/mindarmour/adv_robustness/attacks/attack.py index 4035d04..35b8df5 100644 --- a/mindarmour/adv_robustness/attacks/attack.py +++ b/mindarmour/adv_robustness/attacks/attack.py @@ -31,6 +31,7 @@ TAG = 'Attack' class Attack: """ The abstract base class for all attack classes creating adversarial examples. + The adversarial examples are generated by adding adversarial noises to the original sample. """ def __init__(self): pass diff --git a/mindarmour/adv_robustness/attacks/black/natural_evolutionary_strategy.py b/mindarmour/adv_robustness/attacks/black/natural_evolutionary_strategy.py index 45e89b4..e7d7402 100644 --- a/mindarmour/adv_robustness/attacks/black/natural_evolutionary_strategy.py +++ b/mindarmour/adv_robustness/attacks/black/natural_evolutionary_strategy.py @@ -142,7 +142,7 @@ class NES(Attack): def generate(self, inputs, labels): """ - Main algorithm for NES. + Generate adversarial examples based on input data and target labels. Args: inputs (numpy.ndarray): Benign input samples. 
diff --git a/mindarmour/adv_robustness/attacks/carlini_wagner.py b/mindarmour/adv_robustness/attacks/carlini_wagner.py index e63df14..b02ab33 100644 --- a/mindarmour/adv_robustness/attacks/carlini_wagner.py +++ b/mindarmour/adv_robustness/attacks/carlini_wagner.py @@ -58,7 +58,9 @@ def _best_logits_of_other_class(logits, target_class, value=1): class CarliniWagnerL2Attack(Attack): """ - The Carlini & Wagner attack using L2 norm. + The Carlini & Wagner attack using L2 norm generates the adversarial examples + by utilizing two separate losses: an adversarial loss to make the generated example + actually adversarial, and a distance loss to constrain the quality of the adversarial example. References: `Nicholas Carlini, David Wagner: "Towards Evaluating the Robustness of Neural Networks" `_ diff --git a/mindarmour/adv_robustness/attacks/gradient_method.py b/mindarmour/adv_robustness/attacks/gradient_method.py index 685b02f..2c789e1 100644 --- a/mindarmour/adv_robustness/attacks/gradient_method.py +++ b/mindarmour/adv_robustness/attacks/gradient_method.py @@ -204,6 +204,8 @@ class FastGradientMethod(GradientMethod): class RandomFastGradientMethod(FastGradientMethod): """ Fast Gradient Method use Random perturbation. + A one-step attack based on gradient calculation. The adversarial noises + are generated based on the gradients of inputs, and then randomly perturbed. References: `Florian Tramer, Alexey Kurakin, Nicolas Papernot, "Ensemble adversarial training: Attacks and defenses" in ICLR, 2018 @@ -260,8 +262,8 @@ class RandomFastGradientMethod(FastGradientMethod): class FastGradientSignMethod(GradientMethod): """ - Use the sign instead of the value of the gradient to the input. This attack is - often referred to as Fast Gradient Sign Method and was introduced previously. + The Fast Gradient Sign Method attack calculates the gradient of the input + data, and then uses the sign of the gradient to create adversarial noises. References: `Ian J. Goodfellow, J. Shlens, and C. Szegedy, "Explaining and harnessing adversarial examples," in ICLR, 2015 @@ -335,6 +337,9 @@ class FastGradientSignMethod(GradientMethod): class RandomFastGradientSignMethod(FastGradientSignMethod): """ Fast Gradient Sign Method using random perturbation. + The Random Fast Gradient Sign Method attack calculates the gradient of the input + data, and then uses the sign of the gradient with random perturbation + to create adversarial noises. References: `F. Tramer, et al., "Ensemble adversarial training: Attacks and defenses," in ICLR, 2018 `_ @@ -387,7 +392,8 @@ class RandomFastGradientSignMethod(FastGradientSignMethod): class LeastLikelyClassMethod(FastGradientSignMethod): """ - Least-Likely Class Method. + The Single Step Least-Likely Class Method, a variant of FGSM, targets the + least-likely class to generate the adversarial examples. References: `F. Tramer, et al., "Ensemble adversarial training: Attacks and defenses," in ICLR, 2018 `_ @@ -435,6 +441,9 @@ class RandomLeastLikelyClassMethod(FastGradientSignMethod): """ Least-Likely Class Method use Random perturbation. + The Single Step Least-Likely Class Method with Random Perturbation, a variant of Random FGSM, + targets the least-likely class to generate the adversarial examples. + References: `F.
Tramer, et al., "Ensemble adversarial training: Attacks and defenses," in ICLR, 2018 `_ diff --git a/mindarmour/adv_robustness/attacks/iterative_gradient_method.py b/mindarmour/adv_robustness/attacks/iterative_gradient_method.py index c3c15aa..6567630 100644 --- a/mindarmour/adv_robustness/attacks/iterative_gradient_method.py +++ b/mindarmour/adv_robustness/attacks/iterative_gradient_method.py @@ -257,7 +257,10 @@ class BasicIterativeMethod(IterativeGradientMethod): class MomentumIterativeMethod(IterativeGradientMethod): """ - The Momentum Iterative Method attack. + The Momentum Iterative Method attack accelerates the gradient descent algorithm, + such as FGSM, FGM, and LLCM, by accumulating a velocity vector in the gradient + direction of the loss function across iterations, and thus generates the adversarial examples. + References: `Y. Dong, et al., "Boosting adversarial attacks with momentum," arXiv:1710.06081, 2017 `_ @@ -502,7 +505,9 @@ class ProjectedGradientDescent(BasicIterativeMethod): class DiverseInputIterativeMethod(BasicIterativeMethod): """ - The Diverse Input Iterative Method attack. + The Diverse Input Iterative Method attack follows the basic iterative method, + and applies random transformation to the input data at each iteration. Such transformation + on the input data could improve the transferability of the adversarial examples. References: `Xie, Cihang and Zhang, et al., "Improving Transferability of Adversarial Examples With Input Diversity," in CVPR, 2019 `_ @@ -555,7 +560,10 @@ class DiverseInputIterativeMethod(BasicIterativeMethod): class MomentumDiverseInputIterativeMethod(MomentumIterativeMethod): """ - The Momentum Diverse Input Iterative Method attack. + The Momentum Diverse Input Iterative Method attack is a momentum iterative method, + and applies random transformation to the input data at each iteration. Such transformation + on the input data could improve the transferability of the adversarial examples. + References: `Xie, Cihang and Zhang, et al., "Improving Transferability of Adversarial Examples With Input Diversity," in CVPR, 2019 `_ diff --git a/mindarmour/adv_robustness/attacks/jsma.py b/mindarmour/adv_robustness/attacks/jsma.py index 3101a3a..5f04967 100644 --- a/mindarmour/adv_robustness/attacks/jsma.py +++ b/mindarmour/adv_robustness/attacks/jsma.py @@ -32,8 +32,10 @@ TAG = 'JSMA' class JSMAAttack(Attack): """ - JSMA is an targeted & iterative attack based on saliency map of - input features. + Jacobian-based Saliency Map Attack is a targeted and iterative attack based on the saliency + map of the input features. It uses the gradient of loss with each class label with respect + to every component of the input. Then a saliency map is used to select the dimension which + produces the maximum error. Reference: `The limitations of deep learning in adversarial settings `_ diff --git a/mindarmour/adv_robustness/attacks/lbfgs.py b/mindarmour/adv_robustness/attacks/lbfgs.py index da8d5fd..5fae6ed 100644 --- a/mindarmour/adv_robustness/attacks/lbfgs.py +++ b/mindarmour/adv_robustness/attacks/lbfgs.py @@ -34,7 +34,8 @@ TAG = 'LBFGS' class LBFGS(Attack): """ - Uses L-BFGS-B to minimize the distance between the input and the adversarial example. + In the L-BFGS-B attack, the Limited-Memory BFGS optimization algorithm is used + to minimize the distance between the inputs and the adversarial examples. References: `Pedro Tabacof, Eduardo Valle.
"Exploring the Space of Adversarial Images" `_ diff --git a/mindarmour/adv_robustness/defenses/adversarial_defense.py b/mindarmour/adv_robustness/defenses/adversarial_defense.py index 11a44a1..9cd133d 100644 --- a/mindarmour/adv_robustness/defenses/adversarial_defense.py +++ b/mindarmour/adv_robustness/defenses/adversarial_defense.py @@ -88,7 +88,8 @@ class AdversarialDefense(Defense): class AdversarialDefenseWithAttacks(AdversarialDefense): """ - Adversarial defense with attacks. + Adversarial training using specific attacking method and the given + adversarial examples to enhance model robustness. Args: network (Cell): A MindSpore network to be defensed. @@ -174,7 +175,8 @@ class AdversarialDefenseWithAttacks(AdversarialDefense): class EnsembleAdversarialDefense(AdversarialDefenseWithAttacks): """ - Ensemble adversarial defense. + Adversarial training using a list of specific attacking methods + and the given adversarial examples to enhance model robustness. Args: network (Cell): A MindSpore network to be defensed. diff --git a/mindarmour/adv_robustness/detectors/black/similarity_detector.py b/mindarmour/adv_robustness/detectors/black/similarity_detector.py index 1db6c5f..47ef8c9 100644 --- a/mindarmour/adv_robustness/detectors/black/similarity_detector.py +++ b/mindarmour/adv_robustness/detectors/black/similarity_detector.py @@ -98,9 +98,7 @@ class SimilarityDetector(Detector): >>> detector.set_threshold(num_nearest_neighbors[-1], thresholds[-1]) >>> detector.detect(benign_queries) >>> detections = detector.get_detection_interval() - >>> detector.detect_diff() >>> detected_queries = detector.get_detected_queries() - >>> detector.transform(x_train) """ def __init__(self, trans_model, max_k_neighbor=1000, chunk_size=1000, diff --git a/mindarmour/adv_robustness/detectors/ensemble_detector.py b/mindarmour/adv_robustness/detectors/ensemble_detector.py index 4fb95be..3a42e55 100644 --- a/mindarmour/adv_robustness/detectors/ensemble_detector.py +++ b/mindarmour/adv_robustness/detectors/ensemble_detector.py @@ -27,7 +27,8 @@ TAG = 'EnsembleDetector' class EnsembleDetector(Detector): """ - Ensemble detector. + The ensemble detector uses a list of detectors to detect the adversarial + examples from the input samples. Args: detectors (Union[tuple, list]): List of detector methods. diff --git a/mindarmour/adv_robustness/detectors/mag_net.py b/mindarmour/adv_robustness/detectors/mag_net.py index 2944cae..f26f583 100644 --- a/mindarmour/adv_robustness/detectors/mag_net.py +++ b/mindarmour/adv_robustness/detectors/mag_net.py @@ -166,7 +166,8 @@ class ErrorBasedDetector(Detector): class DivergenceBasedDetector(ErrorBasedDetector): """ - This class implement a divergence-based detector. + The divergence-based detector learns to distinguish normal and adversarial + examples by their js-divergence. Reference: `MagNet: a Two-Pronged Defense against Adversarial Examples, by Dongyu Meng and Hao Chen, at CCS 2017. diff --git a/mindarmour/adv_robustness/detectors/region_based_detector.py b/mindarmour/adv_robustness/detectors/region_based_detector.py index a1bc603..91af5f4 100644 --- a/mindarmour/adv_robustness/detectors/region_based_detector.py +++ b/mindarmour/adv_robustness/detectors/region_based_detector.py @@ -34,7 +34,9 @@ TAG = 'RegionBasedDetector' class RegionBasedDetector(Detector): """ - This class implement a region-based detector. 
+ The region-based detector uses the fact that adversarial examples are close + to the classification boundary, and ensembles information around the given example + to predict whether it is an adversarial example or not. Reference: `Mitigating evasion attacks to deep neural networks via region-based classification `_ diff --git a/mindarmour/adv_robustness/detectors/spatial_smoothing.py b/mindarmour/adv_robustness/detectors/spatial_smoothing.py index 91a788d..a415612 100644 --- a/mindarmour/adv_robustness/detectors/spatial_smoothing.py +++ b/mindarmour/adv_robustness/detectors/spatial_smoothing.py @@ -38,6 +38,10 @@ def _median_filter_np(inputs, size=2): class SpatialSmoothing(Detector): """ Detect method based on spatial smoothing. + Gaussian filtering, median filtering, and mean filtering are used to blur + the original image. If the difference between the model's predictions + before and after the sample is blurred exceeds a threshold, + it is judged as an adversarial example. Args: model (Model): Target model. diff --git a/mindarmour/fuzz_testing/fuzzing.py b/mindarmour/fuzz_testing/fuzzing.py index d6456ce..02e7478 100644 --- a/mindarmour/fuzz_testing/fuzzing.py +++ b/mindarmour/fuzz_testing/fuzzing.py @@ -169,7 +169,7 @@ class Fuzzer: >>> initial_seeds = [] >>> # make initial seeds >>> for img, label in zip(test_images, test_labels): - >>> initial_seeds.append([img, label]) + ... initial_seeds.append([img, label]) >>> initial_seeds = initial_seeds[:10] >>> nc = KMultisectionNeuronCoverage(model, train_images, segmented_num=100, incremental=True) >>> model_fuzz_test = Fuzzer(model) diff --git a/mindarmour/privacy/diff_privacy/mechanisms/mechanisms.py b/mindarmour/privacy/diff_privacy/mechanisms/mechanisms.py index 4d302c4..649715f 100644 --- a/mindarmour/privacy/diff_privacy/mechanisms/mechanisms.py +++ b/mindarmour/privacy/diff_privacy/mechanisms/mechanisms.py @@ -36,6 +36,8 @@ TAG = 'NoiseMechanism' class ClipMechanismsFactory: """ Factory class of clip mechanisms + Wrapper of clip noise generating mechanisms. It supports Adaptive Clipping with + Gaussian Random Noise for now. For details, please check `Tutorial `_ @@ -55,7 +57,7 @@ class ClipMechanismsFactory: learning_rate(float): Learning rate of update norm clip. Default: 0.001. target_unclipped_quantile(float): Target quantile of norm clip. Default: 0.9. fraction_stddev(float): The stddev of Gaussian normal which used in - empirical_fraction, the formula is :math:`empirical fraction + N(0, fraction sstddev)`. + empirical_fraction, the formula is :math:`empirical_fraction + N(0, fraction_stddev)`. Default: 0.01. seed(int): Original random seed, if seed=0 random normal will use secure random number. IF seed!=0 random normal will generate values using @@ -95,6 +97,8 @@ class NoiseMechanismsFactory: """ Factory class of noise mechanisms + Wrapper of noise generating mechanisms. It supports Gaussian Random Noise and + Adaptive Gaussian Random Noise for now. For details, please check `Tutorial `_ @@ -165,7 +169,8 @@ class _Mechanisms(Cell): class NoiseGaussianRandom(_Mechanisms): """ - Gaussian noise generated mechanism. + Generate noise in Gaussian Distribution with :math:`mean=0` and + :math:`standard deviation = norm_bound * initial_noise_multiplier`. Args: norm_bound(float): Clipping bound for the l2 norm of the gradients. initial_noise_multiplier(float): Ratio of the standard deviation of Gaussian noise divided by the norm_bound, which will be used to calculate privacy spent. Default: 1.0. seed(int): Original random seed, if seed=0 random normal will use secure random number. IF seed!=0 random normal will generate values using given seed. Default: 0. decay_policy(str): Mechanisms parameters update policy. Default: None.
- Returns: - Tensor, generated noise with shape like given gradients. - Examples: >>> from mindspore import Tensor >>> from mindspore.common import dtype as mstype @@ -230,8 +232,7 @@ class NoiseAdaGaussianRandom(NoiseGaussianRandom): """ Adaptive Gaussian noise generated mechanism. Noise would be decayed with training. Decay mode could be 'Time' mode, 'Step' mode, 'Exp' mode. - `self._noise_multiplier` will be update during the model.train, using - _MechanismsParamsUpdater. + `self._noise_multiplier` will be updated during the model training process. Args: norm_bound(float): Clipping bound for the l2 norm of the gradients. @@ -247,9 +248,6 @@ noise_decay_rate(float): Hyper parameter for controlling the noise decay. Default: 6e-6. decay_policy(str): Noise decay strategy include 'Step', 'Time', 'Exp'. Default: 'Exp'. - Returns: - Tensor, generated noise with shape like given gradients. - Examples: >>> from mindspore import Tensor >>> from mindspore.common import dtype as mstype @@ -350,10 +348,10 @@ class _MechanismsParamsUpdater(Cell): class AdaClippingWithGaussianRandom(Cell): """ - Adaptive clipping. If `decay_policy` is 'Linear', the update formula :math:`norm bound = norm bound - - learning rate*(beta - target unclipped quantile)`. - If `decay_policy` is 'Geometric', the update formula is :math:`norm bound = - norm bound*exp(-learning rate*(empirical fraction - target unclipped quantile))`. + Adaptive clipping. If `decay_policy` is 'Linear', the update formula :math:`norm_bound = norm_bound - + learning_rate*(beta - target_unclipped_quantile)`. + If `decay_policy` is 'Geometric', the update formula is :math:`norm_bound = + norm_bound*exp(-learning_rate*(empirical_fraction - target_unclipped_quantile))`. where beta is the empirical fraction of samples with the value at most `target_unclipped_quantile`. diff --git a/mindarmour/privacy/diff_privacy/monitor/monitor.py b/mindarmour/privacy/diff_privacy/monitor/monitor.py index 57cfd12..067f10b 100644 --- a/mindarmour/privacy/diff_privacy/monitor/monitor.py +++ b/mindarmour/privacy/diff_privacy/monitor/monitor.py @@ -28,6 +28,8 @@ TAG = 'DP monitor' class PrivacyMonitorFactory: """ Factory class of DP training's privacy monitor. + For details, please check `Tutorial `_ + """ def __init__(self): @@ -38,8 +40,6 @@ """ Create a privacy monitor class. - For details, please check `Tutorial `_ - Args: policy (str): Monitor policy, 'rdp' and 'zcdp' are supported by now. If policy is 'rdp', the monitor will compute the @@ -74,11 +74,11 @@ class RDPMonitor(Callback): mechanism is said to have ε'-Renyi differential privacy of order α, it also satisfies conventional differential privacy (ε, δ) as below: - For details, please check `Tutorial `_ - .. math:: (ε'+\frac{log(1/δ)}{α-1}, δ) + For details, please check `Tutorial `_ + Reference: `Rényi Differential Privacy of the Sampled Gaussian Mechanism `_ @@ -363,14 +363,15 @@ class ZCDPMonitor(Callback): if a randomized mechanism is said to have ρ-zCDP, it also satisfies conventional differential privacy (ε, δ) as below: - For details, please check `Tutorial `_ - .. math:: (ρ+2\sqrt{ρ*log(1/δ)}, δ) It should be noted that ZCDPMonitor is not suitable for subsampling noise mechanisms(such as NoiseAdaGaussianRandom and NoiseGaussianRandom). The matching noise mechanism of ZCDP will be developed in the future.
+ + For details, please check `Tutorial `_ + Reference: `Concentrated Differentially Private Gradient Descent with Adaptive per-Iteration Privacy Budget `_ diff --git a/mindarmour/privacy/diff_privacy/train/model.py b/mindarmour/privacy/diff_privacy/train/model.py index faade7f..058ed1e 100644 --- a/mindarmour/privacy/diff_privacy/train/model.py +++ b/mindarmour/privacy/diff_privacy/train/model.py @@ -67,6 +67,7 @@ def tensor_grad_scale(scale, grad): class DPModel(Model): """ + DPModel is used for constructing a model for differential privacy training. This class is overload mindspore.train.model.Model. For details, please check `Tutorial `_ @@ -82,7 +83,7 @@ Default: None. Raises: - ValueError: If DPOptimizer and noise_mecn are both None or not None. + ValueError: If DPOptimizer and noise_mech are both None or not None. ValueError: If noise_mech or DPOtimizer's mech method is adaptive while clip_mech is not None. """ diff --git a/mindarmour/privacy/evaluation/attacker.py b/mindarmour/privacy/evaluation/attacker.py index 5aea26c..496110c 100644 --- a/mindarmour/privacy/evaluation/attacker.py +++ b/mindarmour/privacy/evaluation/attacker.py @@ -139,7 +139,7 @@ def _get_attack_model(features, labels, config, n_jobs=-1): sklearn.BaseEstimator, trained model specify by config["method"]. Examples: - >>> from mindarmour.privacy.evaluation.attacker import get_attack_model + >>> from mindarmour.privacy.evaluation.attacker import _get_attack_model >>> features = np.random.randn(10, 10) >>> labels = np.random.randint(0, 2, 10) >>> config = {"method": "knn", "params": {"n_neighbors": [3, 5]}} diff --git a/mindarmour/privacy/evaluation/membership_inference.py b/mindarmour/privacy/evaluation/membership_inference.py index fdb8acf..0bd08e8 100644 --- a/mindarmour/privacy/evaluation/membership_inference.py +++ b/mindarmour/privacy/evaluation/membership_inference.py @@ -94,8 +94,11 @@ def _softmax_cross_entropy(logits, labels, epsilon=1e-12): class MembershipInference: """ - Evaluation proposed by Shokri, Stronati, Song and Shmatikov is a grey-box attack. - The attack requires loss or logits results of training samples. + Proposed by Shokri, Stronati, Song and Shmatikov, membership inference is a grey-box attack + for inferring a user's private data. It requires loss or logits results of the training samples. + (Privacy refers to some sensitive attributes of a single user). + + For details, please refer to the `Tutorial `_ References: `Reza Shokri, Marco Stronati, Congzheng Song, Vitaly Shmatikov. Membership Inference Attacks against Machine Learning Models. 2017. diff --git a/mindarmour/privacy/sup_privacy/mask_monitor/masker.py b/mindarmour/privacy/sup_privacy/mask_monitor/masker.py index ef7f906..1a54ae9 100644 --- a/mindarmour/privacy/sup_privacy/mask_monitor/masker.py +++ b/mindarmour/privacy/sup_privacy/mask_monitor/masker.py @@ -25,6 +25,7 @@ TAG = 'suppress masker' class SuppressMasker(Callback): """ + Periodically check the status of the suppress privacy function and toggle the suppress operation.
For details, please check `Tutorial `_ Args: @@ -33,7 +34,6 @@ Examples: >>> import mindspore.nn as nn - >>> import mindspore.dataset as ds >>> import mindspore.ops.operations as P >>> from mindspore import context >>> from mindspore.nn import Accuracy diff --git a/mindarmour/privacy/sup_privacy/sup_ctrl/conctrl.py b/mindarmour/privacy/sup_privacy/sup_ctrl/conctrl.py index ec4acd4..7c15eb3 100644 --- a/mindarmour/privacy/sup_privacy/sup_ctrl/conctrl.py +++ b/mindarmour/privacy/sup_privacy/sup_ctrl/conctrl.py @@ -63,7 +63,6 @@ class SuppressPrivacyFactory: Examples: >>> import mindspore.nn as nn - >>> import mindspore.dataset as ds >>> import mindspore.ops.operations as P >>> from mindspore import context >>> from mindspore.nn import Accuracy @@ -113,6 +112,10 @@ class SuppressCtrl(Cell): """ + Complete the suppress privacy operation, including computing the suppress ratio, + finding the parameters that should be suppressed, and suppressing these + parameters permanently. + For details, please check `Tutorial `_ Args: diff --git a/mindarmour/privacy/sup_privacy/train/model.py b/mindarmour/privacy/sup_privacy/train/model.py index 3b06a8d..ac49434 100644 --- a/mindarmour/privacy/sup_privacy/train/model.py +++ b/mindarmour/privacy/sup_privacy/train/model.py @@ -56,7 +56,9 @@ def tensor_grad_scale(scale, grad): class SuppressModel(Model): """ - This class is overload mindspore.train.model.Model. + Complete model training function. The suppress privacy function is embedded into the overloaded + mindspore.train.model.Model. + For details, please check `Tutorial `_ Args: diff --git a/mindarmour/reliability/concept_drift/concept_drift_check_images.py b/mindarmour/reliability/concept_drift/concept_drift_check_images.py index 2967561..3a6873e 100644 --- a/mindarmour/reliability/concept_drift/concept_drift_check_images.py +++ b/mindarmour/reliability/concept_drift/concept_drift_check_images.py @@ -25,7 +25,7 @@ from mindarmour.utils._check_param import check_param_type, check_param_in_range class OodDetector: """ - Train the OOD detector. + The abstract class of the out-of-distribution detector. Args: model (Model):The training model. @@ -55,7 +55,9 @@ class OodDetector: def get_optimal_threshold(self, label, ds_eval): """ - Get the optimal threshold. + Get the optimal threshold. Try to find an optimal threshold value to + detect OOD examples. The optimal threshold is calculated by a labeled + dataset `ds_eval`. Args: label (numpy.ndarray): The label whether an image is in-distribution and out-of-distribution. @@ -67,7 +69,9 @@ def ood_predict(self, threshold, ds_test): """ - The out-of-distribution detection. + The out-of-distribution detection. This function aims to detect whether images, + provided in `ds_test`, are OOD examples or not. If the prediction score of one + image is larger than `threshold`, this image is out-of-distribution. Args: threshold (float): the threshold to judge ood data. One can set value by experience @@ -174,7 +178,9 @@ class OodDetectorFeatureCluster(OodDetector): def get_optimal_threshold(self, label, ds_eval): """ - Get the optimal threshold. + Get the optimal threshold. Try to find an optimal threshold value to + detect OOD examples. The optimal threshold is calculated by a labeled + dataset `ds_eval`. Args: label (numpy.ndarray): The label whether an image is in-distribution and out-of-distribution.
@@ -204,7 +210,9 @@ def ood_predict(self, threshold, ds_test): """ - The out-of-distribution detection. + The out-of-distribution detection. This function aims to detect whether images, + provided in `ds_test`, are OOD examples or not. If the prediction score of one + image is larger than `threshold`, this image is out-of-distribution. Args: threshold (float): the threshold to judge ood data. One can set value by experience diff --git a/mindarmour/reliability/model_fault_injection/fault_injection.py b/mindarmour/reliability/model_fault_injection/fault_injection.py index d917755..7730757 100644 --- a/mindarmour/reliability/model_fault_injection/fault_injection.py +++ b/mindarmour/reliability/model_fault_injection/fault_injection.py @@ -28,7 +28,9 @@ TAG = 'FaultInjector' class FaultInjector: """ - Fault injection for deep neural networks and evaluate performance. + Fault injection module simulates various fault scenarios for deep neural networks and evaluates + the performance and reliability of the model. + For details, please check `Tutorial `_ Args: @@ -40,7 +42,6 @@ Examples: >>> from mindspore import Model - >>> import mindspore.dataset as ds >>> import mindspore.ops.operations as P >>> from mindarmour.reliability.model_fault_injection.fault_injection import FaultInjector >>> class Net(nn.Cell): @@ -64,7 +65,7 @@ >>> model = Model(net) >>> ds_eval = ds.GeneratorDataset(dataset_generator, ['image', 'label']) >>> fi_type = ['bitflips_random', 'bitflips_designated', 'random', 'zeros', - 'nan', 'inf', 'anti_activation', 'precision_loss'] + ... 'nan', 'inf', 'anti_activation', 'precision_loss'] >>> fi_mode = ['single_layer', 'all_layer'] >>> fi_size = [1] >>> fi = FaultInjector(model, ds_eval, fi_type, fi_mode, fi_size) diff --git a/mindarmour/utils/logger.py b/mindarmour/utils/logger.py index 37ddb9f..f49aeae 100644 --- a/mindarmour/utils/logger.py +++ b/mindarmour/utils/logger.py @@ -30,6 +30,8 @@ class LogUtil: """ Logging module. + Recording the logging statistics over time in long-running scripts. + Raises: SyntaxError: If create this class. """ diff --git a/mindarmour/utils/util.py b/mindarmour/utils/util.py index a000ad0..b851365 100644 --- a/mindarmour/utils/util.py +++ b/mindarmour/utils/util.py @@ -216,6 +216,7 @@ class GradWrap(Cell): ... out = self._softmax(inputs) ... out = self._Dense(out) ... return self._squeeze(out) + >>> net = Net() >>> data = Tensor(np.ones([2, 1, 10]).astype(np.float32)*0.01) >>> labels = Tensor(np.ones([2, 10]).astype(np.float32)) >>> num_classes = 10