@@ -1,2 +1,2 @@
mindspore:
'mindspore/mindspore/daily/202203/20220320/master_20220320041531_3e442945369de2d9dd20e9e2e9d3c7524a128ee7_newest/'
'mindspore/mindspore/version/202203/20220323/master_20220323111039_5c9f2e6d5dd9c7da631461b5003bbf5920d5f792/'
@@ -31,6 +31,7 @@ TAG = 'Attack'
class Attack:
    """
    The abstract base class for all attack classes creating adversarial examples.
    The adversarial examples are generated by adding adversarial noises to the original sample.
    """
    def __init__(self):
        pass
@@ -142,7 +142,7 @@ class NES(Attack):
    def generate(self, inputs, labels):
        """
        Main algorithm for NES.
        Generate adversarial examples based on input data and target labels.
        Args:
            inputs (numpy.ndarray): Benign input samples.
@@ -58,7 +58,9 @@ def _best_logits_of_other_class(logits, target_class, value=1):
class CarliniWagnerL2Attack(Attack):
    """
    The Carlini & Wagner attack using L2 norm.
    The Carlini & Wagner attack using L2 norm generates the adversarial examples
    by utilizing two separate losses: an adversarial loss to make the generated example
    actually adversarial, and a distance loss to constrain the quality of the adversarial example.
    References: `Nicholas Carlini, David Wagner: "Towards Evaluating
    the Robustness of Neural Networks" <https://arxiv.org/abs/1608.04644>`_
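To make the two-loss formulation above concrete, here is a minimal numpy sketch of the combined objective; `adv_loss_fn` and the trade-off constant `const` are illustrative stand-ins, not the MindArmour API:

```python
import numpy as np

def cw_l2_objective(delta, x, const, adv_loss_fn):
    """Hypothetical C&W objective: stay close to the original while being adversarial."""
    distance_loss = np.sum(np.square(delta))   # L2 distance keeps the example close to the original
    adversarial_loss = adv_loss_fn(x + delta)  # assumed margin-style loss, small once misclassification succeeds
    return distance_loss + const * adversarial_loss
```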
@@ -204,6 +204,8 @@ class FastGradientMethod(GradientMethod):
class RandomFastGradientMethod(FastGradientMethod):
    """
    Fast Gradient Method use Random perturbation.
    A one-step attack based on gradients calculation. The adversarial noises
    are generated based on the gradients of inputs, and then randomly perturbed.
    References: `Florian Tramer, Alexey Kurakin, Nicolas Papernot, "Ensemble
    adversarial training: Attacks and defenses" in ICLR, 2018
@@ -260,8 +262,8 @@ class RandomFastGradientMethod(FastGradientMethod):
class FastGradientSignMethod(GradientMethod):
    """
    Use the sign instead of the value of the gradient to the input. This attack is
    often referred to as Fast Gradient Sign Method and was introduced previously.
    The Fast Gradient Sign Method attack calculates the gradient of the input
    data, and then uses the sign of the gradient to create adversarial noises.
    References: `Ian J. Goodfellow, J. Shlens, and C. Szegedy, "Explaining
    and harnessing adversarial examples," in ICLR, 2015
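The sign-based update described in the new docstring reduces to a few lines; `eps` and the [0, 1] input range are assumptions for illustration:

```python
import numpy as np

def fgsm_perturb(inputs, gradients, eps=0.07):
    """One-step FGSM: move each input component by eps along the gradient sign."""
    adv = inputs + eps * np.sign(gradients)  # sign of the gradient, not its value
    return np.clip(adv, 0.0, 1.0)            # keep the result in the assumed valid range
```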
@@ -335,6 +337,9 @@ class FastGradientSignMethod(GradientMethod):
class RandomFastGradientSignMethod(FastGradientSignMethod):
    """
    Fast Gradient Sign Method using random perturbation.
    The Random Fast Gradient Sign Method attack calculates the gradient of the input
    data, and then uses the sign of the gradient with random perturbation
    to create adversarial noises.
    References: `F. Tramer, et al., "Ensemble adversarial training: Attacks
    and defenses," in ICLR, 2018 <https://arxiv.org/abs/1705.07204>`_
@@ -387,7 +392,8 @@ class RandomFastGradientSignMethod(FastGradientSignMethod):
class LeastLikelyClassMethod(FastGradientSignMethod):
    """
    Least-Likely Class Method.
    The Single Step Least-Likely Class Method, a variant of FGSM, targets the
    least-likely class to generate the adversarial examples.
    References: `F. Tramer, et al., "Ensemble adversarial training: Attacks
    and defenses," in ICLR, 2018 <https://arxiv.org/abs/1705.07204>`_
@@ -435,6 +441,9 @@ class RandomLeastLikelyClassMethod(FastGradientSignMethod):
    """
    Least-Likely Class Method use Random perturbation.
    The Single Step Least-Likely Class Method with Random Perturbation, a variant of Random FGSM,
    targets the least-likely class to generate the adversarial examples.
    References: `F. Tramer, et al., "Ensemble adversarial training: Attacks
    and defenses," in ICLR, 2018 <https://arxiv.org/abs/1705.07204>`_
@@ -257,7 +257,10 @@ class BasicIterativeMethod(IterativeGradientMethod):
class MomentumIterativeMethod(IterativeGradientMethod):
    """
    The Momentum Iterative Method attack.
    The Momentum Iterative Method attack accelerates gradient-based attacks, such as FGSM,
    FGM, and LLCM, by accumulating a velocity vector in the gradient
    direction of the loss function across iterations, and thus generates the adversarial examples.
    References: `Y. Dong, et al., "Boosting adversarial attacks with
    momentum," arXiv:1710.06081, 2017 <https://arxiv.org/abs/1710.06081>`_
@@ -502,7 +505,9 @@ class ProjectedGradientDescent(BasicIterativeMethod):
class DiverseInputIterativeMethod(BasicIterativeMethod):
    """
    The Diverse Input Iterative Method attack.
    The Diverse Input Iterative Method attack follows the basic iterative method,
    and applies random transformation to the input data at each iteration. Such transformation
    on the input data could improve the transferability of the adversarial examples.
    References: `Xie, Cihang and Zhang, et al., "Improving Transferability of
    Adversarial Examples With Input Diversity," in CVPR, 2019 <https://arxiv.org/abs/1803.06978>`_
@@ -555,7 +560,10 @@ class DiverseInputIterativeMethod(BasicIterativeMethod):
class MomentumDiverseInputIterativeMethod(MomentumIterativeMethod):
    """
    The Momentum Diverse Input Iterative Method attack.
    The Momentum Diverse Input Iterative Method attack is a momentum iterative method,
    and applies random transformation to the input data at each iteration. Such transformation
    on the input data could improve the transferability of the adversarial examples.
    References: `Xie, Cihang and Zhang, et al., "Improving Transferability of
    Adversarial Examples With Input Diversity," in CVPR, 2019 <https://arxiv.org/abs/1803.06978>`_
@@ -32,8 +32,10 @@ TAG = 'JSMA'
class JSMAAttack(Attack):
    """
    JSMA is an targeted & iterative attack based on saliency map of
    input features.
    Jacobian-based Saliency Map Attack is a targeted and iterative attack based on the saliency
    map of the input features. It computes the gradient of the loss for each class label with respect
    to every component of the input, and then uses a saliency map to select the dimension which
    produces the maximum error.
    Reference: `The limitations of deep learning in adversarial settings
    <https://arxiv.org/abs/1511.07528>`_
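A simplified sketch of the saliency-map selection the docstring describes, assuming a Jacobian of model outputs with respect to input features; the sign conditions follow the referenced paper:

```python
import numpy as np

def saliency_pick(jacobian, target_class):
    """Pick the feature with maximum saliency; `jacobian` has shape (num_classes, num_features)."""
    target_grad = jacobian[target_class]             # effect of each feature on the target class
    other_grad = jacobian.sum(axis=0) - target_grad  # combined effect on all other classes
    saliency = np.where((target_grad > 0) & (other_grad < 0),
                        target_grad * np.abs(other_grad), 0.0)
    return int(np.argmax(saliency))                  # dimension producing the maximum error
```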
@@ -34,7 +34,8 @@ TAG = 'LBFGS'
class LBFGS(Attack):
    """
    Uses L-BFGS-B to minimize the distance between the input and the adversarial example.
    In the L-BFGS-B attack, the Limited-Memory BFGS optimization algorithm is used
    to minimize the distance between the inputs and the adversarial examples.
    References: `Pedro Tabacof, Eduardo Valle. "Exploring the Space of
    Adversarial Images" <https://arxiv.org/abs/1510.05328>`_
@@ -88,7 +88,8 @@ class AdversarialDefense(Defense):
class AdversarialDefenseWithAttacks(AdversarialDefense):
    """
    Adversarial defense with attacks.
    Adversarial training using a specific attacking method and the given
    adversarial examples to enhance model robustness.
    Args:
        network (Cell): A MindSpore network to be defended.
@@ -174,7 +175,8 @@ class AdversarialDefenseWithAttacks(AdversarialDefense):
class EnsembleAdversarialDefense(AdversarialDefenseWithAttacks):
    """
    Ensemble adversarial defense.
    Adversarial training using a list of specific attacking methods
    and the given adversarial examples to enhance model robustness.
    Args:
        network (Cell): A MindSpore network to be defended.
@@ -98,9 +98,7 @@ class SimilarityDetector(Detector):
        >>> detector.set_threshold(num_nearest_neighbors[-1], thresholds[-1])
        >>> detector.detect(benign_queries)
        >>> detections = detector.get_detection_interval()
        >>> detector.detect_diff()
        >>> detected_queries = detector.get_detected_queries()
        >>> detector.transform(x_train)
    """
    def __init__(self, trans_model, max_k_neighbor=1000, chunk_size=1000,
@@ -27,7 +27,8 @@ TAG = 'EnsembleDetector'
class EnsembleDetector(Detector):
    """
    Ensemble detector.
    The ensemble detector uses a list of detectors to detect the adversarial
    examples from the input samples.
    Args:
        detectors (Union[tuple, list]): List of detector methods.
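One plausible way such a detector list can be combined, sketched under the assumption that each detector exposes a `detect` method returning per-sample 0/1 flags; the actual voting policy of `EnsembleDetector` may differ:

```python
import numpy as np

def ensemble_detect(detectors, inputs, policy='vote'):
    """Combine per-detector 0/1 flags into a single adversarial verdict."""
    flags = np.array([d.detect(inputs) for d in detectors])
    if policy == 'vote':
        return flags.mean(axis=0) > 0.5  # majority of detectors must agree
    return flags.any(axis=0)             # 'any': a single alarm flags the sample
```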
@@ -166,7 +166,8 @@ class ErrorBasedDetector(Detector):
class DivergenceBasedDetector(ErrorBasedDetector):
    """
    This class implement a divergence-based detector.
    The divergence-based detector learns to distinguish normal and adversarial
    examples by their JS divergence.
    Reference: `MagNet: a Two-Pronged Defense against Adversarial Examples,
    by Dongyu Meng and Hao Chen, at CCS 2017.
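For reference, the Jensen-Shannon divergence the new text mentions can be computed from two probability vectors as follows (a self-contained sketch, not MagNet's implementation):

```python
import numpy as np

def js_divergence(p, q, eps=1e-12):
    """JS divergence between probability vectors p and q."""
    p, q = p + eps, q + eps                      # avoid log(0)
    m = 0.5 * (p + q)
    kl = lambda a, b: np.sum(a * np.log(a / b))  # Kullback-Leibler divergence
    return 0.5 * kl(p, m) + 0.5 * kl(q, m)
```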
@@ -34,7 +34,9 @@ TAG = 'RegionBasedDetector'
class RegionBasedDetector(Detector):
    """
    This class implement a region-based detector.
    The region-based detector uses the fact that adversarial examples are close
    to the classification boundary, and aggregates information around the given example
    to predict whether it is an adversarial example or not.
    Reference: `Mitigating evasion attacks to deep neural networks via
    region-based classification <https://arxiv.org/abs/1709.05583>`_
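A toy sketch of the region-based idea: sample points in a small hypercube around the input and take a majority vote over their predictions. The radius, sample count, and `predict_fn` signature are assumptions:

```python
import numpy as np

def region_predict(predict_fn, x, radius=0.02, num_samples=100):
    """Majority label over random points sampled in a hypercube around x."""
    offsets = np.random.uniform(-radius, radius, size=(num_samples,) + x.shape)
    votes = [int(np.argmax(predict_fn(x + off))) for off in offsets]
    return max(set(votes), key=votes.count)
```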
@@ -38,6 +38,10 @@ def _median_filter_np(inputs, size=2):
class SpatialSmoothing(Detector):
    """
    Detect method based on spatial smoothing.
    Gaussian filtering, median filtering, or mean filtering is used to blur
    the original image. When the difference between the model's predicted
    values before and after the sample is blurred exceeds a given threshold,
    the sample is judged to be an adversarial example.
    Args:
        model (Model): Target model.
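The before/after comparison the added description outlines could look like the following; the threshold value and the choice of a median filter are illustrative:

```python
import numpy as np
from scipy.ndimage import median_filter

def looks_adversarial(predict_fn, image, threshold=0.1, size=2):
    """Flag the input when blurring shifts the prediction by more than the threshold."""
    raw = predict_fn(image)
    smoothed = predict_fn(median_filter(image, size=size))
    return np.max(np.abs(raw - smoothed)) > threshold
```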
@@ -169,7 +169,7 @@ class Fuzzer:
        >>> initial_seeds = []
        >>> # make initial seeds
        >>> for img, label in zip(test_images, test_labels):
        >>> initial_seeds.append([img, label])
        ... initial_seeds.append([img, label])
        >>> initial_seeds = initial_seeds[:10]
        >>> nc = KMultisectionNeuronCoverage(model, train_images, segmented_num=100, incremental=True)
        >>> model_fuzz_test = Fuzzer(model)
@@ -36,6 +36,8 @@ TAG = 'NoiseMechanism'
class ClipMechanismsFactory:
    """
    Factory class of clip mechanisms
    Wrapper of clip noise generating mechanisms. It supports Adaptive Clipping with
    Gaussian Random Noise for now.
    For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_differential_privacy.html#%E5%B7%AE%E5%88%86%E9%9A%90%E7%A7%81>`_
@@ -55,7 +57,7 @@ class ClipMechanismsFactory:
        learning_rate(float): Learning rate of update norm clip. Default: 0.001.
        target_unclipped_quantile(float): Target quantile of norm clip. Default: 0.9.
        fraction_stddev(float): The stddev of Gaussian normal which is used in
            empirical_fraction, the formula is :math:`empirical fraction + N(0, fraction sstddev)`.
            empirical_fraction, the formula is :math:`empirical_fraction + N(0, fraction_stddev)`.
            Default: 0.01.
        seed(int): Original random seed, if seed=0 random normal will use secure
            random number. If seed!=0 random normal will generate values using
class NoiseMechanismsFactory: | |||
""" Factory class of noise mechanisms | |||
Wrapper of noise generating mechanisms. It supports Gaussian Random Noise and | |||
Adaptive Gaussian Random Noise for now. | |||
For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_differential_privacy.html#%E5%B7%AE%E5%88%86%E9%9A%90%E7%A7%81>`_ | |||
@@ -165,7 +169,8 @@ class _Mechanisms(Cell):
class NoiseGaussianRandom(_Mechanisms):
    """
    Gaussian noise generated mechanism.
    Generate noise in Gaussian Distribution with :math:`mean=0` and
    :math:`standard_deviation = norm_bound * initial_noise_multiplier`.
    Args:
        norm_bound(float): Clipping bound for the l2 norm of the gradients.
@@ -178,9 +183,6 @@ class NoiseGaussianRandom(_Mechanisms):
            given seed. Default: 0.
        decay_policy(str): Mechanisms parameters update policy. Default: None.
    Returns:
        Tensor, generated noise with shape like given gradients.
    Examples:
        >>> from mindspore import Tensor
        >>> from mindspore.common import dtype as mstype
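The noise formula stated in the NoiseGaussianRandom docstring above (mean 0, stddev = norm_bound * initial_noise_multiplier) reduces to a one-liner; this numpy sketch is for illustration only, not the Cell-based implementation:

```python
import numpy as np

def gaussian_noise(shape, norm_bound=1.0, initial_noise_multiplier=1.5, seed=None):
    """Sample noise with mean 0 and stddev = norm_bound * initial_noise_multiplier."""
    rng = np.random.default_rng(seed)
    return rng.normal(0.0, norm_bound * initial_noise_multiplier, size=shape)
```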
@@ -230,8 +232,7 @@ class NoiseAdaGaussianRandom(NoiseGaussianRandom):
    """
    Adaptive Gaussian noise generated mechanism. Noise would be decayed with
    training. Decay mode could be 'Time' mode, 'Step' mode, 'Exp' mode.
    `self._noise_multiplier` will be update during the model.train, using
    _MechanismsParamsUpdater.
    `self._noise_multiplier` will be updated during the model training process.
    Args:
        norm_bound(float): Clipping bound for the l2 norm of the gradients.
@@ -247,9 +248,6 @@ class NoiseAdaGaussianRandom(NoiseGaussianRandom):
        decay_policy(str): Noise decay strategy include 'Step', 'Time', 'Exp'.
            Default: 'Exp'.
    Returns:
        Tensor, generated noise with shape like given gradients.
    Examples:
        >>> from mindspore import Tensor
        >>> from mindspore.common import dtype as mstype
@@ -350,10 +348,10 @@ class _MechanismsParamsUpdater(Cell):
class AdaClippingWithGaussianRandom(Cell):
    """
    Adaptive clipping. If `decay_policy` is 'Linear', the update formula :math:`norm bound = norm bound -
    learning rate*(beta - target unclipped quantile)`.
    If `decay_policy` is 'Geometric', the update formula is :math:`norm bound =
    norm bound*exp(-learning rate*(empirical fraction - target unclipped quantile))`.
    Adaptive clipping. If `decay_policy` is 'Linear', the update formula is :math:`norm_bound = norm_bound -
    learning_rate*(beta - target_unclipped_quantile)`.
    If `decay_policy` is 'Geometric', the update formula is :math:`norm_bound =
    norm_bound*exp(-learning_rate*(empirical_fraction - target_unclipped_quantile))`.
    where beta is the empirical fraction of samples with the value at most
    `target_unclipped_quantile`.
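Both corrected update formulas translate directly into code; a minimal sketch, with `empirical_fraction` standing in for beta as the docstring defines it:

```python
import numpy as np

def update_norm_bound(norm_bound, empirical_fraction, target_unclipped_quantile,
                      learning_rate=0.001, decay_policy='Linear'):
    """Apply the 'Linear' or 'Geometric' norm-bound update from the docstring above."""
    if decay_policy == 'Linear':
        return norm_bound - learning_rate * (empirical_fraction - target_unclipped_quantile)
    return norm_bound * np.exp(-learning_rate * (empirical_fraction - target_unclipped_quantile))
```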
@@ -28,6 +28,8 @@ TAG = 'DP monitor'
class PrivacyMonitorFactory:
    """
    Factory class of DP training's privacy monitor.
    For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_differential_privacy.html#%E5%B7%AE%E5%88%86%E9%9A%90%E7%A7%81>`_
    """
    def __init__(self):
@@ -38,8 +40,6 @@ class PrivacyMonitorFactory:
        """
        Create a privacy monitor class.
        For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_differential_privacy.html#%E5%B7%AE%E5%88%86%E9%9A%90%E7%A7%81>`_
        Args:
            policy (str): Monitor policy, 'rdp' and 'zcdp' are supported
                by now. If policy is 'rdp', the monitor will compute the
@@ -74,11 +74,11 @@ class RDPMonitor(Callback):
    mechanism is said to have ε'-Renyi differential privacy of order α, it
    also satisfies conventional differential privacy (ε, δ) as below:
    For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_differential_privacy.html#%E5%B7%AE%E5%88%86%E9%9A%90%E7%A7%81>`_
    .. math::
        (ε'+\frac{log(1/δ)}{α-1}, δ)
    For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_differential_privacy.html#%E5%B7%AE%E5%88%86%E9%9A%90%E7%A7%81>`_
    Reference: `Rényi Differential Privacy of the Sampled Gaussian Mechanism
    <https://arxiv.org/abs/1908.10530>`_
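The conversion in the math block can be checked numerically; a small sketch with an illustrative delta:

```python
import numpy as np

def rdp_to_dp(eps_rdp, alpha, delta=1e-5):
    """(alpha, eps')-RDP implies (eps' + log(1/delta)/(alpha - 1), delta)-DP."""
    return eps_rdp + np.log(1.0 / delta) / (alpha - 1.0)

print(rdp_to_dp(eps_rdp=0.5, alpha=32))  # ~0.87 for delta=1e-5
```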
@@ -363,14 +363,15 @@ class ZCDPMonitor(Callback):
    if a randomized mechanism is said to have ρ-zCDP, it also satisfies
    conventional differential privacy (ε, δ) as below:
    For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_differential_privacy.html#%E5%B7%AE%E5%88%86%E9%9A%90%E7%A7%81>`_
    .. math::
        (ρ+2\sqrt{ρ*log(1/δ)}, δ)
    It should be noted that ZCDPMonitor is not suitable for subsampling
    noise mechanisms (such as NoiseAdaGaussianRandom and NoiseGaussianRandom).
    The matching noise mechanism of ZCDP will be developed in the future.
    For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_differential_privacy.html#%E5%B7%AE%E5%88%86%E9%9A%90%E7%A7%81>`_
    Reference: `Concentrated Differentially Private Gradient Descent with
    Adaptive per-Iteration Privacy Budget <https://arxiv.org/abs/1808.09501>`_
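The zCDP-to-DP conversion above, as the same kind of sketch:

```python
import numpy as np

def zcdp_to_dp(rho, delta=1e-5):
    """rho-zCDP implies (rho + 2*sqrt(rho*log(1/delta)), delta)-DP."""
    return rho + 2.0 * np.sqrt(rho * np.log(1.0 / delta))

print(zcdp_to_dp(rho=0.1))  # ~2.25 for delta=1e-5
```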
@@ -67,6 +67,7 @@ def tensor_grad_scale(scale, grad):
class DPModel(Model):
    """
    DPModel is used for constructing a model for differential privacy training.
    This class overloads mindspore.train.model.Model.
    For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_differential_privacy.html#%E5%B7%AE%E5%88%86%E9%9A%90%E7%A7%81>`_
@@ -82,7 +83,7 @@ class DPModel(Model):
        Default: None.
    Raises:
        ValueError: If DPOptimizer and noise_mecn are both None or not None.
        ValueError: If DPOptimizer and noise_mech are both None or not None.
        ValueError: If noise_mech or DPOptimizer's mech method is adaptive while clip_mech is not None.
    """
@@ -139,7 +139,7 @@ def _get_attack_model(features, labels, config, n_jobs=-1):
        sklearn.BaseEstimator, trained model specified by config["method"].
    Examples:
        >>> from mindarmour.privacy.evaluation.attacker import get_attack_model
        >>> from mindarmour.privacy.evaluation.attacker import _get_attack_model
        >>> features = np.random.randn(10, 10)
        >>> labels = np.random.randint(0, 2, 10)
        >>> config = {"method": "knn", "params": {"n_neighbors": [3, 5]}}
@@ -94,8 +94,11 @@ def _softmax_cross_entropy(logits, labels, epsilon=1e-12):
class MembershipInference:
    """
    Evaluation proposed by Shokri, Stronati, Song and Shmatikov is a grey-box attack.
    The attack requires loss or logits results of training samples.
    Proposed by Shokri, Stronati, Song and Shmatikov, membership inference is a grey-box attack
    for inferring a user's private data. It requires the loss or logits results of the training samples.
    (Privacy refers to sensitive attributes of a single user.)
    For details, please refer to the `Tutorial <https://mindspore.cn/mindarmour/docs/en/master/test_model_security_membership_inference.html>`_
    References: `Reza Shokri, Marco Stronati, Congzheng Song, Vitaly Shmatikov.
    Membership Inference Attacks against Machine Learning Models. 2017.
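At its core the attack trains a classifier over loss (or logits) features. A self-contained toy sketch with synthetic loss values; the distributions and the kNN attacker are assumptions for illustration, not the MembershipInference API:

```python
import numpy as np
from sklearn.neighbors import KNeighborsClassifier

# Hypothetical loss values: training members tend to have lower loss.
member_losses = np.random.exponential(0.2, size=100)
nonmember_losses = np.random.exponential(1.0, size=100)

features = np.concatenate([member_losses, nonmember_losses]).reshape(-1, 1)
labels = np.concatenate([np.ones(100), np.zeros(100)])  # 1 = member, 0 = non-member

attacker = KNeighborsClassifier(n_neighbors=3).fit(features, labels)
print(attacker.predict([[0.15]]))  # a low loss suggests a training member
```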
@@ -25,6 +25,7 @@ TAG = 'suppress masker'
class SuppressMasker(Callback):
    """
    Periodically check the status of the suppress privacy function and toggle the suppress operation.
    For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_suppress_privacy.html#%E5%BC%95%E5%85%A5%E6%8A%91%E5%88%B6%E9%9A%90%E7%A7%81%E8%AE%AD%E7%BB%83>`_
    Args:
@@ -33,7 +34,6 @@ class SuppressMasker(Callback):
    Examples:
        >>> import mindspore.nn as nn
        >>> import mindspore.dataset as ds
        >>> import mindspore.ops.operations as P
        >>> from mindspore import context
        >>> from mindspore.nn import Accuracy
@@ -63,7 +63,6 @@ class SuppressPrivacyFactory:
    Examples:
        >>> import mindspore.nn as nn
        >>> import mindspore.dataset as ds
        >>> import mindspore.ops.operations as P
        >>> from mindspore import context
        >>> from mindspore.nn import Accuracy
@@ -113,6 +112,10 @@ class SuppressPrivacyFactory:
class SuppressCtrl(Cell):
    """
    Complete the suppress privacy operation, including computing the suppress ratio,
    finding the parameters that should be suppressed, and suppressing these
    parameters permanently.
    For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_suppress_privacy.html#%E5%BC%95%E5%85%A5%E6%8A%91%E5%88%B6%E9%9A%90%E7%A7%81%E8%AE%AD%E7%BB%83>`_
    Args:
@@ -56,7 +56,9 @@ def tensor_grad_scale(scale, grad):
class SuppressModel(Model):
    """
    This class overloads mindspore.train.model.Model.
    Complete model training function. The suppress privacy function is embedded into the
    overloaded mindspore.train.model.Model.
    For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_suppress_privacy.html>`_
    Args:
@@ -25,7 +25,7 @@ from mindarmour.utils._check_param import check_param_type, check_param_in_range
class OodDetector:
    """
    Train the OOD detector.
    The abstract class of the out-of-distribution detector.
    Args:
        model (Model): The training model.
@@ -55,7 +55,9 @@ class OodDetector:
    def get_optimal_threshold(self, label, ds_eval):
        """
        Get the optimal threshold.
        Get the optimal threshold. Try to find an optimal threshold value to
        detect OOD examples. The optimal threshold is calculated by a labeled
        dataset `ds_eval`.
        Args:
            label (numpy.ndarray): The label whether an image is in-distribution or out-of-distribution.
@@ -67,7 +69,9 @@ class OodDetector:
    def ood_predict(self, threshold, ds_test):
        """
        The out-of-distribution detection.
        The out-of-distribution detection. This function aims to detect whether the images,
        provided as `ds_test`, are OOD examples or not. If the prediction score of one
        image is larger than `threshold`, this image is out-of-distribution.
        Args:
            threshold (float): The threshold to judge OOD data. One can set the value by experience
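The threshold logic described in the two methods above amounts to a comparison plus a search over candidate thresholds; a sketch assuming scores where larger means more OOD and 1/0 ground-truth labels:

```python
import numpy as np

def ood_flags(scores, threshold):
    """Flag a sample as OOD when its prediction score exceeds the threshold."""
    return scores > threshold

def optimal_threshold(scores, labels):
    """Pick the candidate threshold whose flags best match the labeled data."""
    candidates = np.linspace(scores.min(), scores.max(), 101)
    accuracy = [np.mean(ood_flags(scores, t) == labels) for t in candidates]
    return candidates[int(np.argmax(accuracy))]
```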
@@ -174,7 +178,9 @@ class OodDetectorFeatureCluster(OodDetector):
    def get_optimal_threshold(self, label, ds_eval):
        """
        Get the optimal threshold.
        Get the optimal threshold. Try to find an optimal threshold value to
        detect OOD examples. The optimal threshold is calculated by a labeled
        dataset `ds_eval`.
        Args:
            label (numpy.ndarray): The label whether an image is in-distribution or out-of-distribution.
@@ -204,7 +210,9 @@ class OodDetectorFeatureCluster(OodDetector):
    def ood_predict(self, threshold, ds_test):
        """
        The out-of-distribution detection.
        The out-of-distribution detection. This function aims to detect whether the images,
        provided as `ds_test`, are OOD examples or not. If the prediction score of one
        image is larger than `threshold`, this image is out-of-distribution.
        Args:
            threshold (float): The threshold to judge OOD data. One can set the value by experience
@@ -28,7 +28,9 @@ TAG = 'FaultInjector'
class FaultInjector:
    """
    Fault injection for deep neural networks and evaluate performance.
    The fault injection module simulates various fault scenarios for deep neural networks and
    evaluates the performance and reliability of the model.
    For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/fault_injection.html>`_
    Args:
@@ -40,7 +42,6 @@ class FaultInjector:
    Examples:
        >>> from mindspore import Model
        >>> import mindspore.dataset as ds
        >>> import mindspore.ops.operations as P
        >>> from mindarmour.reliability.model_fault_injection.fault_injection import FaultInjector
        >>> class Net(nn.Cell):
@@ -64,7 +65,7 @@ class FaultInjector:
        >>> model = Model(net)
        >>> ds_eval = ds.GeneratorDataset(dataset_generator, ['image', 'label'])
        >>> fi_type = ['bitflips_random', 'bitflips_designated', 'random', 'zeros',
        'nan', 'inf', 'anti_activation', 'precision_loss']
        ... 'nan', 'inf', 'anti_activation', 'precision_loss']
        >>> fi_mode = ['single_layer', 'all_layer']
        >>> fi_size = [1]
        >>> fi = FaultInjector(model, ds_eval, fi_type, fi_mode, fi_size)
@@ -30,6 +30,8 @@ class LogUtil:
    """
    Logging module.
    Recording the logging statistics over time in long-running scripts.
    Raises:
        SyntaxError: If this class is instantiated.
    """
@@ -216,6 +216,7 @@ class GradWrap(Cell):
        ...     out = self._softmax(inputs)
        ...     out = self._Dense(out)
        ...     return self._squeeze(out)
        >>> net = Net()
        >>> data = Tensor(np.ones([2, 1, 10]).astype(np.float32)*0.01)
        >>> labels = Tensor(np.ones([2, 10]).astype(np.float32))
>>> num_classes = 10 | |||