
!343 Fix Math Formula Issue

Merge pull request !343 from 张澍坤/master
tags/v1.8.0
i-robot, 3 years ago
commit 475589f98a
No known key found for this signature in database. GPG Key ID: 173E9B9CA92EEF8F
11 changed files with 31 additions and 40 deletions
1. +0 -12 mindarmour/adv_robustness/evaluations/defense_evaluation.py
2. +11 -11 mindarmour/privacy/diff_privacy/mechanisms/mechanisms.py
3. +3 -3 mindarmour/privacy/diff_privacy/monitor/monitor.py
4. +1 -1 mindarmour/privacy/diff_privacy/train/model.py
5. +1 -1 mindarmour/privacy/evaluation/membership_inference.py
6. +1 -1 mindarmour/privacy/sup_privacy/mask_monitor/masker.py
7. +6 -4 mindarmour/privacy/sup_privacy/sup_ctrl/conctrl.py
8. +1 -1 mindarmour/privacy/sup_privacy/train/model.py
9. +1 -1 mindarmour/reliability/concept_drift/concept_drift_check_images.py
10. +1 -1 mindarmour/reliability/concept_drift/concept_drift_check_time_series.py
11. +5 -4 mindarmour/reliability/model_fault_injection/fault_injection.py

+0 -12 mindarmour/adv_robustness/evaluations/defense_evaluation.py

@@ -70,9 +70,6 @@ class DefenseEvaluate:

Returns:
float, the higher, the more successful the defense is.

- Examples:
-     >>> def_eval.cav()
"""
def_succ_num = np.sum(np.argmax(self._def_preds, axis=1)
== self._true_labels)
@@ -87,9 +84,6 @@ class DefenseEvaluate:

Returns:
float, the higher, the more successful the defense is.

- Examples:
-     >>> def_eval.crr()
"""
cond1 = np.argmax(self._def_preds, axis=1) == self._true_labels
cond2 = np.argmax(self._raw_preds, axis=1) != self._true_labels
@@ -118,9 +112,6 @@ class DefenseEvaluate:
- float, the lower, the more successful the defense is.

- If return value == -1, len(idxes) == 0.

- Examples:
-     >>> def_eval.ccv()
"""
idxes = np.arange(self._num_samples)
cond1 = np.argmax(self._def_preds, axis=1) == self._true_labels
@@ -147,9 +138,6 @@ class DefenseEvaluate:
more successful the defense.

- If return value == -1, idxes == 0.

- Examples:
-     >>> def_eval.cos()
"""
idxes = np.arange(self._num_samples)
cond1 = np.argmax(self._def_preds, axis=1) == self._true_labels
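
The removed Examples called cav() and crr() on a pre-built def_eval object, which made them unusable as standalone doctests. For readers tracing the metrics, here is a minimal numpy sketch of the quantities the surrounding hunks compute, on toy arrays; the DefenseEvaluate wrapper and its final normalization are not reproduced here.

```python
import numpy as np

# Toy data: predictions of the raw and the defended model on 4 samples, 3 classes.
raw_preds = np.array([[0.7, 0.2, 0.1], [0.1, 0.8, 0.1], [0.4, 0.3, 0.3], [0.2, 0.6, 0.2]])
def_preds = np.array([[0.8, 0.1, 0.1], [0.2, 0.7, 0.1], [0.1, 0.8, 0.1], [0.6, 0.2, 0.2]])
true_labels = np.array([0, 1, 1, 0])
num_samples = true_labels.size

# From the cav() hunk: number of samples the defended model classifies correctly.
def_succ_num = np.sum(np.argmax(def_preds, axis=1) == true_labels)

# From the crr() hunk: fraction of samples that were wrong before the defense and right after it.
cond1 = np.argmax(def_preds, axis=1) == true_labels
cond2 = np.argmax(raw_preds, axis=1) != true_labels
rectified_ratio = np.sum(cond1 * cond2) / num_samples

print(def_succ_num, rectified_ratio)  # 4 and 0.5 for the toy arrays above
```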


+11 -11 mindarmour/privacy/diff_privacy/mechanisms/mechanisms.py

@@ -39,7 +39,7 @@ class ClipMechanismsFactory:
Wrapper of clip noise generating mechanisms. It supports Adaptive Clipping with
Gaussian Random Noise for now.

- For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_differential_privacy.html#%E5%B7%AE%E5%88%86%E9%9A%90%E7%A7%81>`_
+ For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_differential_privacy.html#%E5%B7%AE%E5%88%86%E9%9A%90%E7%A7%81>`_.

"""

@@ -49,7 +49,7 @@ class ClipMechanismsFactory:
@staticmethod
def create(mech_name, decay_policy='Linear', learning_rate=0.001,
target_unclipped_quantile=0.9, fraction_stddev=0.01, seed=0):
"""
r"""
Args:
mech_name(str): Clip noise generated strategy, support 'Gaussian' now.
decay_policy(str): Decay policy of adaptive clipping, decay_policy must
@@ -57,7 +57,7 @@ class ClipMechanismsFactory:
learning_rate(float): Learning rate of update norm clip. Default: 0.001.
target_unclipped_quantile(float): Target quantile of norm clip. Default: 0.9.
fraction_stddev(float): The stddev of Gaussian normal which used in
- empirical_fraction, the formula is :math:`empirical_fraction + N(0, fraction_stddev)`.
+ empirical_fraction, the formula is :math:`empirical\_fraction + N(0, fraction\_stddev)`.
Default: 0.01.
seed(int): Original random seed, if seed=0 random normal will use secure
random number. IF seed!=0 random normal will generate values using
@@ -100,7 +100,7 @@ class NoiseMechanismsFactory:
Wrapper of noise generating mechanisms. It supports Gaussian Random Noise and
Adaptive Gaussian Random Noise for now.

- For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_differential_privacy.html#%E5%B7%AE%E5%88%86%E9%9A%90%E7%A7%81>`_
+ For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_differential_privacy.html#%E5%B7%AE%E5%88%86%E9%9A%90%E7%A7%81>`_.

"""
def __init__(self):
@@ -168,9 +168,9 @@ class _Mechanisms(Cell):


class NoiseGaussianRandom(_Mechanisms):
"""
r"""
Generate noise in Gaussian Distribution with :math:`mean=0` and
- :math:`standard deviation = norm_bound * initial_noise_multiplier`.
+ :math:`standard\_deviation = norm\_bound * initial\_noise\_multiplier`.

Args:
norm_bound(float): Clipping bound for the l2 norm of the gradients.
@@ -347,11 +347,11 @@ class _MechanismsParamsUpdater(Cell):


class AdaClippingWithGaussianRandom(Cell):
"""
Adaptive clipping. If `decay_policy` is 'Linear', the update formula :math:`norm_bound = norm_bound -
learning_rate*(beta - target_unclipped_quantile)`.
If `decay_policy` is 'Geometric', the update formula is :math:`norm_bound =
norm_bound*exp(-learning_rate*(empirical_fraction - target_unclipped_quantile))`.
r"""
Adaptive clipping. If `decay_policy` is 'Linear', the update formula :math:`norm\_bound = norm\_bound -
learning\_rate*(beta - target\_unclipped\_quantile)`.
If `decay_policy` is 'Geometric', the update formula is :math:`norm\_bound =
norm\_bound*exp(-learning\_rate*(empirical\_fraction - target\_unclipped\_quantile))`.
where beta is the empirical fraction of samples with the value at most
`target_unclipped_quantile`.
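
These hunks are the heart of the merge: the docstrings switch to raw strings (r""") and escape the underscores inside :math: roles, so Sphinx renders names such as norm_bound as literal identifiers rather than subscripts, and Python stops warning about invalid escape sequences. A minimal sketch of the two update rules and the noise scale described in these docstrings, written in the corrected style; the function names are assumptions for illustration, not the library's implementation.

```python
import numpy as np


def update_norm_bound(norm_bound, empirical_fraction, target_unclipped_quantile,
                      learning_rate=0.001, decay_policy='Linear'):
    r"""
    Sketch of the adaptive clipping update quoted above.

    Linear:    :math:`norm\_bound = norm\_bound - learning\_rate*(beta - target\_unclipped\_quantile)`
    Geometric: :math:`norm\_bound = norm\_bound*exp(-learning\_rate*(empirical\_fraction - target\_unclipped\_quantile))`

    Here ``empirical_fraction`` plays the role of beta, the fraction of samples
    whose gradient norm falls below the current clip bound.
    """
    if decay_policy == 'Linear':
        return norm_bound - learning_rate * (empirical_fraction - target_unclipped_quantile)
    return norm_bound * np.exp(-learning_rate * (empirical_fraction - target_unclipped_quantile))


def gaussian_noise(shape, norm_bound, initial_noise_multiplier, seed=None):
    r"""Noise with :math:`mean=0` and :math:`standard\_deviation = norm\_bound * initial\_noise\_multiplier`."""
    rng = np.random.default_rng(seed)
    return rng.normal(0.0, norm_bound * initial_noise_multiplier, size=shape)


print(update_norm_bound(1.0, empirical_fraction=0.8, target_unclipped_quantile=0.9))
print(gaussian_noise((2, 3), norm_bound=1.0, initial_noise_multiplier=1.5, seed=0))
```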



+3 -3 mindarmour/privacy/diff_privacy/monitor/monitor.py

@@ -28,7 +28,7 @@ TAG = 'DP monitor'
class PrivacyMonitorFactory:
"""
Factory class of DP training's privacy monitor.
- For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_differential_privacy.html#%E5%B7%AE%E5%88%86%E9%9A%90%E7%A7%81>`_
+ For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_differential_privacy.html#%E5%B7%AE%E5%88%86%E9%9A%90%E7%A7%81>`_.

"""

@@ -77,7 +77,7 @@ class RDPMonitor(Callback):
.. math::
(ε'+\frac{log(1/δ)}{α-1}, δ)

- For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_differential_privacy.html#%E5%B7%AE%E5%88%86%E9%9A%90%E7%A7%81>`_
+ For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_differential_privacy.html#%E5%B7%AE%E5%88%86%E9%9A%90%E7%A7%81>`_.

Reference: `Rényi Differential Privacy of the Sampled Gaussian Mechanism
<https://arxiv.org/abs/1908.10530>`_
@@ -370,7 +370,7 @@ class ZCDPMonitor(Callback):
noise mechanisms(such as NoiseAdaGaussianRandom and NoiseGaussianRandom).
The matching noise mechanism of ZCDP will be developed in the future.

- For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_differential_privacy.html#%E5%B7%AE%E5%88%86%E9%9A%90%E7%A7%81>`_
+ For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_differential_privacy.html#%E5%B7%AE%E5%88%86%E9%9A%90%E7%A7%81>`_.

Reference: `Concentrated Differentially Private Gradient Descent with
Adaptive per-Iteration Privacy Budget <https://arxiv.org/abs/1808.09501>`_
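
The .. math:: block in the RDPMonitor docstring above states how a Rényi guarantee of order α is turned into an (ε, δ) guarantee. A hedged one-function sketch of that conversion, assuming eps_prime is the Rényi ε already computed for order alpha (the monitor additionally derives eps_prime from the noise mechanism, which this sketch takes as given):

```python
import math


def rdp_to_dp(eps_prime, alpha, delta):
    """Convert an (alpha, eps') Renyi DP guarantee to (eps, delta)-DP, per the formula above."""
    return eps_prime + math.log(1.0 / delta) / (alpha - 1.0), delta


# Example: a Renyi epsilon of 0.5 at order 10 with delta = 1e-5.
print(rdp_to_dp(eps_prime=0.5, alpha=10.0, delta=1e-5))  # roughly (1.779, 1e-05)
```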


+1 -1 mindarmour/privacy/diff_privacy/train/model.py

@@ -70,7 +70,7 @@ class DPModel(Model):
DPModel is used for constructing a model for differential privacy training.
This class is overload mindspore.train.model.Model.

- For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_differential_privacy.html#%E5%B7%AE%E5%88%86%E9%9A%90%E7%A7%81>`_
+ For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_differential_privacy.html#%E5%B7%AE%E5%88%86%E9%9A%90%E7%A7%81>`_.

Args:
micro_batches (int): The number of small batches split from an original


+1 -1 mindarmour/privacy/evaluation/membership_inference.py

@@ -98,7 +98,7 @@ class MembershipInference:
for inferring user's privacy data. It requires loss or logits results of the training samples.
(Privacy refers to some sensitive attributes of a single user).

- For details, please refer to the `Tutorial <https://mindspore.cn/mindarmour/docs/en/master/test_model_security_membership_inference.html>`_
+ For details, please refer to the `Tutorial <https://mindspore.cn/mindarmour/docs/en/master/test_model_security_membership_inference.html>`_.

References: `Reza Shokri, Marco Stronati, Congzheng Song, Vitaly Shmatikov.
Membership Inference Attacks against Machine Learning Models. 2017.


+1 -1 mindarmour/privacy/sup_privacy/mask_monitor/masker.py

@@ -27,7 +27,7 @@ TAG = 'suppress masker'
class SuppressMasker(Callback):
"""
Periodicity check suppress privacy function status and toggle suppress operation.
- For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_suppress_privacy.html#%E5%BC%95%E5%85%A5%E6%8A%91%E5%88%B6%E9%9A%90%E7%A7%81%E8%AE%AD%E7%BB%83>`_
+ For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_suppress_privacy.html#%E5%BC%95%E5%85%A5%E6%8A%91%E5%88%B6%E9%9A%90%E7%A7%81%E8%AE%AD%E7%BB%83>`_.

Args:
model (SuppressModel): SuppressModel instance.


+6 -4 mindarmour/privacy/sup_privacy/sup_ctrl/conctrl.py

@@ -32,7 +32,11 @@ TAG = 'Suppression training.'


class SuppressPrivacyFactory:
""" Factory class of SuppressCtrl mechanisms"""
"""
Factory class of SuppressCtrl mechanisms.

For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_suppress_privacy.html#%E5%BC%95%E5%85%A5%E6%8A%91%E5%88%B6%E9%9A%90%E7%A7%81%E8%AE%AD%E7%BB%83>`_.
"""

def __init__(self):
pass
@@ -41,8 +45,6 @@ class SuppressPrivacyFactory:
def create(networks, mask_layers, policy="local_train", end_epoch=10, batch_num=20, start_epoch=3,
mask_times=1000, lr=0.05, sparse_end=0.90, sparse_start=0.0):
"""
- For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_suppress_privacy.html#%E5%BC%95%E5%85%A5%E6%8A%91%E5%88%B6%E9%9A%90%E7%A7%81%E8%AE%AD%E7%BB%83>`_
-
Args:
networks (Cell): The training network.
This networks parameter should be same as 'network' parameter of SuppressModel().
@@ -116,7 +118,7 @@ class SuppressCtrl(Cell):
finding the parameters that should be suppressed, and suppress these
parameters permanently.

- For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_suppress_privacy.html#%E5%BC%95%E5%85%A5%E6%8A%91%E5%88%B6%E9%9A%90%E7%A7%81%E8%AE%AD%E7%BB%83>`_
+ For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_suppress_privacy.html#%E5%BC%95%E5%85%A5%E6%8A%91%E5%88%B6%E9%9A%90%E7%A7%81%E8%AE%AD%E7%BB%83>`_.

Args:
networks (Cell): The training network.


+1 -1 mindarmour/privacy/sup_privacy/train/model.py

@@ -59,7 +59,7 @@ class SuppressModel(Model):
Complete model train function. The suppress privacy function is embedded into the overload
mindspore.train.model.Model.

- For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_suppress_privacy.html>`_
+ For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/protect_user_privacy_with_suppress_privacy.html>`_.

Args:
network (Cell): The training network.


+1 -1 mindarmour/reliability/concept_drift/concept_drift_check_images.py

@@ -89,7 +89,7 @@ class OodDetectorFeatureCluster(OodDetector):
the testing data features and the clustering centers determines whether an image is an out-of-distribution(OOD)
image or not.

- For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/concept_drift_images.html>`_
+ For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/concept_drift_images.html>`_.

Args:
model (Model):The training model.


+1 -1 mindarmour/reliability/concept_drift/concept_drift_check_time_series.py

@@ -23,7 +23,7 @@ from mindarmour.utils._check_param import check_param_type, check_param_in_range
class ConceptDriftCheckTimeSeries:
r"""
ConceptDriftCheckTimeSeries is used for example series distribution change detection.
- For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/concept_drift_time_series.html>`_
+ For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/concept_drift_time_series.html>`_.

Args:
window_size(int): Size of a concept window, no less than 10. If given the input data,


+5 -4 mindarmour/reliability/model_fault_injection/fault_injection.py

@@ -31,7 +31,7 @@ class FaultInjector:
Fault injection module simulates various fault scenarios for deep neural networks and evaluates
performance and reliability of the model.

- For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/fault_injection.html>`_
+ For details, please check `Tutorial <https://mindspore.cn/mindarmour/docs/zh-CN/master/fault_injection.html>`_.

Args:
model (Model): The model need to be evaluated.
@@ -196,7 +196,7 @@ class FaultInjector:

def kick_off(self, ds_data, ds_label, iter_times=100):
"""
- Startup and return final results.
+ Startup and return final results after Fault Injection.

Args:
ds_data(np.ndarray): Input data for testing. The evaluation is based on this data.
@@ -241,9 +241,10 @@ class FaultInjector:

def metrics(self):
"""
- metrics of final result.
+ Metrics of final result.

Returns:
- list, the summary of result.
+ - list, the summary of result.
"""
result_summary = []
single_layer_acc = []

