Browse Source

!242 Update api description

Merge pull request !242 from pkuliuliu/code_docs
tags/v1.6.0
i-robot Gitee 4 years ago
parent
commit
f7b98c7a58
14 changed files with 31 additions and 23 deletions
  1. +1
    -1
      mindarmour/adv_robustness/attacks/attack.py
  2. +3
    -1
      mindarmour/adv_robustness/attacks/black/hop_skip_jump_attack.py
  3. +7
    -7
      mindarmour/adv_robustness/attacks/black/natural_evolutionary_strategy.py
  4. +2
    -2
      mindarmour/adv_robustness/attacks/black/pointwise_attack.py
  5. +1
    -1
      mindarmour/adv_robustness/attacks/gradient_method.py
  6. +0
    -2
      mindarmour/adv_robustness/attacks/iterative_gradient_method.py
  7. +1
    -1
      mindarmour/adv_robustness/attacks/jsma.py
  8. +2
    -2
      mindarmour/adv_robustness/defenses/defense.py
  9. +1
    -1
      mindarmour/adv_robustness/detectors/black/similarity_detector.py
  10. +1
    -1
      mindarmour/adv_robustness/detectors/detector.py
  11. +5
    -1
      mindarmour/adv_robustness/detectors/ensemble_detector.py
  12. +1
    -1
      mindarmour/adv_robustness/detectors/mag_net.py
  13. +5
    -1
      mindarmour/adv_robustness/detectors/region_based_detector.py
  14. +1
    -1
      mindarmour/adv_robustness/detectors/spatial_smoothing.py

+ 1
- 1
mindarmour/adv_robustness/attacks/attack.py View File

@@ -45,7 +45,7 @@ class Attack:
examples are generated.
labels (Union[numpy.ndarray, tuple]): Original/target labels. \
For each input if it has more than one label, it is wrapped in a tuple.
batch_size (int): The number of samples in one batch.
batch_size (int): The number of samples in one batch. Default: 64.

Returns:
numpy.ndarray, generated adversarial examples.


+ 3
- 1
mindarmour/adv_robustness/attacks/black/hop_skip_jump_attack.py View File

@@ -53,10 +53,12 @@ class HopSkipJumpAttack(Attack):
estimation. Default: 1000.
stepsize_search (str): Indicating how to search for stepsize; Possible
values are 'geometric_progression', 'grid_search'.
num_iterations (int): The number of iterations. Default: 64.
Default: 'geometric_progression'.
num_iterations (int): The number of iterations. Default: 20.
gamma (float): Used to set binary search threshold theta. Default: 1.0.
For l2 attack the binary search threshold `theta` is
:math:`gamma / d^{3/2}`. For linf attack is :math:`gamma / d^2`.
Default: 1.0.
constraint (str): The norm distance to optimize. Possible values are 'l2',
'linf'. Default: 'l2'.
batch_size (int): Batch size. Default: 32.


+ 7
- 7
mindarmour/adv_robustness/attacks/black/natural_evolutionary_strategy.py View File

@@ -55,21 +55,21 @@ class NES(Attack):
Args:
model (BlackModel): Target model to be attacked.
scene (str): Scene in 'Label_Only', 'Partial_Info' or 'Query_Limit'.
max_queries (int): Maximum query numbers to generate an adversarial example. Default: 500000.
max_queries (int): Maximum query numbers to generate an adversarial example. Default: 10000.
top_k (int): For Partial-Info or Label-Only setting, indicating how much (Top-k) information is
available for the attacker. For Query-Limited setting, this input should be set as -1. Default: -1.
num_class (int): Number of classes in dataset. Default: 10.
batch_size (int): Batch size. Default: 96.
batch_size (int): Batch size. Default: 128.
epsilon (float): Maximum perturbation allowed in attack. Default: 0.3.
samples_per_draw (int): Number of samples drawn in antithetic sampling. Default: 96.
samples_per_draw (int): Number of samples drawn in antithetic sampling. Default: 128.
momentum (float): Momentum. Default: 0.9.
learning_rate (float): Learning rate. Default: 1e-2.
max_lr (float): Max Learning rate. Default: 1e-2.
min_lr (float): Min Learning rate. Default: 5e-5.
learning_rate (float): Learning rate. Default: 1e-3.
max_lr (float): Max Learning rate. Default: 5e-2.
min_lr (float): Min Learning rate. Default: 5e-4.
sigma (float): Step size of random noise. Default: 1e-3.
plateau_length (int): Length of plateau used in Annealing algorithm. Default: 20.
plateau_drop (float): Drop of plateau used in Annealing algorithm. Default: 2.0.
adv_thresh (float): Threshold of adversarial. Default: 0.15.
adv_thresh (float): Threshold of adversarial. Default: 0.25.
zero_iters (int): Number of points to use for the proxy score. Default: 10.
starting_eps (float): Starting epsilon used in Label-Only setting. Default: 1.0.
starting_delta_eps (float): Delta epsilon used in Label-Only setting. Default: 0.5.


+ 2
- 2
mindarmour/adv_robustness/attacks/black/pointwise_attack.py View File

@@ -39,8 +39,8 @@ class PointWiseAttack(Attack):

Args:
model (BlackModel): Target model.
max_iter (int): Max rounds of iteration to generate adversarial image.
search_iter (int): Max rounds of binary search.
max_iter (int): Max rounds of iteration to generate adversarial image. Default: 100.
search_iter (int): Max rounds of binary search. Default: 10.
is_targeted (bool): If True, targeted attack. If False, untargeted attack. Default: False.
init_attack (Attack): Attack used to find a starting point. Default: None.
sparse (bool): If True, input labels are sparse-encoded. If False, input labels are one-hot-encoded.


+ 1
- 1
mindarmour/adv_robustness/attacks/gradient_method.py View File

@@ -213,7 +213,7 @@ class RandomFastGradientMethod(FastGradientMethod):
bounds (tuple): Upper and lower bounds of data, indicating the data range.
In form of (clip_min, clip_max). Default: (0.0, 1.0).
norm_level (Union[int, numpy.inf]): Order of the norm.
Possible values: np.inf, 1 or 2. Default: 2.
Possible values: np.inf, 1 or 2. Default: 2.
is_targeted (bool): If True, targeted attack. If False, untargeted
attack. Default: False.
loss_fn (Loss): Loss function for optimization. If None, the input network \


+ 0
- 2
mindarmour/adv_robustness/attacks/iterative_gradient_method.py View File

@@ -182,8 +182,6 @@ class BasicIterativeMethod(IterativeGradientMethod):
nb_iter (int): Number of iteration. Default: 5.
loss_fn (Loss): Loss function for optimization. If None, the input network \
is already equipped with loss function. Default: None.
attack (class): The single step gradient method of each iteration. In
this class, FGSM is used.

Examples:
>>> attack = BasicIterativeMethod(network, loss_fn=SoftmaxCrossEntropyWithLogits(sparse=False))


+ 1
- 1
mindarmour/adv_robustness/attacks/jsma.py View File

@@ -46,7 +46,7 @@ class JSMAAttack(Attack):
box_max (float): Upper bound of input of the target model. Default: 1.0.
theta (float): Change ratio of one pixel (relative to
input data range). Default: 1.0.
max_iteration (int): Maximum round of iteration. Default: 100.
max_iteration (int): Maximum round of iteration. Default: 1000.
max_count (int): Maximum times to change each pixel. Default: 3.
increase (bool): If True, increase perturbation. If False, decrease
perturbation. Default: True.


+ 2
- 2
mindarmour/adv_robustness/defenses/defense.py View File

@@ -62,8 +62,8 @@ class Defense:
inputs (numpy.ndarray): Samples based on which adversarial
examples are generated.
labels (numpy.ndarray): Labels of input samples.
batch_size (int): Number of samples in one batch.
epochs (int): Number of epochs.
batch_size (int): Number of samples in one batch. Default: 32.
epochs (int): Number of epochs. Default: 5.

Returns:
numpy.ndarray, loss of batch_defense operation.


+ 1
- 1
mindarmour/adv_robustness/detectors/black/similarity_detector.py View File

@@ -223,7 +223,7 @@ class SimilarityDetector(Detector):

Args:
num_of_neighbors (int): Number of the nearest neighbors.
threshold (float): Detection threshold. Default: None.
threshold (float): Detection threshold.
"""
self._num_of_neighbors = check_int_positive('num_of_neighbors',
num_of_neighbors)


+ 1
- 1
mindarmour/adv_robustness/detectors/detector.py View File

@@ -39,7 +39,7 @@ class Detector:

Args:
inputs (numpy.ndarray): The input samples to calculate the threshold.
labels (numpy.ndarray): Labels of training data.
labels (numpy.ndarray): Labels of training data. Default: None.

Raises:
NotImplementedError: It is an abstract method.


+ 5
- 1
mindarmour/adv_robustness/detectors/ensemble_detector.py View File

@@ -49,7 +49,7 @@ class EnsembleDetector(Detector):

Args:
inputs (numpy.ndarray): Data to calculate the threshold.
labels (numpy.ndarray): Labels of data.
labels (numpy.ndarray): Labels of data. Default: None.

Raises:
NotImplementedError: This function is not available in ensemble.
@@ -116,6 +116,10 @@ class EnsembleDetector(Detector):
Filter adversarial noises in input samples.
This method is not available in this class.

Args:
inputs (Union[numpy.ndarray, list, tuple]): Data been used as
references to create adversarial examples.

Raises:
NotImplementedError: This function is not available in ensemble.
"""


+ 1
- 1
mindarmour/adv_robustness/detectors/mag_net.py View File

@@ -149,7 +149,7 @@ class ErrorBasedDetector(Detector):
Set the parameters threshold.

Args:
threshold (float): Detection threshold. Default: None.
threshold (float): Detection threshold.
"""
self._threshold = check_value_positive('threshold', threshold)



+ 5
- 1
mindarmour/adv_robustness/detectors/region_based_detector.py View File

@@ -73,8 +73,12 @@ class RegionBasedDetector(Detector):
self._radius = None

def set_radius(self, radius):
"""Set radius."""
"""
Set radius.

Args:
radius (float): Radius of region.
"""
self._radius = check_param_in_range('radius', radius,
self._initial_radius,
self._max_radius)


+ 1
- 1
mindarmour/adv_robustness/detectors/spatial_smoothing.py View File

@@ -141,7 +141,7 @@ class SpatialSmoothing(Detector):
Set the parameters threshold.

Args:
threshold (float): Detection threshold. Default: None.
threshold (float): Detection threshold.
"""
self._threshold = check_value_positive('threshold', threshold)



Loading…
Cancel
Save