diff --git a/mindarmour/adv_robustness/attacks/iterative_gradient_method.py b/mindarmour/adv_robustness/attacks/iterative_gradient_method.py
index c6e80ed..e5ee3bf 100644
--- a/mindarmour/adv_robustness/attacks/iterative_gradient_method.py
+++ b/mindarmour/adv_robustness/attacks/iterative_gradient_method.py
@@ -408,12 +408,10 @@ class ProjectedGradientDescent(BasicIterativeMethod):
             np.inf, 1 or 2. Default: 'inf'.
         loss_fn (Loss): Loss function for optimization. If None, the input network \
             is already equipped with loss function. Default: None.
-        random_start (bool): If True, use random perturbs at the beginning. If False,
-            start from original samples.
     """
 
     def __init__(self, network, eps=0.3, eps_iter=0.1, bounds=(0.0, 1.0),
-                 is_targeted=False, nb_iter=5, norm_level='inf', loss_fn=None, random_start=False):
+                 is_targeted=False, nb_iter=5, norm_level='inf', loss_fn=None):
         super(ProjectedGradientDescent, self).__init__(network,
                                                        eps=eps,
                                                        eps_iter=eps_iter,
@@ -422,10 +420,6 @@ class ProjectedGradientDescent(BasicIterativeMethod):
                                                        nb_iter=nb_iter,
                                                        loss_fn=loss_fn)
         self._norm_level = check_norm_level(norm_level)
-        self._random_start = check_param_type('random_start', random_start, bool)
-
-    def _get_random_start(self, inputs):
-        return inputs + np.random.uniform(-self._eps, self._eps, size=inputs.shape).astype(np.float32)
 
     def generate(self, inputs, labels):
         """
@@ -455,8 +449,6 @@ class ProjectedGradientDescent(BasicIterativeMethod):
             clip_diff = clip_max - clip_min
         else:
             clip_diff = 1
-        if self._random_start:
-            inputs = self._get_random_start(inputs)
         for _ in range(self._nb_iter):
             inputs_tensor = to_tensor_tuple(inputs)
             labels_tensor = to_tensor_tuple(labels)
diff --git a/mindarmour/adv_robustness/defenses/projected_adversarial_defense.py b/mindarmour/adv_robustness/defenses/projected_adversarial_defense.py
index 3d4b986..0b44869 100644
--- a/mindarmour/adv_robustness/defenses/projected_adversarial_defense.py
+++ b/mindarmour/adv_robustness/defenses/projected_adversarial_defense.py
@@ -39,8 +39,6 @@ class ProjectedAdversarialDefense(AdversarialDefenseWithAttacks):
         nb_iter (int): PGD attack parameters, number of iteration.
             Default: 5.
         norm_level (str): Norm type. 'inf' or 'l2'. Default: 'inf'.
-        random_start (bool): If True, use random perturbs at the beginning. If False,
-            start from original samples.
 
     Examples:
         >>> net = Net()
@@ -56,16 +54,14 @@ class ProjectedAdversarialDefense(AdversarialDefenseWithAttacks):
                  eps=0.3,
                  eps_iter=0.1,
                  nb_iter=5,
-                 norm_level='inf',
-                 random_start=True):
+                 norm_level='inf'):
         attack = ProjectedGradientDescent(network,
                                           eps=eps,
                                           eps_iter=eps_iter,
                                           nb_iter=nb_iter,
                                           bounds=bounds,
                                           norm_level=norm_level,
-                                          loss_fn=loss_fn,
-                                          random_start=random_start)
+                                          loss_fn=loss_fn)
         super(ProjectedAdversarialDefense, self).__init__(
             network, [attack], loss_fn=loss_fn, optimizer=optimizer,
             bounds=bounds, replace_ratio=replace_ratio)