
Fix issues of code review.

jin-xiulang · 4 years ago · commit 8e32f3a33c · tags/v1.2.1
5 changed files with 35 additions and 22 deletions
  1. mindarmour/fuzz_testing/image_transform.py (+4 -6)
  2. mindarmour/privacy/diff_privacy/mechanisms/mechanisms.py (+0 -1)
  3. mindarmour/privacy/diff_privacy/monitor/monitor.py (+15 -4)
  4. mindarmour/privacy/diff_privacy/optimizer/optimizer.py (+15 -9)
  5. mindarmour/privacy/diff_privacy/train/model.py (+1 -2)

mindarmour/fuzz_testing/image_transform.py (+4 -6)

@@ -350,9 +350,9 @@ class Translate(ImageTransform):
Translate an image.

Args:
- x_bias ([int, float): X-direction translation, x=x+x_bias*image_length.
+ x_bias (Union[int, float]): X-direction translation, x = x + x_bias*image_length.
Default: 0.
- y_bias ([int, float): Y-direction translation, y=y+y_bias*image_wide.
+ y_bias (Union[int, float]): Y-direction translation, y = y + y_bias*image_wide.
Default: 0.
"""

@@ -365,10 +365,8 @@ class Translate(ImageTransform):
Set translate parameters.

Args:
- x_bias ([float, int]): X-direction translation, x=x+x_bias*image_length.
- Default: 0.
- y_bias ([float, int]): Y-direction translation, y=y+y_bias*image_wide.
- Default: 0.
+ x_bias (Union[float, int]): X-direction translation. Default: 0.
+ y_bias (Union[float, int]): Y-direction translation. Default: 0.
auto_param (bool): True if auto generate parameters. Default: False.
"""
self.auto_param = auto_param
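
The updated docstring above documents the convention x = x + x_bias*image_length and y = y + y_bias*image_wide. As a rough, standalone illustration of that convention (not the MindArmour implementation; the wrap-around shift via np.roll and the (height, width) layout are assumptions made only for this example):

import numpy as np

def translate(image, x_bias=0.0, y_bias=0.0):
    # Shift pixels by a fraction of the image size (illustration only).
    height, width = image.shape[:2]
    dx = int(x_bias * width)    # x = x + x_bias * image_length
    dy = int(y_bias * height)   # y = y + y_bias * image_wide
    return np.roll(image, shift=(dy, dx), axis=(0, 1))

img = np.arange(16, dtype=np.float32).reshape(4, 4)
shifted = translate(img, x_bias=0.25)   # columns move right by one pixel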


mindarmour/privacy/diff_privacy/mechanisms/mechanisms.py (+0 -1)

@@ -426,7 +426,6 @@ class AdaClippingWithGaussianRandom(Cell):
next_norm_bound = self._sub(norm_bound,
self._mul(self._learning_rate, grad_clip))

- # decay_policy == 'Geometric'
else:
grad_clip = self._sub(empirical_fraction,
self._target_unclipped_quantile)
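
The hunk above only removes a redundant comment, but the surrounding logic updates the adaptive norm bound from the fraction of unclipped samples. A standalone sketch of that update rule follows. The 'Linear' branch mirrors the subtraction visible in the diff; the multiplicative 'Geometric' branch is an assumption in the style of standard adaptive-clipping schemes and is not shown in this hunk.

import math

def update_norm_bound(norm_bound, empirical_fraction, target_unclipped_quantile,
                      learning_rate, decay_policy='Linear'):
    grad_clip = empirical_fraction - target_unclipped_quantile
    if decay_policy == 'Linear':
        # Matches the diff: next_norm_bound = norm_bound - learning_rate * grad_clip.
        return norm_bound - learning_rate * grad_clip
    # 'Geometric' branch: assumed multiplicative update, not taken from this diff.
    return norm_bound * math.exp(-learning_rate * grad_clip)

print(update_norm_bound(1.0, 0.8, 0.9, 0.01))               # 1.001 (bound grows slightly)
print(update_norm_bound(1.0, 0.8, 0.9, 0.01, 'Geometric'))  # ~1.001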


mindarmour/privacy/diff_privacy/monitor/monitor.py (+15 -4)

@@ -62,7 +62,7 @@ class PrivacyMonitorFactory:
return RDPMonitor(*args, **kwargs)
if policy == 'zcdp':
return ZCDPMonitor(*args, **kwargs)
- raise ValueError("Only RDP-policy or ZCDP-policy is supported by now")
+ raise ValueError("The policy must be 'rdp' or 'zcdp', but got {}".format(policy))


class RDPMonitor(Callback):
@@ -86,7 +86,8 @@ class RDPMonitor(Callback):
be used to calculate privacy spent. Default: 1.5.
max_eps (Union[float, int, None]): The maximum acceptable epsilon
budget for DP training, which is used for estimating the max
- training epochs. Default: 10.0.
+ training epochs. 'None' means there is no limit to epsilon budget.
+ Default: 10.0.
target_delta (Union[float, int, None]): Target delta budget for DP
training. If target_delta is set to be δ, then the privacy budget
δ would be fixed during the whole training process. Default: 1e-3.
@@ -94,7 +95,7 @@ class RDPMonitor(Callback):
budget for DP training, which is used for estimating the max
training epochs. Max_delta must be less than 1 and suggested
to be less than 1e-3, otherwise overflow would be encountered.
- Default: None.
+ 'None' means there is no limit to delta budget. Default: None.
target_eps (Union[float, int, None]): Target epsilon budget for DP
training. If target_eps is set to be ε, then the privacy budget
ε would be fixed during the whole training process. Default: None.
@@ -192,6 +193,7 @@ class RDPMonitor(Callback):
msg = 'One of target eps and target delta must be None'
LOGGER.error(TAG, msg)
raise ValueError(msg)
+
if dataset_sink_mode:
self._per_print_times = int(self._num_samples / self._batch_size)

@@ -208,6 +210,14 @@ class RDPMonitor(Callback):
>>> num_samples=60000, batch_size=32)
>>> suggest_epoch = rdp.max_epoch_suggest()
"""
+ if self._target_delta is not None and self._max_eps is None:
+ msg = 'max_eps should be consistent with target_delta, but got None.'
+ LOGGER.error(TAG, msg)
+ raise ValueError(msg)
+ if self._target_eps is not None and self._max_delta is None:
+ msg = 'max_delta should be consistent with target_eps, but got None.'
+ LOGGER.error(TAG, msg)
+ raise ValueError(msg)
epoch = 1
while epoch < 10000:
steps = self._num_samples // self._batch_size
@@ -427,7 +437,8 @@ class ZCDPMonitor(Callback):
initial_noise_multiplier)
if noise_decay_mode is not None:
if noise_decay_mode not in ('Step', 'Time', 'Exp'):
- msg = "Noise decay mode must be in ('Step', 'Time', 'Exp')"
+ msg = "Noise decay mode must be in ('Step', 'Time', 'Exp'), but got {}.".\
+ format(noise_decay_mode)
LOGGER.error(TAG, msg)
raise ValueError(msg)
noise_decay_rate = check_param_type('noise_decay_rate', noise_decay_rate, float)
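
Taken together, the new checks in this file mean a monitor must be configured with a matching pair of budgets: a fixed target_delta requires a non-None max_eps, and a fixed target_eps requires a non-None max_delta, before max_epoch_suggest() can run. A hedged usage sketch, assuming the re-export path mindarmour.privacy.diff_privacy and that keyword arguments are forwarded to RDPMonitor through the factory's *args/**kwargs pass-through shown above:

from mindarmour.privacy.diff_privacy import PrivacyMonitorFactory

rdp = PrivacyMonitorFactory.create(policy='rdp',
                                   num_samples=60000,
                                   batch_size=32,
                                   target_delta=1e-3,  # delta is fixed...
                                   max_eps=10.0)       # ...so max_eps must not be None
suggest_epoch = rdp.max_epoch_suggest()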


mindarmour/privacy/diff_privacy/optimizer/optimizer.py (+15 -9)

@@ -36,7 +36,7 @@ _reciprocal = P.Reciprocal()
@_grad_scale.register("Tensor", "Tensor")
def tensor_grad_scale(scale, grad):
""" grad scaling """
- return grad * _reciprocal(scale)
+ return grad*_reciprocal(scale)


class _TupleAdd(nn.Cell):
@@ -59,14 +59,14 @@ class DPOptimizerClassFactory:
micro_batches (int): The number of small batches split from an original batch. Default: 2.

Returns:
- Optimizer, Optimizer class
+ Optimizer, Optimizer class.

Examples:
>>> GaussianSGD = DPOptimizerClassFactory(micro_batches=2)
>>> GaussianSGD.set_mechanisms('Gaussian', norm_bound=1.0, initial_noise_multiplier=1.5)
>>> net_opt = GaussianSGD.create('Momentum')(params=network.trainable_params(),
- >>> learning_rate=cfg.lr,
- >>> momentum=cfg.momentum)
+ >>> learning_rate=0.001,
+ >>> momentum=0.9)
"""

def __init__(self, micro_batches=2):
@@ -76,7 +76,9 @@ class DPOptimizerClassFactory:

def set_mechanisms(self, policy, *args, **kwargs):
"""
- Get noise mechanism object.
+ Get noise mechanism object. Policies can be 'sgd', 'momentum'
+ or 'adam'. Candidate args and kwargs can be seen in class
+ NoiseMechanismsFactory of mechanisms.py.

Args:
policy (str): Choose mechanism type.
@@ -85,15 +87,15 @@ class DPOptimizerClassFactory:

def create(self, policy):
"""
- Create DP optimizer.
+ Create DP optimizer. Policies can be 'sgd', 'momentum'
+ or 'adam'.

Args:
policy (str): Choose original optimizer type.

Returns:
- Optimizer, A optimizer with DP.
+ Optimizer, an optimizer with DP.
"""
- dp_opt_class = None
policy_ = policy.lower()
if policy_ == 'sgd':
dp_opt_class = self._get_dp_optimizer_class(nn.SGD)
@@ -102,7 +104,7 @@ class DPOptimizerClassFactory:
elif policy_ == 'adam':
dp_opt_class = self._get_dp_optimizer_class(nn.Adam)
else:
- msg = "The {} optimizer is not implement, please choose ['SGD', 'Momentum', 'Adam']" \
+ msg = "The policy must be in ('SGD', 'Momentum', 'Adam'), but got {}." \
.format(policy)
LOGGER.error(TAG, msg)
raise NameError(msg)
@@ -112,6 +114,10 @@ class DPOptimizerClassFactory:
"""
Wrap original mindspore optimizer with `self._mech`.
"""
+ if self._mech is None:
+ msg = 'Noise mechanism should be given through set_mechanisms(), but got None.'
+ LOGGER.error(TAG, msg)
+ raise ValueError(msg)
mech = self._mech
micro_batches = self._micro_batches
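
The guard added to _get_dp_optimizer_class makes the required call order explicit: set_mechanisms() must be called before create(), otherwise self._mech is still None and a ValueError is raised. A sketch of the intended flow, assuming the import path mindarmour.privacy.diff_privacy and an already-built MindSpore network (named network here purely for illustration):

from mindarmour.privacy.diff_privacy import DPOptimizerClassFactory

factory = DPOptimizerClassFactory(micro_batches=2)
# 1. Configure the noise mechanism first; create() now raises ValueError without it.
factory.set_mechanisms('Gaussian', norm_bound=1.0, initial_noise_multiplier=1.5)
# 2. Build the DP-wrapped optimizer; `network` is assumed to be a defined nn.Cell.
net_opt = factory.create('Momentum')(params=network.trainable_params(),
                                     learning_rate=0.001,
                                     momentum=0.9)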



mindarmour/privacy/diff_privacy/train/model.py (+1 -2)

@@ -306,8 +306,7 @@ class _ClipGradients(nn.Cell):
"""
construct a compute flow.
"""
- # pylint: disable=consider-using-in
- if clip_type != 0 and clip_type != 1:
+ if clip_type not in (0, 1):
return grads

new_grads = ()
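
For context, clip_type values 0 and 1 conventionally mean clip-by-value and clip-by-norm in MindSpore-style code, and any other value now simply passes the gradients through, as the rewritten check shows. The standalone NumPy sketch below illustrates that convention; it is an illustration only, not the _ClipGradients implementation, and the 0/1 meaning is an assumption.

import numpy as np

def clip_gradients(grads, clip_type, clip_value):
    # Same early return as in the diff: unknown clip types pass gradients through.
    if clip_type not in (0, 1):
        return grads
    new_grads = ()
    for grad in grads:
        if clip_type == 0:    # assumed: clip each element into [-clip_value, clip_value]
            new_grads += (np.clip(grad, -clip_value, clip_value),)
        else:                 # assumed: rescale so the L2 norm is at most clip_value
            norm = np.linalg.norm(grad)
            new_grads += (grad * min(1.0, clip_value / (norm + 1e-12)),)
    return new_grads

grads = (np.array([3.0, 4.0]), np.array([0.1, -0.2]))
print(clip_gradients(grads, 1, 1.0))   # first gradient rescaled to unit norm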

