
Change TensorAdd to Add

tags/v1.2.1
pkuliuliu committed 4 years ago
commit 6f7f7fa098
31 changed files with 78 additions and 85 deletions
  1. examples/model_security/model_attacks/cv/faster_rcnn/src/FasterRcnn/resnet50.py (+1, -1)
  2. examples/model_security/model_attacks/cv/yolov3_darknet53/src/darknet.py (+1, -1)
  3. examples/model_security/model_defenses/mnist_evaluation.py (+2, -2)
  4. examples/model_security/model_defenses/mnist_similarity_detector.py (+2, -2)
  5. mindarmour/privacy/diff_privacy/mechanisms/mechanisms.py (+3, -3)
  6. mindarmour/privacy/diff_privacy/optimizer/optimizer.py (+1, -1)
  7. mindarmour/privacy/diff_privacy/train/model.py (+7, -7)
  8. mindarmour/privacy/sup_privacy/train/model.py (+1, -1)
  9. tests/__init__.py (+0, -0)
  10. tests/st/resnet50/resnet_cifar10.py (+2, -2)
  11. tests/ut/python/adv_robustness/attacks/black/test_hsja.py (+1, -1)
  12. tests/ut/python/adv_robustness/attacks/black/test_nes.py (+1, -1)
  13. tests/ut/python/adv_robustness/attacks/black/test_pointwise_attack.py (+1, -1)
  14. tests/ut/python/adv_robustness/attacks/black/test_pso_attack.py (+1, -1)
  15. tests/ut/python/adv_robustness/attacks/test_gradient_method.py (+1, -1)
  16. tests/ut/python/adv_robustness/attacks/test_jsma.py (+1, -1)
  17. tests/ut/python/adv_robustness/attacks/test_lbfgs.py (+1, -1)
  18. tests/ut/python/adv_robustness/defenses/test_ad.py (+1, -1)
  19. tests/ut/python/adv_robustness/defenses/test_ead.py (+1, -1)
  20. tests/ut/python/adv_robustness/defenses/test_nad.py (+1, -1)
  21. tests/ut/python/adv_robustness/defenses/test_pad.py (+1, -1)
  22. tests/ut/python/adv_robustness/detectors/black/test_similarity_detector.py (+2, -2)
  23. tests/ut/python/adv_robustness/detectors/test_ensemble_detector.py (+3, -3)
  24. tests/ut/python/adv_robustness/detectors/test_mag_net.py (+2, -2)
  25. tests/ut/python/adv_robustness/detectors/test_region_based_detector.py (+2, -2)
  26. tests/ut/python/privacy/diff_privacy/test_model_train.py (+7, -11)
  27. tests/ut/python/privacy/diff_privacy/test_monitor.py (+13, -16)
  28. tests/ut/python/privacy/diff_privacy/test_optimizer.py (+2, -2)
  29. tests/ut/python/privacy/evaluation/test_inversion_attack.py (+1, -1)
  30. tests/ut/python/privacy/evaluation/test_membership_inference.py (+8, -10)
  31. tests/ut/python/privacy/sup_privacy/test_model_train.py (+7, -5)
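
The common thread in these files is MindSpore's operator rename: mindspore.ops.operations.TensorAdd was deprecated in favor of Add, so every element-wise-add cell is updated. The following sketch is illustrative only and not part of the diff; it assumes a MindSpore version where P.Add is available (after the TensorAdd deprecation), and the cell name ResidualAdd is hypothetical.

# Illustrative sketch of the renamed operator (assumes MindSpore with P.Add).
import mindspore.nn as nn
import mindspore.ops.operations as P


class ResidualAdd(nn.Cell):
    """Toy cell mirroring the pattern changed throughout this commit."""

    def __init__(self):
        super(ResidualAdd, self).__init__()
        self.add = P.Add()  # previously P.TensorAdd()

    def construct(self, x, identity):
        # Element-wise addition; same semantics as the old TensorAdd operator.
        return self.add(x, identity)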

examples/model_security/model_attacks/cv/faster_rcnn/src/FasterRcnn/resnet50.py (+1, -1)

@@ -224,7 +224,7 @@ class ResidualBlockUsing(nn.Cell):
 self.bn_down_sample = self.bn_down_sample.set_train()
 if not weights_update:
 self.conv_down_sample.weight.requires_grad = False
-self.add = P.TensorAdd()
+self.add = P.Add()

 def construct(self, x):
 identity = x


examples/model_security/model_attacks/cv/yolov3_darknet53/src/darknet.py (+1, -1)

@@ -64,7 +64,7 @@ class ResidualBlock(nn.Cell):
 out_chls = out_channels//2
 self.conv1 = conv_block(in_channels, out_chls, kernel_size=1, stride=1)
 self.conv2 = conv_block(out_chls, out_channels, kernel_size=3, stride=1)
-self.add = P.TensorAdd()
+self.add = P.Add()

 def construct(self, x):
 identity = x


examples/model_security/model_defenses/mnist_evaluation.py (+2, -2)

@@ -22,7 +22,7 @@ from mindspore import context
 from mindspore import nn
 from mindspore.nn import Cell
 from mindspore.nn import SoftmaxCrossEntropyWithLogits
-from mindspore.ops.operations import TensorAdd
+from mindspore.ops.operations import Add
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
 from scipy.special import softmax

@@ -58,7 +58,7 @@ class EncoderNet(Cell):
 def __init__(self, encode_dim):
 super(EncoderNet, self).__init__()
 self._encode_dim = encode_dim
-self.add = TensorAdd()
+self.add = Add()

 def construct(self, inputs):
 """


examples/model_security/model_defenses/mnist_similarity_detector.py (+2, -2)

@@ -18,7 +18,7 @@ from mindspore import Model
 from mindspore import Tensor
 from mindspore import context
 from mindspore.nn import Cell
-from mindspore.ops.operations import TensorAdd
+from mindspore.ops.operations import Add
 from mindspore.train.serialization import load_checkpoint, load_param_into_net

 from mindarmour import BlackModel

@@ -72,7 +72,7 @@ class EncoderNet(Cell):
 def __init__(self, encode_dim):
 super(EncoderNet, self).__init__()
 self._encode_dim = encode_dim
-self.add = TensorAdd()
+self.add = Add()

 def construct(self, inputs):
 """


mindarmour/privacy/diff_privacy/mechanisms/mechanisms.py (+3, -3)

@@ -142,7 +142,7 @@ class NoiseMechanismsFactory:
 >>> loss_fn=loss,
 >>> optimizer=net_opt,
 >>> metrics=None)
->>> ms_ds = ds.GeneratorDataset(dataset_generator(batch_size, batches),
+>>> ms_ds = ds.GeneratorDataset(dataset_generator,
 >>> ['data', 'label'])
 >>> model.train(epochs, ms_ds, dataset_sink_mode=False)
 """

@@ -325,7 +325,7 @@ class _MechanismsParamsUpdater(Cell):
 self._init_noise_multiplier = init_noise_multiplier

 self._div = P.Div()
-self._add = P.TensorAdd()
+self._add = P.Add()
 self._assign = P.Assign()
 self._sub = P.Sub()
 self._one = Tensor(1, mstype.float32)

@@ -414,7 +414,7 @@ class AdaClippingWithGaussianRandom(Cell):
 mstype.float32)

 self._zero = Tensor(0, mstype.float32)
-self._add = P.TensorAdd()
+self._add = P.Add()
 self._sub = P.Sub()
 self._mul = P.Mul()
 self._exp = P.Exp()


mindarmour/privacy/diff_privacy/optimizer/optimizer.py (+1, -1)

@@ -42,7 +42,7 @@ def tensor_grad_scale(scale, grad):
 class _TupleAdd(nn.Cell):
 def __init__(self):
 super(_TupleAdd, self).__init__()
-self.add = P.TensorAdd()
+self.add = P.Add()
 self.hyper_map = C.HyperMap()

 def construct(self, input1, input2):


mindarmour/privacy/diff_privacy/train/model.py (+7, -7)

@@ -112,7 +112,7 @@ class DPModel(Model):
 >>> loss_fn=loss,
 >>> optimizer=net_opt,
 >>> metrics=None)
->>> ms_ds = ds.GeneratorDataset(dataset_generator(batch_size, batches),
+>>> ms_ds = ds.GeneratorDataset(dataset_generator,
 >>> ['data', 'label'])
 >>> model.train(epochs, ms_ds, dataset_sink_mode=False)
 """

@@ -133,7 +133,7 @@ class DPModel(Model):
 opt_name = opt.__class__.__name__
 # Check whether noise_mech and DPOptimizer are both None or not None, if so, raise ValueError.
 # And check whether noise_mech or DPOtimizer's mech method is adaptive while clip_mech is not None,
-# if so, rasie ValuerError too.
+# if so, raise ValuerError too.
 if noise_mech is not None and "DPOptimizer" in opt_name:
 msg = 'DPOptimizer is not supported while noise_mech is not None'
 LOGGER.error(TAG, msg)

@@ -323,7 +323,7 @@ class _ClipGradients(nn.Cell):
 class _TupleAdd(nn.Cell):
 def __init__(self):
 super(_TupleAdd, self).__init__()
-self.add = P.TensorAdd()
+self.add = P.Add()
 self.hyper_map = C.HyperMap()

 def construct(self, input1, input2):

@@ -422,7 +422,7 @@ class _TrainOneStepWithLossScaleCell(Cell):
 self._clip_by_global_norm = _ClipGradients()
 self._noise_mech = noise_mech
 self._clip_mech = clip_mech
-self._add = P.TensorAdd()
+self._add = P.Add()
 self._norm = nn.Norm()
 self._tuple_add = _TupleAdd()
 self._hyper_map = C.HyperMap()

@@ -508,7 +508,7 @@ class _TrainOneStepWithLossScaleCell(Cell):
 GRADIENT_CLIP_TYPE,
 self._norm_bound)
 grads = self._tuple_add(grads, record_grad)
-total_loss = P.TensorAdd()(total_loss, loss)
+total_loss = P.Add()(total_loss, loss)
 loss = P.Div()(total_loss, self._micro_float)
 beta = self._div(beta, self._micro_batches)

@@ -626,7 +626,7 @@ class _TrainOneStepCell(Cell):
 self._noise_mech = noise_mech
 self._clip_mech = clip_mech
 self._tuple_add = _TupleAdd()
-self._add = P.TensorAdd()
+self._add = P.Add()
 self._norm = nn.Norm()
 self._hyper_map = C.HyperMap()
 self._zero = Tensor(0, mstype.float32)

@@ -698,7 +698,7 @@ class _TrainOneStepCell(Cell):
 GRADIENT_CLIP_TYPE,
 self._norm_bound)
 grads = self._tuple_add(grads, record_grad)
-total_loss = P.TensorAdd()(total_loss, loss)
+total_loss = P.Add()(total_loss, loss)
 loss = self._div(total_loss, self._micro_float)

 if self._noise_mech is not None:


mindarmour/privacy/sup_privacy/train/model.py (+1, -1)

@@ -206,7 +206,7 @@ class _TupleAdd(nn.Cell):
 """
 def __init__(self):
 super(_TupleAdd, self).__init__()
-self.add = P.TensorAdd()
+self.add = P.Add()
 self.hyper_map = C.HyperMap()

 def construct(self, input1, input2):


tests/__init__.py (+0, -0)


tests/st/resnet50/resnet_cifar10.py (+2, -2)

@@ -121,7 +121,7 @@ class ResidualBlock(nn.Cell):
 self.bn3 = bn_with_initialize_last(out_channels)

 self.relu = P.ReLU()
-self.add = P.TensorAdd()
+self.add = P.Add()

 def construct(self, x):
 identity = x

@@ -168,7 +168,7 @@ class ResidualBlockWithDown(nn.Cell):

 self.conv_down_sample = conv1x1(in_channels, out_channels, stride=stride, padding=0)
 self.bn_down_sample = bn_with_initialize(out_channels)
-self.add = P.TensorAdd()
+self.add = P.Add()

 def construct(self, x):
 identity = x


tests/ut/python/adv_robustness/attacks/black/test_hsja.py (+1, -1)

@@ -23,7 +23,7 @@ from mindarmour import BlackModel
 from mindarmour.adv_robustness.attacks import HopSkipJumpAttack
 from mindarmour.utils.logger import LogUtil

-from ut.python.utils.mock_net import Net
+from tests.ut.python.utils.mock_net import Net

 context.set_context(mode=context.GRAPH_MODE)
 context.set_context(device_target="Ascend")


tests/ut/python/adv_robustness/attacks/black/test_nes.py (+1, -1)

@@ -23,7 +23,7 @@ from mindarmour import BlackModel
 from mindarmour.adv_robustness.attacks import NES
 from mindarmour.utils.logger import LogUtil

-from ut.python.utils.mock_net import Net
+from tests.ut.python.utils.mock_net import Net

 context.set_context(mode=context.GRAPH_MODE)
 context.set_context(device_target="Ascend")


tests/ut/python/adv_robustness/attacks/black/test_pointwise_attack.py (+1, -1)

@@ -26,7 +26,7 @@ from mindarmour import BlackModel
 from mindarmour.adv_robustness.attacks import PointWiseAttack
 from mindarmour.utils.logger import LogUtil

-from ut.python.utils.mock_net import Net
+from tests.ut.python.utils.mock_net import Net

 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")




tests/ut/python/adv_robustness/attacks/black/test_pso_attack.py (+1, -1)

@@ -144,7 +144,7 @@ def test_pso_attack_targeted():

 @pytest.mark.level0
-@pytest.mark.platform_x86_gpu_inference
+@pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_card
 @pytest.mark.component_mindarmour
 def test_pso_attack_gpu():


tests/ut/python/adv_robustness/attacks/test_gradient_method.py (+1, -1)

@@ -133,7 +133,7 @@ def test_fast_gradient_method():

 @pytest.mark.level0
-@pytest.mark.platform_x86_gpu_inference
+@pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_card
 @pytest.mark.component_mindarmour
 def test_fast_gradient_method_gpu():


tests/ut/python/adv_robustness/attacks/test_jsma.py (+1, -1)

@@ -108,7 +108,7 @@ def test_jsma_attack_2():

 @pytest.mark.level0
-@pytest.mark.platform_x86_gpu_inference
+@pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_card
 @pytest.mark.component_mindarmour
 def test_jsma_attack_gpu():


tests/ut/python/adv_robustness/attacks/test_lbfgs.py (+1, -1)

@@ -24,7 +24,7 @@ from mindspore.train.serialization import load_checkpoint, load_param_into_net
 from mindarmour.adv_robustness.attacks import LBFGS
 from mindarmour.utils.logger import LogUtil

-from ut.python.utils.mock_net import Net
+from tests.ut.python.utils.mock_net import Net

 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")




tests/ut/python/adv_robustness/defenses/test_ad.py (+1, -1)

@@ -26,7 +26,7 @@ from mindspore.nn.optim.momentum import Momentum
 from mindarmour.adv_robustness.defenses import AdversarialDefense
 from mindarmour.utils.logger import LogUtil

-from ut.python.utils.mock_net import Net
+from tests.ut.python.utils.mock_net import Net

 LOGGER = LogUtil.get_instance()
 TAG = 'Ad_Test'


tests/ut/python/adv_robustness/defenses/test_ead.py (+1, -1)

@@ -28,7 +28,7 @@ from mindarmour.adv_robustness.attacks import \
 from mindarmour.adv_robustness.defenses import EnsembleAdversarialDefense
 from mindarmour.utils.logger import LogUtil

-from ut.python.utils.mock_net import Net
+from tests.ut.python.utils.mock_net import Net

 LOGGER = LogUtil.get_instance()
 TAG = 'Ead_Test'


tests/ut/python/adv_robustness/defenses/test_nad.py (+1, -1)

@@ -25,7 +25,7 @@ from mindspore.nn.optim.momentum import Momentum
 from mindarmour.adv_robustness.defenses import NaturalAdversarialDefense
 from mindarmour.utils.logger import LogUtil

-from ut.python.utils.mock_net import Net
+from tests.ut.python.utils.mock_net import Net

 LOGGER = LogUtil.get_instance()
 TAG = 'Nad_Test'


tests/ut/python/adv_robustness/defenses/test_pad.py (+1, -1)

@@ -25,7 +25,7 @@ from mindspore.nn.optim.momentum import Momentum
 from mindarmour.adv_robustness.defenses import ProjectedAdversarialDefense
 from mindarmour.utils.logger import LogUtil

-from ut.python.utils.mock_net import Net
+from tests.ut.python.utils.mock_net import Net

 LOGGER = LogUtil.get_instance()
 TAG = 'Pad_Test'


tests/ut/python/adv_robustness/detectors/black/test_similarity_detector.py (+2, -2)

@@ -20,7 +20,7 @@ import pytest
 from mindspore.nn import Cell
 from mindspore import Model
 from mindspore import context
-from mindspore.ops.operations import TensorAdd
+from mindspore.ops.operations import Add

 from mindarmour.adv_robustness.detectors import SimilarityDetector

@@ -35,7 +35,7 @@ class EncoderNet(Cell):
 def __init__(self, encode_dim):
 super(EncoderNet, self).__init__()
 self._encode_dim = encode_dim
-self.add = TensorAdd()
+self.add = Add()

 def construct(self, inputs):
 """


tests/ut/python/adv_robustness/detectors/test_ensemble_detector.py (+3, -3)

@@ -18,7 +18,7 @@ import numpy as np
 import pytest

 from mindspore.nn import Cell
-from mindspore.ops.operations import TensorAdd
+from mindspore.ops.operations import Add
 from mindspore.train.model import Model
 from mindspore import context

@@ -35,7 +35,7 @@ class Net(Cell):
 """
 def __init__(self):
 super(Net, self).__init__()
-self.add = TensorAdd()
+self.add = Add()

 def construct(self, inputs):
 """

@@ -53,7 +53,7 @@ class AutoNet(Cell):
 """
 def __init__(self):
 super(AutoNet, self).__init__()
-self.add = TensorAdd()
+self.add = Add()

 def construct(self, inputs):
 """


tests/ut/python/adv_robustness/detectors/test_mag_net.py (+2, -2)

@@ -19,7 +19,7 @@ import pytest

 import mindspore.ops.operations as P
 from mindspore.nn import Cell
-from mindspore.ops.operations import TensorAdd
+from mindspore.ops.operations import Add
 from mindspore import Model
 from mindspore import context

@@ -36,7 +36,7 @@ class Net(Cell):

 def __init__(self):
 super(Net, self).__init__()
-self.add = TensorAdd()
+self.add = Add()

 def construct(self, inputs):
 """


tests/ut/python/adv_robustness/detectors/test_region_based_detector.py (+2, -2)

@@ -20,7 +20,7 @@ import pytest
 from mindspore.nn import Cell
 from mindspore import Model
 from mindspore import context
-from mindspore.ops.operations import TensorAdd
+from mindspore.ops.operations import Add

 from mindarmour.adv_robustness.detectors import RegionBasedDetector

@@ -34,7 +34,7 @@ class Net(Cell):
 """
 def __init__(self):
 super(Net, self).__init__()
-self.add = TensorAdd()
+self.add = Add()

 def construct(self, inputs):
 """


tests/ut/python/privacy/diff_privacy/test_model_train.py (+7, -11)

@@ -26,11 +26,13 @@ from mindarmour.privacy.diff_privacy import NoiseMechanismsFactory
 from mindarmour.privacy.diff_privacy import ClipMechanismsFactory
 from mindarmour.privacy.diff_privacy import DPOptimizerClassFactory

-from ut.python.utils.mock_net import Net
+from tests.ut.python.utils.mock_net import Net


-def dataset_generator(batch_size, batches):
+def dataset_generator():
 """mock training data."""
+batch_size = 32
+batches = 128
 data = np.random.random((batches*batch_size, 1, 32, 32)).astype(
 np.float32)
 label = np.random.randint(0, 10, batches*batch_size).astype(np.int32)

@@ -49,8 +51,6 @@ def test_dp_model_with_pynative_mode():
 norm_bound = 1.0
 initial_noise_multiplier = 0.01
 network = Net()
-batch_size = 32
-batches = 128
 epochs = 1
 micro_batches = 2
 loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)

@@ -73,7 +73,7 @@ def test_dp_model_with_pynative_mode():
 loss_fn=loss,
 optimizer=net_opt,
 metrics=None)
-ms_ds = ds.GeneratorDataset(dataset_generator(batch_size, batches),
+ms_ds = ds.GeneratorDataset(dataset_generator,
 ['data', 'label'])
 model.train(epochs, ms_ds, dataset_sink_mode=False)

@@ -88,8 +88,6 @@ def test_dp_model_with_graph_mode():
 norm_bound = 1.0
 initial_noise_multiplier = 0.01
 network = Net()
-batch_size = 32
-batches = 128
 epochs = 1
 loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
 noise_mech = NoiseMechanismsFactory().create('Gaussian',

@@ -110,7 +108,7 @@ def test_dp_model_with_graph_mode():
 loss_fn=loss,
 optimizer=net_opt,
 metrics=None)
-ms_ds = ds.GeneratorDataset(dataset_generator(batch_size, batches),
+ms_ds = ds.GeneratorDataset(dataset_generator,
 ['data', 'label'])
 model.train(epochs, ms_ds, dataset_sink_mode=False)

@@ -125,8 +123,6 @@ def test_dp_model_with_graph_mode_ada_gaussian():
 norm_bound = 1.0
 initial_noise_multiplier = 0.01
 network = Net()
-batch_size = 32
-batches = 128
 epochs = 1
 alpha = 0.8
 loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)

@@ -146,6 +142,6 @@ def test_dp_model_with_graph_mode_ada_gaussian():
 loss_fn=loss,
 optimizer=net_opt,
 metrics=None)
-ms_ds = ds.GeneratorDataset(dataset_generator(batch_size, batches),
+ms_ds = ds.GeneratorDataset(dataset_generator,
 ['data', 'label'])
 model.train(epochs, ms_ds, dataset_sink_mode=False)
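
Besides the operator rename, the test changes above switch GeneratorDataset from consuming the result of dataset_generator(batch_size, batches) to consuming the generator function itself, with the sizes fixed inside the function. A minimal sketch of the updated pattern follows; it is illustrative only, and the yield logic is assumed since the diff shows just the surrounding lines.

# Illustrative sketch of the updated dataset pattern (assumptions: MindSpore's
# GeneratorDataset accepts a generator function as its source; the yield loop
# below is inferred, as the diff shows only the surrounding lines).
import numpy as np
import mindspore.dataset as ds


def dataset_generator():
    """Mock training data with the sizes baked into the generator."""
    batch_size = 32
    batches = 128
    data = np.random.random((batches * batch_size, 1, 32, 32)).astype(np.float32)
    label = np.random.randint(0, 10, batches * batch_size).astype(np.int32)
    for i in range(batches):
        # Yield one (data, label) batch per step.
        yield (data[i * batch_size:(i + 1) * batch_size],
               label[i * batch_size:(i + 1) * batch_size])


# Pass the callable itself; GeneratorDataset iterates it lazily and can
# re-create the source for each epoch instead of exhausting a one-shot generator.
ms_ds = ds.GeneratorDataset(dataset_generator, ['data', 'label'])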

tests/ut/python/privacy/diff_privacy/test_monitor.py (+13, -16)

@@ -25,13 +25,16 @@ import mindspore.context as context
 from mindarmour.privacy.diff_privacy import PrivacyMonitorFactory
 from mindarmour.utils.logger import LogUtil

-from ut.python.utils.mock_net import Net
+from tests.ut.python.utils.mock_net import Net

 LOGGER = LogUtil.get_instance()
 TAG = 'DP-Monitor Test'


-def dataset_generator(batch_size, batches):
+def dataset_generator():
+batch_size = 16
+batches = 128

 data = np.random.random((batches * batch_size, 1, 32, 32)).astype(
 np.float32)
 label = np.random.randint(0, 10, batches * batch_size).astype(np.int32)

@@ -48,7 +51,6 @@ def dataset_generator(batch_size, batches):
 def test_dp_monitor():
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
 batch_size = 16
-batches = 128
 epochs = 1
 rdp = PrivacyMonitorFactory.create(policy='rdp', num_samples=60000,
 batch_size=batch_size,

@@ -64,19 +66,18 @@ def test_dp_monitor():
 model = Model(network, net_loss, net_opt)

 LOGGER.info(TAG, "============== Starting Training ==============")
-ds1 = ds.GeneratorDataset(dataset_generator(batch_size, batches),
+ds1 = ds.GeneratorDataset(dataset_generator,
 ["data", "label"])
 model.train(epochs, ds1, callbacks=[rdp], dataset_sink_mode=False)

 @pytest.mark.level0
-@pytest.mark.platform_x86_gpu_inference
+@pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_card
 @pytest.mark.component_mindarmour
 def test_dp_monitor_gpu():
 context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
 batch_size = 16
-batches = 128
 epochs = 1
 rdp = PrivacyMonitorFactory.create(policy='rdp', num_samples=60000,
 batch_size=batch_size,

@@ -92,7 +93,7 @@ def test_dp_monitor_gpu():
 model = Model(network, net_loss, net_opt)

 LOGGER.info(TAG, "============== Starting Training ==============")
-ds1 = ds.GeneratorDataset(dataset_generator(batch_size, batches),
+ds1 = ds.GeneratorDataset(dataset_generator,
 ["data", "label"])
 model.train(epochs, ds1, callbacks=[rdp], dataset_sink_mode=False)

@@ -104,7 +105,6 @@ def test_dp_monitor_gpu():
 def test_dp_monitor_cpu():
 context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
 batch_size = 16
-batches = 128
 epochs = 1
 rdp = PrivacyMonitorFactory.create(policy='rdp', num_samples=60000,
 batch_size=batch_size,

@@ -120,7 +120,7 @@ def test_dp_monitor_cpu():
 model = Model(network, net_loss, net_opt)

 LOGGER.info(TAG, "============== Starting Training ==============")
-ds1 = ds.GeneratorDataset(dataset_generator(batch_size, batches),
+ds1 = ds.GeneratorDataset(dataset_generator,
 ["data", "label"])
 model.train(epochs, ds1, callbacks=[rdp], dataset_sink_mode=False)

@@ -133,7 +133,6 @@ def test_dp_monitor_cpu():
 def test_dp_monitor_zcdp():
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
 batch_size = 16
-batches = 128
 epochs = 1
 zcdp = PrivacyMonitorFactory.create(policy='zcdp', num_samples=60000,
 batch_size=batch_size,

@@ -149,19 +148,18 @@ def test_dp_monitor_zcdp():
 model = Model(network, net_loss, net_opt)

 LOGGER.info(TAG, "============== Starting Training ==============")
-ds1 = ds.GeneratorDataset(dataset_generator(batch_size, batches),
+ds1 = ds.GeneratorDataset(dataset_generator,
 ["data", "label"])
 model.train(epochs, ds1, callbacks=[zcdp], dataset_sink_mode=False)

 @pytest.mark.level0
-@pytest.mark.platform_x86_gpu_inference
+@pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_card
 @pytest.mark.component_mindarmour
 def test_dp_monitor_zcdp_gpu():
 context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
 batch_size = 16
-batches = 128
 epochs = 1
 zcdp = PrivacyMonitorFactory.create(policy='zcdp', num_samples=60000,
 batch_size=batch_size,

@@ -177,7 +175,7 @@ def test_dp_monitor_zcdp_gpu():
 model = Model(network, net_loss, net_opt)

 LOGGER.info(TAG, "============== Starting Training ==============")
-ds1 = ds.GeneratorDataset(dataset_generator(batch_size, batches),
+ds1 = ds.GeneratorDataset(dataset_generator,
 ["data", "label"])
 model.train(epochs, ds1, callbacks=[zcdp], dataset_sink_mode=False)

@@ -189,7 +187,6 @@ def test_dp_monitor_zcdp_gpu():
 def test_dp_monitor_zcdp_cpu():
 context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
 batch_size = 16
-batches = 128
 epochs = 1
 zcdp = PrivacyMonitorFactory.create(policy='zcdp', num_samples=60000,
 batch_size=batch_size,

@@ -205,6 +202,6 @@ def test_dp_monitor_zcdp_cpu():
 model = Model(network, net_loss, net_opt)

 LOGGER.info(TAG, "============== Starting Training ==============")
-ds1 = ds.GeneratorDataset(dataset_generator(batch_size, batches),
+ds1 = ds.GeneratorDataset(dataset_generator,
 ["data", "label"])
 model.train(epochs, ds1, callbacks=[zcdp], dataset_sink_mode=False)

tests/ut/python/privacy/diff_privacy/test_optimizer.py (+2, -2)

@@ -19,7 +19,7 @@ from mindspore.train.model import Model

 from mindarmour.privacy.diff_privacy import DPOptimizerClassFactory

-from ut.python.utils.mock_net import Net
+from tests.ut.python.utils.mock_net import Net

 @pytest.mark.level0

@@ -42,7 +42,7 @@ def test_optimizer():

 @pytest.mark.level0
-@pytest.mark.platform_x86_gpu_inference
+@pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_card
 @pytest.mark.component_mindarmour
 def test_optimizer_gpu():


tests/ut/python/privacy/evaluation/test_inversion_attack.py (+1, -1)

@@ -22,7 +22,7 @@ import mindspore.context as context

 from mindarmour.privacy.evaluation.inversion_attack import ImageInversionAttack

-from ut.python.utils.mock_net import Net
+from tests.ut.python.utils.mock_net import Net

 context.set_context(mode=context.GRAPH_MODE)


tests/ut/python/privacy/evaluation/test_membership_inference.py (+8, -10)

@@ -25,14 +25,16 @@ import mindspore.context as context

 from mindarmour.privacy.evaluation import MembershipInference

-from ut.python.utils.mock_net import Net
+from tests.ut.python.utils.mock_net import Net

 context.set_context(mode=context.GRAPH_MODE)


-def dataset_generator(batch_size, batches):
+def dataset_generator():
 """mock training data."""
+batch_size = 16
+batches = 1
 data = np.random.randn(batches*batch_size, 1, 32, 32).astype(
 np.float32)
 label = np.random.randint(0, 10, batches*batch_size).astype(np.int32)

@@ -74,11 +76,9 @@ def test_membership_inference_object_train():
 "n_neighbors": [3, 5, 7],
 }
 }]
-batch_size = 16
-batches = 1
-ds_train = ds.GeneratorDataset(dataset_generator(batch_size, batches),
+ds_train = ds.GeneratorDataset(dataset_generator,
 ["image", "label"])
-ds_test = ds.GeneratorDataset(dataset_generator(batch_size, batches),
+ds_test = ds.GeneratorDataset(dataset_generator,
 ["image", "label"])
 inference_model.train(ds_train, ds_test, config)

@@ -96,11 +96,9 @@ def test_membership_inference_eval():
 inference_model = MembershipInference(model, -1)
 assert isinstance(inference_model, MembershipInference)

-batch_size = 16
-batches = 1
-eval_train = ds.GeneratorDataset(dataset_generator(batch_size, batches),
+eval_train = ds.GeneratorDataset(dataset_generator,
 ["image", "label"])
-eval_test = ds.GeneratorDataset(dataset_generator(batch_size, batches),
+eval_test = ds.GeneratorDataset(dataset_generator,
 ["image", "label"])

 metrics = ["precision", "accuracy", "recall"]


tests/ut/python/privacy/sup_privacy/test_model_train.py (+7, -5)

@@ -25,15 +25,18 @@ from mindspore.train.callback import LossMonitor
 from mindspore.nn.metrics import Accuracy
 import mindspore.dataset as ds

-from ut.python.utils.mock_net import Net as LeNet5
-
 from mindarmour.privacy.sup_privacy import SuppressModel
 from mindarmour.privacy.sup_privacy import SuppressMasker
 from mindarmour.privacy.sup_privacy import SuppressPrivacyFactory
 from mindarmour.privacy.sup_privacy import MaskLayerDes

+from tests.ut.python.utils.mock_net import Net as LeNet5


-def dataset_generator(batch_size, batches):
+def dataset_generator():
 """mock training data."""
+batches = 10
+batch_size = 32
 data = np.random.random((batches*batch_size, 1, 32, 32)).astype(
 np.float32)
 label = np.random.randint(0, 10, batches*batch_size).astype(np.int32)

@@ -51,7 +54,6 @@ def test_suppress_model_with_pynative_mode():
 networks_l5 = LeNet5()
 epochs = 5
 batch_num = 10
-batch_size = 32
 mask_times = 10
 lr = 0.01
 masklayers_lenet5 = []

@@ -79,7 +81,7 @@ def test_suppress_model_with_pynative_mode():
 ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet",
 directory="./trained_ckpt_file/",
 config=config_ck)
-ds_train = ds.GeneratorDataset(dataset_generator(batch_size, batch_num), ['data', 'label'])
+ds_train = ds.GeneratorDataset(dataset_generator, ['data', 'label'])

 model_instance.train(epochs, ds_train, callbacks=[ckpoint_cb, LossMonitor(), suppress_masker],
 dataset_sink_mode=False)
