test_model_train.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
DP-Model test.
"""
import pytest
import numpy as np

from mindspore import nn
from mindspore import context
import mindspore.dataset as ds

from mindarmour.diff_privacy import DPModel
from mindarmour.diff_privacy import MechanismsFactory
from mindarmour.diff_privacy import DPOptimizerClassFactory

from test_network import LeNet5


def dataset_generator(batch_size, batches):
    """Mock training data."""
    data = np.random.random((batches * batch_size, 1, 32, 32)).astype(np.float32)
    label = np.random.randint(0, 10, batches * batch_size).astype(np.int32)
    for i in range(batches):
        yield data[i * batch_size:(i + 1) * batch_size], label[i * batch_size:(i + 1) * batch_size]


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_dp_model_pynative_mode():
    # PyNative mode: the Gaussian noise mechanism is bound to the optimizer via
    # DPOptimizerClassFactory, so DPModel itself is constructed with mech=None.
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
    norm_clip = 1.0
    initial_noise_multiplier = 0.01
    network = LeNet5()
    batch_size = 32
    batches = 128
    epochs = 1
    micro_batches = 2
    loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
    factory_opt = DPOptimizerClassFactory(micro_batches=micro_batches)
    factory_opt.set_mechanisms('Gaussian',
                               norm_bound=norm_clip,
                               initial_noise_multiplier=initial_noise_multiplier)
    net_opt = factory_opt.create('Momentum')(network.trainable_params(), learning_rate=0.1, momentum=0.9)
    model = DPModel(micro_batches=micro_batches,
                    norm_clip=norm_clip,
                    mech=None,
                    network=network,
                    loss_fn=loss,
                    optimizer=net_opt,
                    metrics=None)
    ms_ds = ds.GeneratorDataset(dataset_generator(batch_size, batches), ['data', 'label'])
    ms_ds.set_dataset_size(batch_size * batches)
    model.train(epochs, ms_ds, dataset_sink_mode=False)


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_dp_model_with_graph_mode():
    # Graph mode: a Gaussian mechanism from MechanismsFactory is passed directly
    # to DPModel and combined with a plain Momentum optimizer.
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    norm_clip = 1.0
    initial_noise_multiplier = 0.01
    network = LeNet5()
    batch_size = 32
    batches = 128
    epochs = 1
    loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
    mech = MechanismsFactory().create('Gaussian',
                                      norm_bound=norm_clip,
                                      initial_noise_multiplier=initial_noise_multiplier)
    net_opt = nn.Momentum(network.trainable_params(), learning_rate=0.1, momentum=0.9)
    model = DPModel(micro_batches=2,
                    norm_clip=norm_clip,
                    mech=mech,
                    network=network,
                    loss_fn=loss,
                    optimizer=net_opt,
                    metrics=None)
    ms_ds = ds.GeneratorDataset(dataset_generator(batch_size, batches), ['data', 'label'])
    ms_ds.set_dataset_size(batch_size * batches)
    model.train(epochs, ms_ds, dataset_sink_mode=False)

MindArmour focuses on the security and privacy of AI. It aims to enhance the trustworthiness of models and protect users' data privacy. It consists of three modules: the adversarial-example robustness module, the fuzz testing module, and the privacy protection and evaluation module.

Adversarial-example robustness module: this module evaluates a model's robustness against adversarial examples and provides model-hardening methods that strengthen its resistance to adversarial attacks. It contains four submodules: adversarial example generation, adversarial example detection, model defense, and attack/defense evaluation.
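As a concrete illustration of the privacy protection and evaluation module, below is a minimal sketch of differentially private training assembled from the same mindarmour.diff_privacy API exercised by test_model_train.py above. The LeNet5 network, the mock data generator, and every hyper-parameter value are placeholders taken from that test rather than recommended settings, and an Ascend device is assumed.

import numpy as np
import mindspore.dataset as ds
from mindspore import nn, context
from mindarmour.diff_privacy import DPModel, MechanismsFactory
from test_network import LeNet5  # placeholder network from the test suite

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


def mock_data(batch_size=32, batches=128):
    """Random images and labels standing in for a real dataset."""
    data = np.random.random((batches * batch_size, 1, 32, 32)).astype(np.float32)
    label = np.random.randint(0, 10, batches * batch_size).astype(np.int32)
    for i in range(batches):
        yield data[i * batch_size:(i + 1) * batch_size], label[i * batch_size:(i + 1) * batch_size]


network = LeNet5()
norm_clip = 1.0
# Gaussian noise mechanism; norm_bound matches DPModel's norm_clip, as in the test above.
mech = MechanismsFactory().create('Gaussian',
                                  norm_bound=norm_clip,
                                  initial_noise_multiplier=0.01)
opt = nn.Momentum(network.trainable_params(), learning_rate=0.1, momentum=0.9)
loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
model = DPModel(micro_batches=2,
                norm_clip=norm_clip,
                mech=mech,
                network=network,
                loss_fn=loss,
                optimizer=opt,
                metrics=None)
dataset = ds.GeneratorDataset(mock_data(), ['data', 'label'])
dataset.set_dataset_size(32 * 128)
model.train(1, dataset, dataset_sink_mode=False)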