
test_inversion_attack.py

# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Inversion attack test
"""
import pytest
import numpy as np

import mindspore.context as context

from mindarmour.privacy.evaluation.inversion_attack import ImageInversionAttack

from tests.ut.python.utils.mock_net import Net

context.set_context(mode=context.GRAPH_MODE)


@pytest.mark.level0
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
@pytest.mark.component_mindarmour
def test_inversion_attack():
    net = Net()
    # Random stand-ins for the original images and the feature vectors to be inverted.
    original_images = np.random.random((2, 1, 32, 32)).astype(np.float32)
    target_features = np.random.random((2, 10)).astype(np.float32)
    inversion_attack = ImageInversionAttack(net, input_shape=(1, 32, 32),
                                            input_bound=(0, 1), loss_weights=[1, 0.2, 5])
    inversion_images = inversion_attack.generate(target_features, iters=10)
    avg_ssim = inversion_attack.evaluate(original_images, inversion_images)
    # The second entry of the returned metrics is the average SSIM, which must lie in (0, 1).
    assert 0 < avg_ssim[1] < 1
    assert target_features.shape[0] == inversion_images.shape[0]


@pytest.mark.level0
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
@pytest.mark.component_mindarmour
def test_inversion_attack2():
    net = Net()
    original_images = np.random.random((2, 1, 32, 32)).astype(np.float32)
    target_features = np.random.random((2, 10)).astype(np.float32)
    inversion_attack = ImageInversionAttack(net, input_shape=(1, 32, 32),
                                            input_bound=(0, 1), loss_weights=[1, 0.2, 5])
    inversion_images = inversion_attack.generate(target_features, iters=10)
    true_labels = np.array([1, 2])
    new_net = Net()
    # With true labels and a second network, evaluate() returns three metrics.
    indexes = inversion_attack.evaluate(original_images, inversion_images, true_labels, new_net)
    assert len(indexes) == 3
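The markers on these tests (level0, the two Ascend platform markers, env_onecard, component_mindarmour) select cases by CI level and target platform. A minimal, hypothetical way to run only the Ascend level0 cases from this file, assuming the markers are registered in the repository's pytest configuration (not shown here):

import pytest

if __name__ == "__main__":
    # Filter by the markers declared above; the marker expression and file path are illustrative.
    pytest.main(["-m", "level0 and platform_arm_ascend_training", "test_inversion_attack.py", "-v"])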

MindArmour focuses on the security and privacy of AI. It aims to make models more secure and trustworthy and to protect users' data privacy. It consists of three main modules: the adversarial example robustness module, the fuzz testing module, and the privacy protection and evaluation module.

Adversarial example robustness module

The adversarial example robustness module is used to evaluate a model's robustness against adversarial examples and provides model-enhancement methods to strengthen its resistance to adversarial attacks, improving overall robustness. It contains four submodules: adversarial example generation, adversarial example detection, model defense, and attack/defense evaluation.
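As a rough illustration of the adversarial example generation submodule, the sketch below applies the fast gradient sign method to the same mock network used in the tests above. The eps value, the loss function, and the one-hot label encoding are illustrative assumptions, and the import path and constructor arguments may differ between MindArmour releases.

import numpy as np
from mindspore import nn
from mindarmour.adv_robustness.attacks import FastGradientSignMethod
from tests.ut.python.utils.mock_net import Net

net = Net()
inputs = np.random.random((2, 1, 32, 32)).astype(np.float32)
labels = np.eye(10)[np.array([1, 2])].astype(np.float32)  # one-hot labels for the 10-class mock net

# eps bounds the per-pixel perturbation; the loss function here is an assumed choice.
attack = FastGradientSignMethod(net, eps=0.07,
                                loss_fn=nn.SoftmaxCrossEntropyWithLogits(sparse=False))
adv_inputs = attack.generate(inputs, labels)
assert adv_inputs.shape == inputs.shape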