
test_lbfgs.py

# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
LBFGS-Attack test.
"""
import os

import numpy as np
import pytest

from mindspore import context
from mindspore.train.serialization import load_checkpoint, load_param_into_net

from mindarmour.adv_robustness.attacks import LBFGS
from mindarmour.utils.logger import LogUtil

from tests.ut.python.utils.mock_net import Net

LOGGER = LogUtil.get_instance()
TAG = 'LBFGS_Test'
LOGGER.set_level('DEBUG')


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_lbfgs_attack_ascend():
    """
    Feature: LBFGS-Attack test for Ascend
    Description: make sure that attack.generate works properly
    Expectation: attack.generate works properly
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    np.random.seed(123)
    # load the trained network from checkpoint
    current_dir = os.path.dirname(os.path.abspath(__file__))
    ckpt_path = os.path.join(current_dir,
                             '../../dataset/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt')
    net = Net()
    load_dict = load_checkpoint(ckpt_path)
    load_param_into_net(net, load_dict)

    # get one MNIST image and its label
    input_np = np.load(os.path.join(current_dir,
                                    '../../dataset/test_images.npy'))[:1]
    label_np = np.load(os.path.join(current_dir,
                                    '../../dataset/test_labels.npy'))[:1]
    LOGGER.debug(TAG, 'true label is :{}'.format(label_np[0]))

    # pick a random target class that differs from the true label
    classes = 10
    target_np = np.random.randint(0, classes, 1)
    while target_np == label_np[0]:
        target_np = np.random.randint(0, classes, 1)
    target_np = np.eye(10)[target_np].astype(np.float32)

    attack = LBFGS(net, is_targeted=True)
    LOGGER.debug(TAG, 'target_np is :{}'.format(target_np[0]))
    _ = attack.generate(input_np, target_np)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_lbfgs_attack_cpu():
    """
    Feature: LBFGS-Attack test for CPU
    Description: make sure that attack.generate works properly
    Expectation: attack.generate works properly
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    np.random.seed(123)
    # load the trained network from checkpoint
    current_dir = os.path.dirname(os.path.abspath(__file__))
    ckpt_path = os.path.join(current_dir,
                             '../../dataset/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt')
    net = Net()
    load_dict = load_checkpoint(ckpt_path)
    load_param_into_net(net, load_dict)

    # get one MNIST image and its label
    input_np = np.load(os.path.join(current_dir,
                                    '../../dataset/test_images.npy'))[:1]
    label_np = np.load(os.path.join(current_dir,
                                    '../../dataset/test_labels.npy'))[:1]
    LOGGER.debug(TAG, 'true label is :{}'.format(label_np[0]))

    # pick a random target class that differs from the true label
    classes = 10
    target_np = np.random.randint(0, classes, 1)
    while target_np == label_np[0]:
        target_np = np.random.randint(0, classes, 1)
    target_np = np.eye(10)[target_np].astype(np.float32)

    attack = LBFGS(net, is_targeted=True)
    LOGGER.debug(TAG, 'target_np is :{}'.format(target_np[0]))
    _ = attack.generate(input_np, target_np)

MindArmour focuses on the security and privacy of AI. It aims to enhance the trustworthiness of models and to protect users' data privacy. It consists of three main modules: the adversarial robustness module, the fuzz testing module, and the privacy protection and evaluation module. The adversarial robustness module evaluates a model's robustness against adversarial examples and provides model-hardening methods that strengthen its resistance to adversarial attacks and improve overall robustness. This module contains four submodules: adversarial example generation, adversarial example detection, model defense, and attack/defense evaluation.
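As a concrete illustration of the adversarial example generation submodule, the following is a minimal sketch that mirrors the targeted LBFGS attack exercised in test_lbfgs.py above. The TinyNet class, the input shape, and the target class are hypothetical placeholders; only the LBFGS(net, is_targeted=True) constructor and the attack.generate(inputs, labels) call are taken from the test itself, which in practice uses a trained LeNet loaded via load_checkpoint/load_param_into_net.

import numpy as np
import mindspore.nn as nn
from mindspore import context
from mindarmour.adv_robustness.attacks import LBFGS


class TinyNet(nn.Cell):
    """Hypothetical stand-in classifier: flattens a 1x32x32 image into 10 logits."""
    def __init__(self):
        super(TinyNet, self).__init__()
        self.flatten = nn.Flatten()
        self.fc = nn.Dense(32 * 32, 10)

    def construct(self, x):
        return self.fc(self.flatten(x))


context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
net = TinyNet()  # placeholder; a trained network would normally be used here

# One MNIST-shaped input and a one-hot target class the attack should steer the model towards.
input_np = np.random.rand(1, 1, 32, 32).astype(np.float32)
target_np = np.eye(10)[[3]].astype(np.float32)

# Targeted LBFGS attack: generate() returns adversarial examples for the given inputs.
attack = LBFGS(net, is_targeted=True)
adv_np = attack.generate(input_np, target_np)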