
test_hsja.py 5.5 kB

# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os

import numpy as np
import pytest

from mindspore import Tensor
from mindspore import context
from mindspore.train.serialization import load_checkpoint, load_param_into_net

from mindarmour import BlackModel
from mindarmour.adv_robustness.attacks import HopSkipJumpAttack
from mindarmour.utils.logger import LogUtil

from ut.python.utils.mock_net import Net

context.set_context(mode=context.GRAPH_MODE)
context.set_context(device_target="Ascend")

LOGGER = LogUtil.get_instance()
TAG = 'HopSkipJumpAttack'


class ModelToBeAttacked(BlackModel):
    """Model to be attacked (black-box wrapper around the network)."""

    def __init__(self, network):
        super(ModelToBeAttacked, self).__init__()
        self._network = network

    def predict(self, inputs):
        """Return the network's predictions as a NumPy array."""
        if len(inputs.shape) == 3:
            inputs = inputs[np.newaxis, :]
        result = self._network(Tensor(inputs.astype(np.float32)))
        return result.asnumpy()


def random_target_labels(true_labels):
    """Draw a random target label different from each true label."""
    target_labels = []
    for label in true_labels:
        while True:
            target_label = np.random.randint(0, 10)
            if target_label != label:
                target_labels.append(target_label)
                break
    return target_labels


def create_target_images(dataset, data_labels, target_labels):
    """Pick one sample from the dataset for each target label."""
    res = []
    for label in target_labels:
        for i, data_label in enumerate(data_labels):
            if data_label == label:
                res.append(dataset[i])
                break
    return np.array(res)


def get_model():
    """Load the trained network from checkpoint and wrap it as a black-box model."""
    current_dir = os.path.dirname(os.path.abspath(__file__))
    ckpt_path = os.path.join(current_dir,
                             '../../../dataset/trained_ckpt_file/checkpoint_lenet-10_1875.ckpt')
    net = Net()
    load_dict = load_checkpoint(ckpt_path)
    load_param_into_net(net, load_dict)
    net.set_train(False)
    model = ModelToBeAttacked(net)
    return model


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_hsja_mnist_attack():
    """
    HopSkipJumpAttack (HSJA) test on MNIST.
    """
    current_dir = os.path.dirname(os.path.abspath(__file__))

    # get test data
    test_images_set = np.load(os.path.join(current_dir,
                                           '../../../dataset/test_images.npy'))
    test_labels_set = np.load(os.path.join(current_dir,
                                           '../../../dataset/test_labels.npy'))

    # prediction accuracy before attack
    model = get_model()
    batch_num = 1  # the number of batches of attacking samples
    predict_labels = []
    i = 0
    for img in test_images_set:
        i += 1
        pred_labels = np.argmax(model.predict(img), axis=1)
        predict_labels.append(pred_labels)
        if i >= batch_num:
            break
    predict_labels = np.concatenate(predict_labels)
    true_labels = test_labels_set[:batch_num]
    accuracy = np.mean(np.equal(predict_labels, true_labels))
    LOGGER.info(TAG, "prediction accuracy before attacking is : %s",
                accuracy)
    test_images = test_images_set[:batch_num]

    # attacking
    norm = 'l2'
    search = 'grid_search'
    target = False
    attack = HopSkipJumpAttack(model, constraint=norm, stepsize_search=search)
    if target:
        target_labels = random_target_labels(true_labels)
        target_images = create_target_images(test_images_set, test_labels_set,
                                             target_labels)
        LOGGER.info(TAG, 'len target labels : %s', len(target_labels))
        LOGGER.info(TAG, 'len target_images : %s', len(target_images))
        LOGGER.info(TAG, 'len test_images : %s', len(test_images))
        attack.set_target_images(target_images)
        success_list, adv_data, _ = attack.generate(test_images, target_labels)
    else:
        success_list, adv_data, _ = attack.generate(test_images, None)
    assert (adv_data != test_images).any()

    adv_datas = []
    gts = []
    for success, adv, gt in zip(success_list, adv_data, true_labels):
        if success:
            adv_datas.append(adv)
            gts.append(gt)
    if gts:
        adv_datas = np.concatenate(np.asarray(adv_datas), axis=0)
        gts = np.asarray(gts)
        pred_logits_adv = model.predict(adv_datas)
        pred_labels_adv = np.argmax(pred_logits_adv, axis=1)
        accuracy_adv = np.mean(np.equal(pred_labels_adv, gts))
        LOGGER.info(TAG, 'mis-classification rate of adversaries is : %s',
                    accuracy_adv)


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_value_error():
    """HopSkipJumpAttack should reject an unknown stepsize_search strategy."""
    model = get_model()
    norm = 'l2'
    with pytest.raises(ValueError):
        assert HopSkipJumpAttack(model, constraint=norm, stepsize_search='bad-search')

MindArmour focuses on the security and privacy of AI. It aims to enhance the security and trustworthiness of models and to protect users' data privacy. It consists of three main modules: the adversarial robustness module, the Fuzz Testing module, and the privacy protection and evaluation module.

Adversarial robustness module: this module evaluates a model's robustness against adversarial examples and provides model-hardening methods that strengthen a model's resistance to adversarial attacks. It contains four sub-modules: adversarial example generation, adversarial example detection, model defense, and attack/defense evaluation.
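As a rough illustration of the adversarial example generation sub-module, the sketch below mirrors the black-box attack flow used in test_hsja.py above: wrap the network in a BlackModel subclass, construct a HopSkipJumpAttack, and call generate(). The names trained_net, images, and run_untargeted_hsja are placeholders introduced here for illustration and are not part of MindArmour; treat this as a minimal sketch of the API usage shown in the test, not a definitive recipe.

import numpy as np
from mindspore import Tensor

from mindarmour import BlackModel
from mindarmour.adv_robustness.attacks import HopSkipJumpAttack


class QueryOnlyModel(BlackModel):
    """Black-box wrapper: the attack only ever calls predict()."""

    def __init__(self, network):
        super(QueryOnlyModel, self).__init__()
        self._network = network

    def predict(self, inputs):
        # The wrapped network is assumed to return logits as a MindSpore Tensor.
        return self._network(Tensor(inputs.astype(np.float32))).asnumpy()


def run_untargeted_hsja(trained_net, images):
    """Generate untargeted adversarial examples for a NumPy batch `images` (placeholder helper)."""
    model = QueryOnlyModel(trained_net)
    attack = HopSkipJumpAttack(model, constraint='l2', stepsize_search='grid_search')
    # Passing None as the labels runs the untargeted variant, as in the test above.
    success_list, adv_images, _ = attack.generate(images, None)
    return success_list, adv_images

For a targeted attack, the test above shows the extra steps: build target labels, call attack.set_target_images() with images of the target classes, and pass the target labels to generate().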