You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

test_attack_eval.py 3.2 kB

5 years ago
1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192939495
  1. # Copyright 2019 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. """
  15. Attack evaluation test.
  16. """
  17. import numpy as np
  18. import pytest
  19. from mindarmour.evaluations.attack_evaluation import AttackEvaluate
  20. @pytest.mark.level0
  21. @pytest.mark.platform_arm_ascend_training
  22. @pytest.mark.platform_x86_ascend_training
  23. @pytest.mark.env_card
  24. @pytest.mark.component_mindarmour
  25. def test_attack_eval():
  26. # prepare test data
  27. np.random.seed(1024)
  28. inputs = np.random.normal(size=(3, 512, 512, 3))
  29. labels = np.array([[0.1, 0.1, 0.2, 0.6],
  30. [0.1, 0.7, 0.0, 0.2],
  31. [0.8, 0.1, 0.0, 0.1]])
  32. adv_x = inputs + np.ones((3, 512, 512, 3))*0.001
  33. adv_y = np.array([[0.1, 0.1, 0.2, 0.6],
  34. [0.1, 0.0, 0.8, 0.1],
  35. [0.0, 0.9, 0.1, 0.0]])
  36. # create obj
  37. attack_eval = AttackEvaluate(inputs, labels, adv_x, adv_y)
  38. # run eval
  39. mr = attack_eval.mis_classification_rate()
  40. acac = attack_eval.avg_conf_adv_class()
  41. l_0, l_2, l_inf = attack_eval.avg_lp_distance()
  42. ass = attack_eval.avg_ssim()
  43. nte = attack_eval.nte()
  44. res = [mr, acac, l_0, l_2, l_inf, ass, nte]
  45. # compare
  46. expected_value = [0.6666, 0.8500, 1.0, 0.0009, 0.0001, 0.9999, 0.75]
  47. assert np.allclose(res, expected_value, 0.0001, 0.0001)
  48. @pytest.mark.level0
  49. @pytest.mark.platform_arm_ascend_training
  50. @pytest.mark.platform_x86_ascend_training
  51. @pytest.mark.env_card
  52. @pytest.mark.component_mindarmour
  53. def test_value_error():
  54. # prepare test data
  55. np.random.seed(1024)
  56. inputs = np.random.normal(size=(3, 512, 512, 3))
  57. labels = np.array([[0.1, 0.1, 0.2, 0.6],
  58. [0.1, 0.7, 0.0, 0.2],
  59. [0.8, 0.1, 0.0, 0.1]])
  60. adv_x = inputs + np.ones((3, 512, 512, 3))*0.001
  61. adv_y = np.array([[0.1, 0.1, 0.2, 0.6],
  62. [0.1, 0.0, 0.8, 0.1],
  63. [0.0, 0.9, 0.1, 0.0]])
  64. # create obj
  65. with pytest.raises(ValueError) as e:
  66. assert AttackEvaluate(inputs, labels, adv_x, adv_y, targeted=True)
  67. assert str(e.value) == 'targeted attack need target_label, but got None.'
  68. @pytest.mark.level0
  69. @pytest.mark.platform_arm_ascend_training
  70. @pytest.mark.platform_x86_ascend_training
  71. @pytest.mark.env_card
  72. @pytest.mark.component_mindarmour
  73. def test_value_error():
  74. # prepare test data
  75. np.random.seed(1024)
  76. inputs = np.array([])
  77. labels = np.array([])
  78. adv_x = inputs
  79. adv_y = np.array([])
  80. # create obj
  81. with pytest.raises(ValueError) as e:
  82. assert AttackEvaluate(inputs, labels, adv_x, adv_y)
  83. assert str(e.value) == 'inputs must not be empty'

MindArmour关注AI的安全和隐私问题。致力于增强模型的安全可信、保护用户的数据隐私。主要包含3个模块:对抗样本鲁棒性模块、Fuzz Testing模块、隐私保护与评估模块。 对抗样本鲁棒性模块 对抗样本鲁棒性模块用于评估模型对于对抗样本的鲁棒性,并提供模型增强方法用于增强模型抗对抗样本攻击的能力,提升模型鲁棒性。对抗样本鲁棒性模块包含了4个子模块:对抗样本的生成、对抗样本的检测、模型防御、攻防评估。