You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

adversarial_attack.py 10 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275
  1. # Copyright 2022 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
"""Adversarial facial-recognition attack: mask-based target and non-target attacks."""
  16. import os
  17. import re
  18. import numpy as np
  19. import face_recognition as fr
  20. import face_recognition_models as frm
  21. import dlib
  22. from PIL import Image, ImageDraw
  23. import mindspore
  24. import mindspore.dataset.vision.py_transforms as P
  25. from mindspore.dataset.vision.py_transforms import ToPIL as ToPILImage
  26. from mindspore.dataset.vision.py_transforms import ToTensor
  27. from mindspore import Parameter, ops, nn, Tensor
  28. from loss_design import MyTrainOneStepCell, MyWithLossCellTargetAttack, \
  29. MyWithLossCellNonTargetAttack, FaceLossTargetAttack, FaceLossNoTargetAttack
  30. class FaceAdversarialAttack():
  31. """
  32. Class used to create adversarial facial recognition attacks.
  33. Args:
  34. input_img (numpy.ndarray): The input image.
  35. target_img (numpy.ndarray): The target image.
  36. seed (int): optional Sets custom seed for reproducibility. Default is generated randomly.
  37. net (mindspore.Model): face recognition model.
  38. """
  39. def __init__(self, input_img, target_img, net, seed=None):
  40. if seed is not None:
  41. np.random.seed(seed)
  42. self.mean = Tensor([0.485, 0.456, 0.406])
  43. self.std = Tensor([0.229, 0.224, 0.225])
  44. self.expand_dims = mindspore.ops.ExpandDims()
  45. self.imageize = ToPILImage()
  46. self.tensorize = ToTensor()
  47. self.normalize = P.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  48. self.resnet = net
  49. self.input_tensor = Tensor(self.normalize(self.tensorize(input_img)))
  50. self.target_tensor = Tensor(self.normalize(self.tensorize(target_img)))
  51. self.input_emb = self.resnet(self.expand_dims(self.input_tensor, 0))
  52. self.target_emb = self.resnet(self.expand_dims(self.target_tensor, 0))
  53. self.adversarial_emb = None
  54. self.mask_tensor = create_mask(input_img)
  55. self.ref = self.mask_tensor
  56. self.pm = Parameter(self.mask_tensor)
  57. self.opt = nn.Adam([self.pm], learning_rate=0.01, weight_decay=0.0001)
  58. def train(self, attack_method):
  59. """
  60. Optimized adversarial image.
  61. Args:
  62. attack_method (String) : Including target attack and non_target attack.
  63. Returns:
  64. Tensor, adversarial image.
  65. Tensor, mask image.
  66. """
  67. if attack_method == "non_target_attack":
  68. loss = FaceLossNoTargetAttack()
  69. net_with_criterion = MyWithLossCellNonTargetAttack(self.resnet, loss, self.input_tensor)
  70. if attack_method == "target_attack":
  71. loss = FaceLossTargetAttack(self.target_emb)
  72. net_with_criterion = MyWithLossCellTargetAttack(self.resnet, loss, self.input_tensor)
  73. train_net = MyTrainOneStepCell(net_with_criterion, self.opt)
  74. for i in range(2000):
  75. self.mask_tensor = Tensor(self.pm)
  76. loss = train_net(self.mask_tensor)
  77. print("epoch %d ,loss: %f \n " % (i, loss.asnumpy().item()))
  78. self.mask_tensor = ops.clip_by_value(
  79. self.mask_tensor, Tensor(0, mindspore.float32), Tensor(1, mindspore.float32))
  80. adversarial_tensor = apply(
  81. self.input_tensor,
  82. (self.mask_tensor - self.mean[:, None, None]) / self.std[:, None, None],
  83. self.ref)
  84. adversarial_tensor = self._reverse_norm(adversarial_tensor)
  85. processed_input_tensor = self._reverse_norm(self.input_tensor)
  86. processed_target_tensor = self._reverse_norm(self.target_tensor)
  87. return {
  88. "adversarial_tensor": adversarial_tensor,
  89. "mask_tensor": self.mask_tensor,
  90. "processed_input_tensor": processed_input_tensor,
  91. "processed_target_tensor": processed_target_tensor
  92. }
  93. def test_target_attack(self):
  94. """
  95. The model is used to test the recognition ability of adversarial images under target attack.
  96. """
  97. adversarial_tensor = apply(
  98. self.input_tensor,
  99. (self.mask_tensor - self.mean[:, None, None]) / self.std[:, None, None],
  100. self.ref)
  101. self.adversarial_emb = self.resnet(self.expand_dims(adversarial_tensor, 0))
  102. self.input_emb = self.resnet(self.expand_dims(self.input_tensor, 0))
  103. self.target_emb = self.resnet(self.expand_dims(self.target_tensor, 0))
  104. adversarial_index = np.argmax(self.adversarial_emb.asnumpy())
  105. target_index = np.argmax(self.target_emb.asnumpy())
  106. input_index = np.argmax(self.input_emb.asnumpy())
  107. print("input_label:", input_index)
  108. print("target_label:", target_index)
  109. print("The confidence of the input image on the input label:", self.input_emb.asnumpy()[0][input_index])
  110. print("The confidence of the input image on the target label:", self.input_emb.asnumpy()[0][target_index])
  111. print("================================")
  112. print("adversarial_label:", adversarial_index)
  113. print("The confidence of the adversarial sample on the correct label:",
  114. self.adversarial_emb.asnumpy()[0][input_index])
  115. print("The confidence of the adversarial sample on the target label:",
  116. self.adversarial_emb.asnumpy()[0][target_index])
  117. print("input_label: %d, target_label: %d, adversarial_label: %d"
  118. % (input_index, target_index, adversarial_index))
  119. def test_non_target_attack(self):
  120. """
  121. The model is used to test the recognition ability of adversarial images under non_target attack.
  122. """
  123. adversarial_tensor = apply(
  124. self.input_tensor,
  125. (self.mask_tensor - self.mean[:, None, None]) / self.std[:, None, None],
  126. self.ref)
  127. self.adversarial_emb = self.resnet(self.expand_dims(adversarial_tensor, 0))
  128. self.input_emb = self.resnet(self.expand_dims(self.input_tensor, 0))
  129. adversarial_index = np.argmax(self.adversarial_emb.asnumpy())
  130. input_index = np.argmax(self.input_emb.asnumpy())
  131. print("input_label:", input_index)
  132. print("The confidence of the input image on the input label:", self.input_emb.asnumpy()[0][input_index])
  133. print("================================")
  134. print("adversarial_label:", adversarial_index)
  135. print("The confidence of the adversarial sample on the correct label:",
  136. self.adversarial_emb.asnumpy()[0][input_index])
  137. print("The confidence of the adversarial sample on the adversarial label:",
  138. self.adversarial_emb.asnumpy()[0][adversarial_index])
  139. print(
  140. "input_label: %d, adversarial_label: %d" % (input_index, adversarial_index))
  141. def _reverse_norm(self, image_tensor):
  142. """
  143. Reverses normalization for a given image_tensor.
  144. Args:
  145. image_tensor (Tensor): Tensor.
  146. Returns:
  147. Tensor, image.
  148. """
  149. tensor = image_tensor * self.std[:, None, None] + self.mean[:, None, None]
  150. return tensor
  151. def apply(image_tensor, mask_tensor, reference_tensor):
  152. """
  153. Apply a mask over an image.
  154. Args:
  155. image_tensor (Tensor): Canvas to be used to apply mask on.
  156. mask_tensor (Tensor): Mask to apply over the image.
  157. reference_tensor (Tensor): Used to reference mask boundaries
  158. Returns:
  159. Tensor, image.
  160. """
  161. tensor = mindspore.numpy.where((reference_tensor == 0), image_tensor, mask_tensor)
  162. return tensor
  163. def create_mask(face_image):
  164. """
  165. Create mask image.
  166. Args:
  167. face_image (PIL.Image): image of a detected face.
  168. Returns:
  169. mask_tensor : a mask image.
  170. """
  171. mask = Image.new('RGB', face_image.size, color=(0, 0, 0))
  172. d = ImageDraw.Draw(mask)
  173. landmarks = fr.face_landmarks(np.array(face_image))
  174. area = [landmark
  175. for landmark in landmarks[0]['chin']
  176. if landmark[1] > max(landmarks[0]['nose_tip'])[1]]
  177. area.append(landmarks[0]['nose_bridge'][1])
  178. d.polygon(area, fill=(255, 255, 255))
  179. mask = np.array(mask)
  180. mask = mask.astype(np.float32)
  181. for i in range(mask.shape[0]):
  182. for j in range(mask.shape[1]):
  183. for k in range(mask.shape[2]):
  184. if mask[i][j][k] == 255.:
  185. mask[i][j][k] = 0.5
  186. else:
  187. mask[i][j][k] = 0
  188. mask_tensor = Tensor(mask)
  189. mask_tensor = mask_tensor.swapaxes(0, 2).swapaxes(1, 2)
  190. mask_tensor.requires_grad = True
  191. return mask_tensor
  192. def detect_face(image):
  193. """
  194. Face detection and alignment process using dlib library.
  195. Args:
  196. image (numpy.ndarray): image file location.
  197. Returns:
  198. face_image : Resized face image.
  199. """
  200. dlib_detector = dlib.get_frontal_face_detector()
  201. dlib_shape_predictor = dlib.shape_predictor(frm.pose_predictor_model_location())
  202. dlib_image = dlib.load_rgb_image(image)
  203. detections = dlib_detector(dlib_image, 1)
  204. dlib_faces = dlib.full_object_detections()
  205. for det in detections:
  206. dlib_faces.append(dlib_shape_predictor(dlib_image, det))
  207. face_image = Image.fromarray(dlib.get_face_chip(dlib_image, dlib_faces[0], size=112))
  208. return face_image
  209. def load_data(data):
  210. """
  211. An auxiliary function that loads image data.
  212. Args:
  213. data (String): The path to the given data.
  214. Returns:
  215. list : Resize list of face images.
  216. """
  217. image_files = [f for f in os.listdir(data) if re.search(r'.*\.(jpe?g|png)', f)]
  218. image_files_locs = [os.path.join(data, f) for f in image_files]
  219. image_list = []
  220. for img in image_files_locs:
  221. image_list.append(detect_face(img))
  222. return image_list

MindArmour关注AI的安全和隐私问题。致力于增强模型的安全可信、保护用户的数据隐私。主要包含3个模块:对抗样本鲁棒性模块、Fuzz Testing模块、隐私保护与评估模块。 对抗样本鲁棒性模块 对抗样本鲁棒性模块用于评估模型对于对抗样本的鲁棒性,并提供模型增强方法用于增强模型抗对抗样本攻击的能力,提升模型鲁棒性。对抗样本鲁棒性模块包含了4个子模块:对抗样本的生成、对抗样本的检测、模型防御、攻防评估。