You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

loss_design.py 6.2 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154
  1. # Copyright 2022 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """optimization Settings"""
  16. import mindspore
  17. from mindspore import ops, nn, Tensor
  18. from mindspore.dataset.vision.py_transforms import ToTensor
  19. import mindspore.dataset.vision.py_transforms as P
class MyTrainOneStepCell(nn.TrainOneStepCell):
    """
    Encapsulation class of network training.

    Append an optimizer to the training network after that the construct
    function can be called to create the backward graph.

    Args:
        network (Cell): The training network. Note that loss function should have been added.
        optimizer (Optimizer): Optimizer for updating the weights.
        sens (Number): The adjust parameter. Default: 1.0.
    """
    def __init__(self, network, optimizer, sens=1.0):
        super(MyTrainOneStepCell, self).__init__(network, optimizer, sens)
        # get_all=True: gradients are taken w.r.t. the network *inputs*, not
        # its weights -- the optimizer is expected to hold the trainable input
        # (e.g. an adversarial mask) as its parameter, as in patch attacks.
        # sens_param=False: no sensitivity scaling is applied here, so the
        # `sens` value forwarded to the parent is effectively unused by this
        # cell. NOTE(review): confirm that ignoring `sens` is intentional.
        self.grad = ops.composite.GradOperation(get_all=True, sens_param=False)

    def construct(self, *inputs):
        """Defines the computation performed."""
        # First forward pass: the loss value returned to the caller.
        loss = self.network(*inputs)
        # Second pass through autodiff: d(loss)/d(inputs).
        grads = self.grad(self.network)(*inputs)
        # Apply the input-gradients via the optimizer (updates its parameters).
        self.optimizer(grads)
        return loss
  39. class MyWithLossCellTargetAttack(nn.Cell):
  40. """The loss function defined by the target attack"""
  41. def __init__(self, net, loss_fn, input_tensor):
  42. super(MyWithLossCellTargetAttack, self).__init__(auto_prefix=False)
  43. self.net = net
  44. self._loss_fn = loss_fn
  45. self.std = Tensor([0.229, 0.224, 0.225])
  46. self.mean = Tensor([0.485, 0.456, 0.406])
  47. self.expand_dims = mindspore.ops.ExpandDims()
  48. self.normalize = P.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  49. self.tensorize = ToTensor()
  50. self.input_tensor = input_tensor
  51. self.input_emb = self.net(self.expand_dims(self.input_tensor, 0))
  52. @property
  53. def backbone_network(self):
  54. return self.net
  55. def construct(self, mask_tensor):
  56. ref = mask_tensor
  57. adversarial_tensor = mindspore.numpy.where(
  58. (ref == 0),
  59. self.input_tensor,
  60. (mask_tensor - self.mean[:, None, None]) / self.std[:, None, None])
  61. adversarial_emb = self.net(self.expand_dims(adversarial_tensor, 0))
  62. loss = self._loss_fn(adversarial_emb)
  63. return loss
  64. class MyWithLossCellNonTargetAttack(nn.Cell):
  65. """The loss function defined by the non target attack"""
  66. def __init__(self, net, loss_fn, input_tensor):
  67. super(MyWithLossCellNonTargetAttack, self).__init__(auto_prefix=False)
  68. self.net = net
  69. self._loss_fn = loss_fn
  70. self.std = Tensor([0.229, 0.224, 0.225])
  71. self.mean = Tensor([0.485, 0.456, 0.406])
  72. self.expand_dims = mindspore.ops.ExpandDims()
  73. self.normalize = P.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  74. self.tensorize = ToTensor()
  75. self.input_tensor = input_tensor
  76. self.input_emb = self.net(self.expand_dims(self.input_tensor, 0))
  77. @property
  78. def backbone_network(self):
  79. return self.net
  80. def construct(self, mask_tensor):
  81. ref = mask_tensor
  82. adversarial_tensor = mindspore.numpy.where(
  83. (ref == 0),
  84. self.input_tensor,
  85. (mask_tensor - self.mean[:, None, None]) / self.std[:, None, None])
  86. adversarial_emb = self.net(self.expand_dims(adversarial_tensor, 0))
  87. loss = self._loss_fn(adversarial_emb, self.input_emb)
  88. return loss
  89. class FaceLossTargetAttack(nn.Cell):
  90. """The loss function of the target attack"""
  91. def __init__(self, target_emb):
  92. super(FaceLossTargetAttack, self).__init__()
  93. self.uniformreal = ops.UniformReal(seed=2)
  94. self.sum = ops.ReduceSum(keep_dims=False)
  95. self.norm = nn.Norm(keep_dims=True)
  96. self.zeroslike = ops.ZerosLike()
  97. self.concat_op1 = ops.Concat(1)
  98. self.concat_op2 = ops.Concat(2)
  99. self.pow = ops.Pow()
  100. self.reduce_sum = ops.operations.ReduceSum()
  101. self.target_emb = target_emb
  102. self.abs = ops.Abs()
  103. self.reduce_mean = ops.ReduceMean()
  104. def construct(self, adversarial_emb):
  105. prod_sum = self.reduce_sum(adversarial_emb * self.target_emb, (1,))
  106. square1 = self.reduce_sum(ops.functional.square(adversarial_emb), (1,))
  107. square2 = self.reduce_sum(ops.functional.square(self.target_emb), (1,))
  108. denom = ops.functional.sqrt(square1) * ops.functional.sqrt(square2)
  109. loss = -(prod_sum / denom)
  110. return loss
  111. class FaceLossNoTargetAttack(nn.Cell):
  112. """The loss function of the non-target attack"""
  113. def __init__(self):
  114. """Initialization"""
  115. super(FaceLossNoTargetAttack, self).__init__()
  116. self.uniformreal = ops.UniformReal(seed=2)
  117. self.sum = ops.ReduceSum(keep_dims=False)
  118. self.norm = nn.Norm(keep_dims=True)
  119. self.zeroslike = ops.ZerosLike()
  120. self.concat_op1 = ops.Concat(1)
  121. self.concat_op2 = ops.Concat(2)
  122. self.pow = ops.Pow()
  123. self.reduce_sum = ops.operations.ReduceSum()
  124. self.abs = ops.Abs()
  125. self.reduce_mean = ops.ReduceMean()
  126. def construct(self, adversarial_emb, input_emb):
  127. prod_sum = self.reduce_sum(adversarial_emb * input_emb, (1,))
  128. square1 = self.reduce_sum(ops.functional.square(adversarial_emb), (1,))
  129. square2 = self.reduce_sum(ops.functional.square(input_emb), (1,))
  130. denom = ops.functional.sqrt(square1) * ops.functional.sqrt(square2)
  131. loss = prod_sum / denom
  132. return loss

MindArmour关注AI的安全和隐私问题。致力于增强模型的安全可信、保护用户的数据隐私。主要包含3个模块:对抗样本鲁棒性模块、Fuzz Testing模块、隐私保护与评估模块。 对抗样本鲁棒性模块 对抗样本鲁棒性模块用于评估模型对于对抗样本的鲁棒性,并提供模型增强方法用于增强模型抗对抗样本攻击的能力,提升模型鲁棒性。对抗样本鲁棒性模块包含了4个子模块:对抗样本的生成、对抗样本的检测、模型防御、攻防评估。