
resnet_cifar10.py
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNet-50 for CIFAR-10, defined with MindSpore."""
import numpy as np

from mindspore import nn
from mindspore.ops import operations as P
from mindspore.common.tensor import Tensor

def variance_scaling_raw(shape):
    value = np.random.normal(size=shape).astype(np.float32)
    return Tensor(value)

def weight_variable(shape):
    value = np.random.normal(size=shape).astype(np.float32)
    return Tensor(value)

def sweight_variable(shape):
    value = np.random.uniform(size=shape).astype(np.float32)
    return Tensor(value)

def weight_variable_0(shape):
    zeros = np.zeros(shape).astype(np.float32)
    return Tensor(zeros)

def weight_variable_1(shape):
    ones = np.ones(shape).astype(np.float32)
    return Tensor(ones)

def conv3x3(in_channels, out_channels, stride=1, padding=0):
    """3x3 convolution."""
    weight_shape = (out_channels, in_channels, 3, 3)
    weight = variance_scaling_raw(weight_shape)
    return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride,
                     padding=padding, weight_init=weight, has_bias=False, pad_mode="same")

def conv1x1(in_channels, out_channels, stride=1, padding=0):
    """1x1 convolution."""
    weight_shape = (out_channels, in_channels, 1, 1)
    weight = variance_scaling_raw(weight_shape)
    return nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride,
                     padding=padding, weight_init=weight, has_bias=False, pad_mode="same")

def conv7x7(in_channels, out_channels, stride=1, padding=0):
    """7x7 convolution."""
    weight_shape = (out_channels, in_channels, 7, 7)
    weight = variance_scaling_raw(weight_shape)
    return nn.Conv2d(in_channels, out_channels, kernel_size=7, stride=stride,
                     padding=padding, weight_init=weight, has_bias=False, pad_mode="same")

def bn_with_initialize(out_channels):
    shape = (out_channels,)
    mean = weight_variable_0(shape)
    var = weight_variable_1(shape)
    beta = weight_variable_0(shape)
    gamma = sweight_variable(shape)
    return nn.BatchNorm2d(out_channels, momentum=0.99, eps=0.00001, gamma_init=gamma,
                          beta_init=beta, moving_mean_init=mean, moving_var_init=var)

def bn_with_initialize_last(out_channels):
    shape = (out_channels,)
    mean = weight_variable_0(shape)
    var = weight_variable_1(shape)
    beta = weight_variable_0(shape)
    gamma = sweight_variable(shape)
    return nn.BatchNorm2d(out_channels, momentum=0.99, eps=0.00001, gamma_init=gamma,
                          beta_init=beta, moving_mean_init=mean, moving_var_init=var)

def fc_with_initialize(input_channels, out_channels):
    weight_shape = (out_channels, input_channels)
    weight = Tensor(np.random.normal(size=weight_shape).astype(np.float32))
    bias_shape = (out_channels,)
    bias = Tensor(np.random.uniform(size=bias_shape).astype(np.float32))
    return nn.Dense(input_channels, out_channels, weight, bias)

class ResidualBlock(nn.Cell):
    expansion = 4

    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 down_sample=False):
        super(ResidualBlock, self).__init__()
        out_chls = out_channels // self.expansion
        self.conv1 = conv1x1(in_channels, out_chls, stride=stride, padding=0)
        self.bn1 = bn_with_initialize(out_chls)
        self.conv2 = conv3x3(out_chls, out_chls, stride=1, padding=0)
        self.bn2 = bn_with_initialize(out_chls)
        self.conv3 = conv1x1(out_chls, out_channels, stride=1, padding=0)
        self.bn3 = bn_with_initialize_last(out_channels)
        self.relu = P.ReLU()
        self.add = P.TensorAdd()

    def construct(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        out = self.add(out, identity)
        out = self.relu(out)
        return out

class ResidualBlockWithDown(nn.Cell):
    expansion = 4

    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 down_sample=False):
        super(ResidualBlockWithDown, self).__init__()
        out_chls = out_channels // self.expansion
        self.conv1 = conv1x1(in_channels, out_chls, stride=stride, padding=0)
        self.bn1 = bn_with_initialize(out_chls)
        self.conv2 = conv3x3(out_chls, out_chls, stride=1, padding=0)
        self.bn2 = bn_with_initialize(out_chls)
        self.conv3 = conv1x1(out_chls, out_channels, stride=1, padding=0)
        self.bn3 = bn_with_initialize_last(out_channels)
        self.relu = P.ReLU()
        self.downSample = down_sample
        self.conv_down_sample = conv1x1(in_channels, out_channels, stride=stride, padding=0)
        self.bn_down_sample = bn_with_initialize(out_channels)
        self.add = P.TensorAdd()

    def construct(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        identity = self.conv_down_sample(identity)
        identity = self.bn_down_sample(identity)
        out = self.add(out, identity)
        out = self.relu(out)
        return out

class MakeLayer0(nn.Cell):
    def __init__(self, block, layer_num, in_channels, out_channels, stride):
        super(MakeLayer0, self).__init__()
        self.a = ResidualBlockWithDown(in_channels, out_channels, stride=1, down_sample=True)
        self.b = block(out_channels, out_channels, stride=stride)
        self.c = block(out_channels, out_channels, stride=1)

    def construct(self, x):
        x = self.a(x)
        x = self.b(x)
        x = self.c(x)
        return x

class MakeLayer1(nn.Cell):
    def __init__(self, block, layer_num, in_channels, out_channels, stride):
        super(MakeLayer1, self).__init__()
        self.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True)
        self.b = block(out_channels, out_channels, stride=1)
        self.c = block(out_channels, out_channels, stride=1)
        self.d = block(out_channels, out_channels, stride=1)

    def construct(self, x):
        x = self.a(x)
        x = self.b(x)
        x = self.c(x)
        x = self.d(x)
        return x

class MakeLayer2(nn.Cell):
    def __init__(self, block, layer_num, in_channels, out_channels, stride):
        super(MakeLayer2, self).__init__()
        self.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True)
        self.b = block(out_channels, out_channels, stride=1)
        self.c = block(out_channels, out_channels, stride=1)
        self.d = block(out_channels, out_channels, stride=1)
        self.e = block(out_channels, out_channels, stride=1)
        self.f = block(out_channels, out_channels, stride=1)

    def construct(self, x):
        x = self.a(x)
        x = self.b(x)
        x = self.c(x)
        x = self.d(x)
        x = self.e(x)
        x = self.f(x)
        return x

class MakeLayer3(nn.Cell):
    def __init__(self, block, layer_num, in_channels, out_channels, stride):
        super(MakeLayer3, self).__init__()
        self.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True)
        self.b = block(out_channels, out_channels, stride=1)
        self.c = block(out_channels, out_channels, stride=1)

    def construct(self, x):
        x = self.a(x)
        x = self.b(x)
        x = self.c(x)
        return x

class ResNet(nn.Cell):
    def __init__(self, block, layer_num, num_classes=100):
        super(ResNet, self).__init__()
        self.num_classes = num_classes
        self.conv1 = conv7x7(3, 64, stride=2, padding=0)
        self.bn1 = bn_with_initialize(64)
        self.relu = P.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")
        self.layer1 = MakeLayer0(block, layer_num[0], in_channels=64, out_channels=256, stride=1)
        self.layer2 = MakeLayer1(block, layer_num[1], in_channels=256, out_channels=512, stride=2)
        self.layer3 = MakeLayer2(block, layer_num[2], in_channels=512, out_channels=1024, stride=2)
        self.layer4 = MakeLayer3(block, layer_num[3], in_channels=1024, out_channels=2048, stride=2)
        self.pool = P.ReduceMean(keep_dims=True)
        self.squeeze = P.Squeeze(axis=(2, 3))
        self.fc = fc_with_initialize(512 * block.expansion, num_classes)

    def construct(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.pool(x, (2, 3))
        x = self.squeeze(x)
        x = self.fc(x)
        return x

def resnet50_cifar10(num_classes):
    return ResNet(ResidualBlock, [3, 4, 6, 3], num_classes)
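
A quick way to sanity-check the definition above is a forward pass on a dummy CIFAR-10-shaped batch. The snippet below is a minimal sketch, not part of the original file: it assumes a working MindSpore install and uses PyNative mode so it runs eagerly; the batch size, class count, and random inputs are illustrative.

import numpy as np
from mindspore import Tensor, context

context.set_context(mode=context.PYNATIVE_MODE)  # eager execution

net = resnet50_cifar10(num_classes=10)
# CIFAR-10 images are 3x32x32; any NCHW float32 batch of that shape works.
dummy_batch = Tensor(np.random.rand(2, 3, 32, 32).astype(np.float32))
logits = net(dummy_batch)
print(logits.shape)  # expected: (2, 10), one logit per class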

MindArmour addresses the security and privacy problems of AI. It aims to strengthen the security and trustworthiness of models and to protect users' data privacy. It consists of three modules: an adversarial example robustness module, a fuzz testing module, and a privacy protection and evaluation module. Adversarial example robustness module: this module evaluates a model's robustness against adversarial examples and provides model-hardening methods to strengthen a model's resistance to adversarial attacks and improve its robustness. It contains four submodules: adversarial example generation, adversarial example detection, model defense, and attack/defense evaluation.
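
As a concrete illustration of the adversarial example generation submodule, the sketch below crafts FGSM adversarial inputs for the ResNet defined above. This is an assumption-laden example, not code from this repository: the import path matches recent MindArmour releases (older ones expose the attack under mindarmour.attacks), eps is an illustrative budget, and the label encoding expected by generate depends on the loss the attack is configured with.

import numpy as np
from mindarmour.adv_robustness.attacks import FastGradientSignMethod

net = resnet50_cifar10(num_classes=10)
# eps is the L-infinity perturbation budget; MindArmour falls back to a
# softmax cross-entropy loss when no loss_fn is supplied (version-dependent).
attack = FastGradientSignMethod(net, eps=0.1)

images = np.random.rand(2, 3, 32, 32).astype(np.float32)  # stand-in batch
labels = np.eye(10)[[1, 7]].astype(np.float32)            # one-hot stand-in labels
adv_images = attack.generate(images, labels)              # numpy in, numpy out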