
test_optimizer.py 8.3 kB

# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np

import megengine.autodiff as ad
import megengine.functional as F
from megengine import Parameter, optimizer
from megengine.jit import trace
from megengine.module import Linear, Module
from megengine.tensor import tensor


class MLP(Module):
    def __init__(self):
        super().__init__()
        self.dense0 = Linear(28, 50)
        self.dense1 = Linear(50, 20)

    def forward(self, x):
        x = self.dense0(x)
        x = F.relu(x)
        x = self.dense1(x)
        return x


class Simple(Module):
    def __init__(self):
        super().__init__()
        self.a = Parameter(1.23, dtype=np.float32)

    def forward(self, x):
        x = x * self.a
        return x
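

# The checks below run against Simple rather than MLP: with a single scalar
# parameter `a` and loss = (x * a).sum(), the gradient with respect to `a` is
# just x.sum(), so each optimizer's expected update can be reproduced exactly
# in NumPy by the CheckValue helpers.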


def _test_optimizer(opt_str, test_case, check_class, update_lr=False):
    iter_num = 3
    net = Simple()
    opt = getattr(optimizer, opt_str)(net.parameters(), **test_case)
    check_func = check_class(net, **test_case)
    gm = ad.GradManager().attach(net.parameters())

    step = 0
    data_shape = (2, 28)

    for i in range(iter_num):
        if update_lr and i == 1:  # change learning rate
            for group in opt.param_groups:
                group["lr"] += 0.01
            check_func.lr += 0.01
        data = tensor(np.random.random(data_shape).astype(np.float32))

        opt.clear_grad()
        with gm:
            pred = net(data)
            loss = pred.sum()
            gm.backward(loss)

        ori_params = {}
        for param in net.parameters():
            ori_params[param] = np.copy(param.numpy())
        opt.step()
        step += 1
        check_func(ori_params, net.parameters(), step)

    # static graph
    for symbolic in (False, True):

        @trace(symbolic=symbolic)
        def train_func(data, *, opt=None, gm=None):
            opt.clear_grad()
            with gm:
                pred = net(data)
                loss = pred.sum()
                gm.backward(loss)
            opt.step()

        # reset net and opt
        net = Simple()
        opt = getattr(optimizer, opt_str)(net.parameters(), **test_case)
        gm = ad.GradManager().attach(net.parameters())
        check_func = check_class(net, **test_case)
        step = 0
        for i in range(iter_num):
            if update_lr and i == 1:  # change learning rate
                for group in opt.param_groups:
                    group["lr"] += 0.01
                check_func.lr += 0.01
            ori_params = {}
            for param in net.parameters():
                ori_params[param] = np.copy(param.numpy())
            train_func(np.random.random(data_shape).astype(np.float32), opt=opt, gm=gm)
            step += 1
            check_func(ori_params, net.parameters(), step)
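

# SGD reference update, mirrored by CheckValue in test_sgd below:
#   with momentum:  slot = grad + momentum * slot;  param += -lr * slot
#   plain SGD:      param += -lr * grad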


def test_sgd():
    class CheckValue:
        def __init__(self, net, **kwarg):
            self.slots = {}
            for param in net.parameters():
                self.slots[param] = np.zeros(param.shape).astype(np.float32)
            for k, v in kwarg.items():
                setattr(self, k, v)

        def __call__(self, ori_params, new_params, step):
            for param in new_params:
                grad = param.grad.numpy()
                if hasattr(self, "momentum"):
                    self.slots[param] = grad + self.slots[param] * self.momentum
                    delta = -self.lr * self.slots[param]
                else:
                    delta = -self.lr * grad
                np.testing.assert_almost_equal(param.numpy(), ori_params[param] + delta)

    cases = [
        {"momentum": 0.9, "lr": 0.01},  # SGD with momentum
        {"lr": 0.01},  # simple SGD
        {"weight_decay": 0.1, "lr": 0.01},  # with weight_decay
    ]
    for case in cases:
        _test_optimizer("SGD", case, CheckValue)
        _test_optimizer("SGD", case, CheckValue, update_lr=True)


def test_adam():
    class CheckValue:
        def __init__(self, net, **kwarg):
            self.m_slots = {}
            self.v_slots = {}
            for param in net.parameters():
                self.m_slots[param] = np.zeros(param.shape).astype(np.float32)
                self.v_slots[param] = np.zeros(param.shape).astype(np.float32)
            for k, v in kwarg.items():
                setattr(self, k, v)

        def __call__(self, ori_params, new_params, step):
            for param in new_params:
                grad = param.grad.numpy()
                m = self.m_slots[param]
                v = self.v_slots[param]
                m *= self.betas[0]
                m += (1 - self.betas[0]) * grad
                v *= self.betas[1]
                v += (1 - self.betas[1]) * grad * grad
                delta = (m / (1 - self.betas[0] ** step)) / (
                    np.sqrt(v / (1 - self.betas[1] ** step)) + self.eps
                )
                np.testing.assert_almost_equal(
                    param.numpy(), ori_params[param] - self.lr * delta
                )

    cases = [
        {"betas": (0.8, 0.9), "eps": 1e-04, "lr": 0.01},
        {
            "betas": (0.8, 0.9),
            "eps": 1e-04,
            "lr": 0.01,
            "weight_decay": 0.1,
        },  # with weight_decay
    ]
    for case in cases:
        _test_optimizer("Adam", case, CheckValue)
        _test_optimizer("Adam", case, CheckValue, update_lr=True)


def test_adagrad():
    class CheckValue:
        def __init__(self, net, **kwarg):
            self.s_slots = {}
            for param in net.parameters():
                self.s_slots[param] = np.zeros(param.shape).astype(np.float32)
            for k, v in kwarg.items():
                setattr(self, k, v)

        def __call__(self, ori_params, new_params, step):
            for param in new_params:
                grad = param.grad.numpy()
                self.s_slots[param] += grad ** 2
                delta = grad / (self.s_slots[param] + self.eps) ** 0.5
                delta *= -(self.lr / (1 + (step - 1) * self.lr_decay))
                np.testing.assert_almost_equal(param.numpy(), ori_params[param] + delta)

    cases = [
        {"lr": 0.01, "eps": 1e-06, "lr_decay": 0.01},
        {"lr": 0.01, "eps": 1e-06, "lr_decay": 0.0},  # without lr_decay
        {
            "lr": 0.01,
            "eps": 1e-06,
            "lr_decay": 0.01,
            "weight_decay": 0.1,
        },  # with weight_decay
    ]
    for case in cases:
        _test_optimizer("Adagrad", case, CheckValue)
        _test_optimizer("Adagrad", case, CheckValue, update_lr=True)


def test_adadelta():
    class CheckValue:
        def __init__(self, net, **kwarg):
            self.s_slots = {}
            self.a_slots = {}
            for param in net.parameters():
                self.s_slots[param] = np.zeros(param.shape).astype(np.float32)
                self.a_slots[param] = np.zeros(param.shape).astype(np.float32)
            for k, v in kwarg.items():
                setattr(self, k, v)

        def __call__(self, ori_params, new_params, step):
            for param in new_params:
                grad = param.grad.numpy()
                self.s_slots[param] = self.s_slots[param] * self.rho + grad ** 2 * (
                    1 - self.rho
                )
                delta = (
                    grad
                    * ((self.a_slots[param] + self.eps) ** 0.5)
                    / (self.s_slots[param] + self.eps) ** 0.5
                )
                self.a_slots[param] = self.a_slots[param] * self.rho + delta ** 2 * (
                    1 - self.rho
                )
                delta *= -self.lr
                np.testing.assert_almost_equal(param.numpy(), ori_params[param] + delta)

    cases = [
        {"lr": 1.0, "eps": 1e-06, "rho": 0.9},
        {"lr": 1.0, "eps": 1e-06, "rho": 0.9, "weight_decay": 0.9},  # with weight_decay
    ]
    for case in cases:
        _test_optimizer("Adadelta", case, CheckValue)
        _test_optimizer("Adadelta", case, CheckValue, update_lr=True)

The MegEngine installation package bundles the CUDA environment needed to run code on a GPU, so there is no separate CPU or GPU build. To run GPU programs, make sure the machine has a GPU device and that its driver is installed. If you would like to try deep-learning development on a cloud GPU computing platform, you are welcome to visit the MegStudio platform.
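
As a quick sanity check, a minimal sketch along these lines (assuming the `is_cuda_available` helper exposed by the `megengine` package) reports whether the bundled CUDA runtime can see a usable GPU:

import megengine as mge

# True if the bundled CUDA runtime detects a usable GPU;
# False means computation will fall back to the CPU.
print("GPU available:", mge.is_cuda_available())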