You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

opr_impl.cpp 10 kB

feat(bazel/windows/xp/sp2/inference): implement inference on windows xp (os version >= sp2) build with bazel * bazel build support(define __DEPLOY_ON_XP_SP2__ when deploying on xp sp2): (dbg)./bazel build //brain/megbrain:load_and_run --cpu='x86_windows_xp' --compiler='clang_cl' -c dbg --copt "-D__DEPLOY_ON_XP_SP2__=1" (opt)./bazel build //brain/megbrain:load_and_run --cpu='x86_windows_xp' --compiler='clang_cl' -c opt --copt "-D__DEPLOY_ON_XP_SP2__=1" * internal behavior: will define MGB_HAVE_THREAD=0 when __DEPLOY_ON_XP_SP2__ is enabled * refer to https://docs.microsoft.com/en-us/cpp/build/configuring-programs-for-windows-xp?view=msvc-160 xp sp2(x86) does not fully support the vc runtime, caused by KERNEL32.dll not implementing some base apis needed by c++ std functions, for example, std::mutex/std::thread/std::condition_variable; as a workaround, we will disable some MegEngine features on the xp sp2 env, for example, multi-threading etc! * about DNN_MUTEX/MGB_MUTEX: if your code will build in inference code (even CPU backends), please replace std::mutex with DNN_MUTEX/MGB_MUTEX * about multi-threading: if your code needs multi-thread support, please enable it only when MGB_HAVE_THREAD=1 * about test build env status 1: Visual Studio 2019(MSVC version <= 14.26.28801)---- pass 2: Visual Studio 2019(MSVC version > 14.26.28801) ---- failed, caused by this 'new' version making the VC runtime depend on the win7 KERNEL32.DLL; this may be fixed in a later Visual Studio 2019 version, but we did not test that at this MR merge point 3: Visual Studio 2017 ---------- pass 4: Visual Studio 2014 ---------- pass GitOrigin-RevId: 65ac48b95e99f2c510fe5db449cc8182d682e113
3 years ago
feat(bazel/windows/xp/sp2/inference): implement inference on windows xp (os version >= sp2) build with bazel * bazel build support(define __DEPLOY_ON_XP_SP2__ when deploying on xp sp2): (dbg)./bazel build //brain/megbrain:load_and_run --cpu='x86_windows_xp' --compiler='clang_cl' -c dbg --copt "-D__DEPLOY_ON_XP_SP2__=1" (opt)./bazel build //brain/megbrain:load_and_run --cpu='x86_windows_xp' --compiler='clang_cl' -c opt --copt "-D__DEPLOY_ON_XP_SP2__=1" * internal behavior: will define MGB_HAVE_THREAD=0 when __DEPLOY_ON_XP_SP2__ is enabled * refer to https://docs.microsoft.com/en-us/cpp/build/configuring-programs-for-windows-xp?view=msvc-160 xp sp2(x86) does not fully support the vc runtime, caused by KERNEL32.dll not implementing some base apis needed by c++ std functions, for example, std::mutex/std::thread/std::condition_variable; as a workaround, we will disable some MegEngine features on the xp sp2 env, for example, multi-threading etc! * about DNN_MUTEX/MGB_MUTEX: if your code will build in inference code (even CPU backends), please replace std::mutex with DNN_MUTEX/MGB_MUTEX * about multi-threading: if your code needs multi-thread support, please enable it only when MGB_HAVE_THREAD=1 * about test build env status 1: Visual Studio 2019(MSVC version <= 14.26.28801)---- pass 2: Visual Studio 2019(MSVC version > 14.26.28801) ---- failed, caused by this 'new' version making the VC runtime depend on the win7 KERNEL32.DLL; this may be fixed in a later Visual Studio 2019 version, but we did not test that at this MR merge point 3: Visual Studio 2017 ---------- pass 4: Visual Studio 2014 ---------- pass GitOrigin-RevId: 65ac48b95e99f2c510fe5db449cc8182d682e113
3 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305
  1. /**
  2. * \file dnn/src/common/elemwise/opr_impl.cpp
  3. * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  4. *
  5. * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  6. *
  7. * Unless required by applicable law or agreed to in writing,
  8. * software distributed under the License is distributed on an
  9. * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. */
  11. #include "src/common/elemwise/kern_defs.cuh"
  12. #include "src/common/utils.h"
  13. #include "megdnn/oprs.h"
  14. #include "megdnn/tensor_format.h"
  15. #include "midout.h"
  16. MIDOUT_DECL(megdnn_common_elemwise)
  17. #include <mutex>
  18. #include <vector>
  19. using namespace megdnn;
namespace {

//! Accumulates the tensor formats of an elemwise op's inputs and deduces
//! the single non-default format they agree on (if any).
class FormatDeducer {
    //! default-constructed TensorFormat, used as the "unset" sentinel
    const TensorFormat m_default;
    //! deduced format; stays equal to m_default until a non-default
    //! format is fed in
    TensorFormat m_result = m_default;

public:
    //! merge one input's format into the deduction; defined out of line
    //! below (asserts that all non-default formats fed in are identical)
    inline void feed(TensorFormat cur);
    //! whether \p f equals the default (i.e. unset) format
    bool is_default(TensorFormat f) const { return f == m_default; }
    //! the deduced format: the common non-default format, or the default
    //! format when only default formats were fed
    TensorFormat get() const { return m_result; }
};
}  // anonymous namespace
using Mode = param::Elemwise::Mode;
using ModeTrait = ElemwiseForward::ModeTrait;

//! Return the trait entry (allowed dtype categories, arity, name,
//! commutativity) for \p mode.
//!
//! The trait table is built lazily on the first call, guarded by a global
//! mutex; subsequent calls only look up the cached entry. The MIDOUT_*
//! macros wrap each per-mode initializer so that unused modes can be
//! stripped from the binary.
const ModeTrait& ModeTrait::from_mode(Mode mode) {
    // DNN_MUTEX instead of std::mutex so builds without full thread
    // support (see MGB_HAVE_THREAD) still compile
    static DNN_MUTEX mtx;
    static std::vector<ModeTrait> traits;
    MEGDNN_LOCK_GUARD(mtx);
    if (traits.empty()) {
        // fetch the entry for mode m, growing the table on demand
        auto get = [&](Mode m) -> ModeTrait& {
            auto im = static_cast<size_t>(m);
            if (im >= traits.size())
                traits.resize(im + 1);
            return traits[im];
        };
// step 1: mark modes that accept integer inputs
#define cb(_m)                                                  \
    MIDOUT_BEGIN(megdnn_common_elemwise, midout_iv(Mode::_m)) { \
        get(Mode::_m).allow_int = true;                         \
    }                                                           \
    MIDOUT_END();
        MEGDNN_FOREACH_ELEMWISE_MODE_UNARY_INT(cb);
        MEGDNN_FOREACH_ELEMWISE_MODE_BINARY_INT(cb);
        MEGDNN_FOREACH_ELEMWISE_MODE_TERNARY_INT(cb);
#undef cb
// step 2: mark modes that accept floating-point inputs
#define cb(_m)                                                  \
    MIDOUT_BEGIN(megdnn_common_elemwise, midout_iv(Mode::_m)) { \
        get(Mode::_m).allow_float = true;                       \
    }                                                           \
    MIDOUT_END();
        MEGDNN_FOREACH_ELEMWISE_MODE_UNARY_FLOAT(cb);
        MEGDNN_FOREACH_ELEMWISE_MODE_BINARY_FLOAT(cb);
        MEGDNN_FOREACH_ELEMWISE_MODE_TERNARY_FLOAT(cb);
#undef cb
// step 3: mark modes that accept bool inputs (no ternary bool modes)
#define cb(_m)                                                  \
    MIDOUT_BEGIN(megdnn_common_elemwise, midout_iv(Mode::_m)) { \
        get(Mode::_m).allow_bool = true;                        \
    }                                                           \
    MIDOUT_END();
        MEGDNN_FOREACH_ELEMWISE_MODE_UNARY_BOOL(cb);
        MEGDNN_FOREACH_ELEMWISE_MODE_BINARY_BOOL(cb);
#undef cb
// step 4: record arity and printable name; _a is (re)defined to the
// arity of each group expanded below
#define cb(_m)                                                  \
    MIDOUT_BEGIN(megdnn_common_elemwise, midout_iv(Mode::_m)) { \
        auto&& t = get(Mode::_m);                               \
        t.arity = _a;                                           \
        t.name = (#_m);                                         \
    }                                                           \
    MIDOUT_END();
#define _a 1
        MEGDNN_FOREACH_ELEMWISE_MODE_UNARY_FLOAT(cb);
        MEGDNN_FOREACH_ELEMWISE_MODE_UNARY_INT(cb);
        MEGDNN_FOREACH_ELEMWISE_MODE_UNARY_BOOL(cb);
#undef _a
#define _a 2
        MEGDNN_FOREACH_ELEMWISE_MODE_BINARY_FLOAT(cb);
        MEGDNN_FOREACH_ELEMWISE_MODE_BINARY_INT(cb);
        MEGDNN_FOREACH_ELEMWISE_MODE_BINARY_BOOL(cb);
#undef _a
#define _a 3
        MEGDNN_FOREACH_ELEMWISE_MODE_TERNARY_FLOAT(cb);
        MEGDNN_FOREACH_ELEMWISE_MODE_TERNARY_INT(cb);
#undef _a
#undef cb
// step 5: fused multiply-add modes allow every dtype category and carry
// an explicit arity (FUSE_MUL_ADD4 is the only 4-ary mode)
#define FUSE(_m, _arity)                                        \
    MIDOUT_BEGIN(megdnn_common_elemwise, midout_iv(Mode::_m)) { \
        auto&& t = get(Mode::_m);                               \
        t.allow_int = true;                                     \
        t.allow_float = true;                                   \
        t.allow_bool = true;                                    \
        t.arity = _arity;                                       \
        t.name = (#_m);                                         \
    }                                                           \
    MIDOUT_END();
        FUSE(FUSE_MUL_ADD3, 3);
        FUSE(FUSE_MUL_ADD4, 4);
#undef FUSE
// step 6: mark commutative binary modes; uses .at() since the mode must
// already have been registered above
#define COMM_CB(_m)                                                \
    MIDOUT_BEGIN(megdnn_common_elemwise, midout_iv(Mode::_m)) {    \
        traits.at(static_cast<int>(Mode::_m)).commutable = true;   \
    }                                                              \
    MIDOUT_END()
#define COMM(_m) MEGDNN_ELEMWISE_MODE_ENABLE(_m, COMM_CB)
        COMM(ADD);
        COMM(FUSE_ADD_RELU);
        COMM(FUSE_ADD_SIGMOID);
        COMM(FUSE_ADD_TANH);
        COMM(MUL);
        COMM(RMULH);
        COMM(MAX);
        COMM(MIN);
        COMM(EQ);
        COMM(LOG_SUM_EXP);
#undef COMM
#undef COMM_CB
#if MEGDNN_ELEMWISE_MODE_ENABLE_ALL
        // with all modes compiled in, sanity-check every entry: arity
        // set, at least one dtype category allowed, and commutable
        // implies binary
        for (auto&& i : traits) {
            megdnn_assert(i.arity && (i.allow_int || i.allow_float || i.allow_bool) &&
                          (!i.commutable || i.arity == 2));
        }
#else
#pragma message "elemwise mode stripped"
#endif
    }
    auto&& ret = traits.at(static_cast<int>(mode));
#if !MEGDNN_ELEMWISE_MODE_ENABLE_ALL
    // in mode-stripped builds, a zero arity means this mode was
    // compiled out of the binary
    megdnn_assert(ret.arity);
#endif
    return ret;
}
//! Deduce the broadcasted output shape of a polyadic elemwise operator.
//!
//! Broadcasting aligns trailing axes (numpy-style): differing dims are
//! only compatible when one of them is <= 1, and a 0-sized dim forces
//! the result dim to 0 (empty-tensor propagation).
//!
//! \param src input shapes; each must have ndim >= 1
//! \param dst [out] the deduced output shape
//! throws via megdnn_throw() when the inputs are not broadcastable
void ElemwiseForward::deduce_shape(const TensorShapeArray& src,
                                   TensorShape& dst) {
    // report an error listing every input shape
    auto err = [&]() {
        std::string msg("bad input shape for polyadic operator: ");
        bool first = true;
        for (auto&& i : src) {
            if (first)
                first = false;
            else
                msg.append(", ");
            msg.append(i.to_string());
        }
        megdnn_throw(msg);
    };
    dst.ndim = 0;
    for (auto&& cur : src) {
        // 0-dim (uninitialized) shapes are invalid inputs
        if (!cur.ndim)
            err();
        // first shape, or accumulated result is a scalar: take cur as-is
        if (!dst.ndim || dst.is_scalar())
            dst = cur;
        else if (!cur.is_scalar()) {
            // fold cur into dst, aligning from the innermost axis
            int max_ndim = std::max(cur.ndim, dst.ndim);
            for (int i = 0; i < max_ndim; ++i) {
                // axis index counted from the innermost; becomes
                // negative when one shape has fewer dims than the other
                int cur_idx = cur.ndim - i - 1;
                int dst_idx = dst.ndim - i - 1;
                if (cur_idx >= 0 && dst_idx >= 0) {
                    size_t v0 = dst.shape[dst_idx], v1 = cur.shape[cur_idx];
                    if (v0 != v1) {
                        // differing dims broadcast only if one is <= 1
                        if (v0 > 1 && v1 > 1)
                            err();
                    }
                    // write to the axis position of the wider shape
                    int final_idx = std::max(cur_idx, dst_idx);
                    // any 0 dim propagates as 0 in the result
                    dst.shape[final_idx] =
                            (v0 != 0 && v1 != 0) ? std::max(v0, v1) : 0;
                } else {
                    if (dst_idx < 0) {
                        // cur has more dims than dst: its leading axes
                        // are copied verbatim
                        dst.shape[cur_idx] = cur.shape[cur_idx];
                    }
                }
            }
            dst.ndim = max_ndim;
        }
    }
}
  181. void FormatDeducer::feed(TensorFormat cur) {
  182. // only one kind of non-default format can exist; and in such case the
  183. // layouts with default format must be scalar (checked in deduce_layout)
  184. if (cur == m_default)
  185. return;
  186. if (m_result == m_default) {
  187. m_result = cur;
  188. } else {
  189. megdnn_assert(m_result == cur,
  190. "different input layout formats in elemwise: %s vs %s",
  191. m_result.impl()->to_string().c_str(),
  192. cur.impl()->to_string().c_str());
  193. }
  194. }
  195. void ElemwiseForward::deduce_format(const TensorFormatArray& src,
  196. TensorFormat& dst) {
  197. FormatDeducer d;
  198. for (auto i : src) {
  199. d.feed(i);
  200. }
  201. dst = d.get();
  202. }
  203. void ElemwiseForward::deduce_layout(const TensorLayoutArray& src,
  204. TensorLayout& dst) {
  205. megdnn_assert(src.size() == mode_trait().arity);
  206. DType dtype;
  207. FormatDeducer format_deducer;
  208. for (auto&& i : src) {
  209. if (!dtype.valid()) {
  210. dtype = i.dtype;
  211. dst.format = i.format;
  212. } else {
  213. megdnn_assert(dtype == i.dtype,
  214. "input dtype not unique: get %s and %s", dtype.name(),
  215. i.dtype.name());
  216. }
  217. format_deducer.feed(i.format);
  218. }
  219. dst.format = format_deducer.get();
  220. if (!format_deducer.is_default(dst.format)) {
  221. for (auto&& i : src) {
  222. if (format_deducer.is_default(i.format)) {
  223. megdnn_assert(
  224. i.collapse_contiguous().is_scalar(),
  225. "default format can only be used on scalar, got %s",
  226. i.to_string().c_str());
  227. }
  228. }
  229. }
  230. check_dtype(dtype);
  231. TensorShapeArray src_shp;
  232. for (auto&& i : src)
  233. src_shp.push_back(i);
  234. deduce_shape(src_shp, dst);
  235. dst.dtype = dtype;
  236. dst.init_contiguous_stride();
  237. }
  238. void ElemwiseForward::check_layout_and_broadcast(
  239. const TensorLayoutPtrArray& src, const TensorLayout& dst) {
  240. megdnn_assert(src.size() == mode_trait().arity);
  241. DType dtype;
  242. for (auto i : src) {
  243. if (!dtype.valid()) {
  244. dtype = i->dtype;
  245. } else {
  246. megdnn_assert(dtype == i->dtype);
  247. }
  248. *i = i->broadcast(dst);
  249. }
  250. check_dtype(dtype);
  251. megdnn_assert(dtype == dst.dtype && dst.is_contiguous());
  252. }
  253. void ElemwiseForward::check_dtype(DType dtype) {
  254. megdnn_assert(dtype.valid());
  255. auto&& trait = mode_trait();
  256. switch (dtype.category()) {
  257. case DTypeCategory::FLOAT:
  258. megdnn_assert(trait.allow_float, "unsupport mode %s for float\n",
  259. trait.name);
  260. break;
  261. case DTypeCategory::INT:
  262. megdnn_assert(trait.allow_int, "unsupport mode %s for int\n",
  263. trait.name);
  264. break;
  265. case DTypeCategory::BOOL:
  266. megdnn_assert(trait.allow_bool, "unsupport mode %s for bool\n",
  267. trait.name);
  268. break;
  269. default:
  270. megdnn_throw("bad dtype");
  271. }
  272. }
  273. // vim: syntax=cpp.doxygen

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台