You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

opr_impl.cpp 12 kB

feat(bazel/windows/xp/sp2/inference): implement inference on windows xp (os version >= sp2) build with bazel * bazel build support (define __DEPLOY_ON_XP_SP2__ when deploying on xp sp2): (dbg) ./bazel build //brain/megbrain:load_and_run --cpu='x86_windows_xp' --compiler='clang_cl' -c dbg --copt "-D__DEPLOY_ON_XP_SP2__=1" (opt) ./bazel build //brain/megbrain:load_and_run --cpu='x86_windows_xp' --compiler='clang_cl' -c opt --copt "-D__DEPLOY_ON_XP_SP2__=1" * internal behavior: MGB_HAVE_THREAD=0 will be defined when __DEPLOY_ON_XP_SP2__ is enabled * refer to https://docs.microsoft.com/en-us/cpp/build/configuring-programs-for-windows-xp?view=msvc-160 xp sp2 (x86) does not fully support the vc runtime, caused by KERNEL32.dll not implementing some base apis needed by c++ std facilities, for example std::mutex/std::thread/std::condition_variable; as a workaround, we disable some MegEngine features in the xp sp2 env, for example multi-thread etc! * about DNN_MUTEX/MGB_MUTEX: if your code will be built into inference code (even CPU backends), please replace std::mutex with DNN_MUTEX/MGB_MUTEX * about multi-thread: if your code needs multi-thread support, please enable it only when MGB_HAVE_THREAD=1 * about test build env status: 1: Visual Studio 2019 (MSVC version <= 14.26.28801) ---- pass 2: Visual Studio 2019 (MSVC version > 14.26.28801) ---- failed, caused by this 'new' version making the VC runtime depend on win7 KERNEL32.DLL; this may be fixed in a later Visual Studio 2019 version, but we did not test that at this MR merge point 3: Visual Studio 2017 ---------- pass 4: Visual Studio 2014 ---------- pass GitOrigin-RevId: 65ac48b95e99f2c510fe5db449cc8182d682e113
3 years ago
feat(bazel/windows/xp/sp2/inference): implement inference on windows xp (os version >= sp2) build with bazel * bazel build support (define __DEPLOY_ON_XP_SP2__ when deploying on xp sp2): (dbg) ./bazel build //brain/megbrain:load_and_run --cpu='x86_windows_xp' --compiler='clang_cl' -c dbg --copt "-D__DEPLOY_ON_XP_SP2__=1" (opt) ./bazel build //brain/megbrain:load_and_run --cpu='x86_windows_xp' --compiler='clang_cl' -c opt --copt "-D__DEPLOY_ON_XP_SP2__=1" * internal behavior: MGB_HAVE_THREAD=0 will be defined when __DEPLOY_ON_XP_SP2__ is enabled * refer to https://docs.microsoft.com/en-us/cpp/build/configuring-programs-for-windows-xp?view=msvc-160 xp sp2 (x86) does not fully support the vc runtime, caused by KERNEL32.dll not implementing some base apis needed by c++ std facilities, for example std::mutex/std::thread/std::condition_variable; as a workaround, we disable some MegEngine features in the xp sp2 env, for example multi-thread etc! * about DNN_MUTEX/MGB_MUTEX: if your code will be built into inference code (even CPU backends), please replace std::mutex with DNN_MUTEX/MGB_MUTEX * about multi-thread: if your code needs multi-thread support, please enable it only when MGB_HAVE_THREAD=1 * about test build env status: 1: Visual Studio 2019 (MSVC version <= 14.26.28801) ---- pass 2: Visual Studio 2019 (MSVC version > 14.26.28801) ---- failed, caused by this 'new' version making the VC runtime depend on win7 KERNEL32.DLL; this may be fixed in a later Visual Studio 2019 version, but we did not test that at this MR merge point 3: Visual Studio 2017 ---------- pass 4: Visual Studio 2014 ---------- pass GitOrigin-RevId: 65ac48b95e99f2c510fe5db449cc8182d682e113
3 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374
  1. #include "src/common/elemwise/kern_defs.cuh"
  2. #include "src/common/utils.h"
  3. #include "megdnn/oprs.h"
  4. #include "megdnn/tensor_format.h"
  5. #include "midout.h"
  6. MIDOUT_DECL(megdnn_common_elemwise)
  7. //! this tag will be used at tools/gen_header_for_bin_reduce.py
  8. //! please do not modify it
  9. MIDOUT_DECL(megdnn_common_elemwise_mode)
  10. #include <mutex>
  11. #include <vector>
  12. using namespace megdnn;
namespace {
//! Accumulates the tensor formats of a set of elemwise inputs and deduces
//! the single format they agree on.  The merge rule lives in feed(),
//! defined later in this file: at most one kind of non-default format may
//! appear among the inputs.
class FormatDeducer {
    //! default-constructed format used as the "no format seen yet" marker.
    //! NOTE: members are initialized in declaration order, so m_default is
    //! ready before m_result copies it.
    const TensorFormat m_default;
    TensorFormat m_result = m_default;

public:
    //! record one input format (see definition below for the merge rule)
    inline void feed(TensorFormat cur);
    //! whether \p f equals the default-constructed format
    bool is_default(TensorFormat f) const { return f == m_default; }
    //! the deduced format; stays default if only default formats were fed
    TensorFormat get() const { return m_result; }
};
}  // anonymous namespace
//! shorthand aliases used throughout this file
using Mode = param::Elemwise::Mode;
using ModeTrait = ElemwiseForward::ModeTrait;
const ModeTrait& ModeTrait::from_mode(Mode mode) {
    //! Lazily-built global trait table indexed by static_cast<size_t>(mode).
    //! Guarded by DNN_MUTEX rather than std::mutex so inference builds on
    //! platforms without full thread support (e.g. xp sp2) still compile.
    static DNN_MUTEX mtx;
    static std::vector<ModeTrait> traits;
    MEGDNN_LOCK_GUARD(mtx);
    if (traits.empty()) {
        //! fetch (creating on demand) the trait slot for mode \p m
        auto get = [&](Mode m) -> ModeTrait& {
            auto im = static_cast<size_t>(m);
            if (im >= traits.size())
                traits.resize(im + 1);
            return traits[im];
        };

//! every MIDOUT_BEGIN/END pair below both applies the trait and registers
//! the mode with midout, so unused modes can be stripped from the binary

//! mark the integer-capable modes
#define cb(_m)                                                  \
    MIDOUT_BEGIN(megdnn_common_elemwise, midout_iv(Mode::_m)) { \
        get(Mode::_m).allow_int = true;                         \
    }                                                           \
    MIDOUT_END();
        MEGDNN_FOREACH_ELEMWISE_MODE_UNARY_INT(cb);
        MEGDNN_FOREACH_ELEMWISE_MODE_BINARY_INT(cb);
        MEGDNN_FOREACH_ELEMWISE_MODE_TERNARY_INT(cb);
#undef cb

//! mark the float-capable modes
#define cb(_m)                                                  \
    MIDOUT_BEGIN(megdnn_common_elemwise, midout_iv(Mode::_m)) { \
        get(Mode::_m).allow_float = true;                       \
    }                                                           \
    MIDOUT_END();
        MEGDNN_FOREACH_ELEMWISE_MODE_UNARY_FLOAT(cb);
        MEGDNN_FOREACH_ELEMWISE_MODE_BINARY_FLOAT(cb);
        MEGDNN_FOREACH_ELEMWISE_MODE_TERNARY_FLOAT(cb);
#undef cb

//! mark the bool-capable modes (no ternary bool group exists)
#define cb(_m)                                                  \
    MIDOUT_BEGIN(megdnn_common_elemwise, midout_iv(Mode::_m)) { \
        get(Mode::_m).allow_bool = true;                        \
    }                                                           \
    MIDOUT_END();
        MEGDNN_FOREACH_ELEMWISE_MODE_UNARY_BOOL(cb);
        MEGDNN_FOREACH_ELEMWISE_MODE_BINARY_BOOL(cb);
#undef cb

//! record arity and printable name; _a is redefined per arity group below
#define cb(_m)                                                  \
    MIDOUT_BEGIN(megdnn_common_elemwise, midout_iv(Mode::_m)) { \
        auto&& t = get(Mode::_m);                               \
        t.arity = _a;                                           \
        t.name = (#_m);                                         \
    }                                                           \
    MIDOUT_END();
#define _a 1
        MEGDNN_FOREACH_ELEMWISE_MODE_UNARY_FLOAT(cb);
        MEGDNN_FOREACH_ELEMWISE_MODE_UNARY_INT(cb);
        MEGDNN_FOREACH_ELEMWISE_MODE_UNARY_BOOL(cb);
#undef _a
#define _a 2
        MEGDNN_FOREACH_ELEMWISE_MODE_BINARY_FLOAT(cb);
        MEGDNN_FOREACH_ELEMWISE_MODE_BINARY_INT(cb);
        MEGDNN_FOREACH_ELEMWISE_MODE_BINARY_BOOL(cb);
#undef _a
#define _a 3
        MEGDNN_FOREACH_ELEMWISE_MODE_TERNARY_FLOAT(cb);
        MEGDNN_FOREACH_ELEMWISE_MODE_TERNARY_INT(cb);
#undef _a
#undef cb

//! fused multiply-add modes allow every dtype category and carry an
//! explicit arity (FUSE_MUL_ADD4 is the only 4-ary mode)
#define FUSE(_m, _arity)                                        \
    MIDOUT_BEGIN(megdnn_common_elemwise, midout_iv(Mode::_m)) { \
        auto&& t = get(Mode::_m);                               \
        t.allow_int = true;                                     \
        t.allow_float = true;                                   \
        t.allow_bool = true;                                    \
        t.arity = _arity;                                       \
        t.name = (#_m);                                         \
    }                                                           \
    MIDOUT_END();
        FUSE(FUSE_MUL_ADD3, 3);
        FUSE(FUSE_MUL_ADD4, 4);
#undef FUSE

//! commutativity flags; .at() is used because these slots must already
//! have been created by the loops above
#define COMM_CB(_m)                                              \
    MIDOUT_BEGIN(megdnn_common_elemwise, midout_iv(Mode::_m)) {  \
        traits.at(static_cast<int>(Mode::_m)).commutable = true; \
    }                                                            \
    MIDOUT_END()
#define COMM(_m) MEGDNN_ELEMWISE_MODE_ENABLE(_m, COMM_CB)
        COMM(ADD);
        COMM(FUSE_ADD_RELU);
        COMM(FUSE_ADD_SIGMOID);
        COMM(FUSE_ADD_TANH);
        COMM(MUL);
        COMM(RMULH);
        COMM(MAX);
        COMM(MIN);
        COMM(EQ);
        COMM(LOG_SUM_EXP);
#undef COMM
#undef COMM_CB

#if MEGDNN_ELEMWISE_MODE_ENABLE_ALL
        //! sanity check: when all modes are enabled, every slot must be
        //! fully described, and only binary modes may be commutable
        for (auto&& i : traits) {
            megdnn_assert(
                    i.arity && (i.allow_int || i.allow_float || i.allow_bool) &&
                    (!i.commutable || i.arity == 2));
        }
#else
#pragma message "elemwise mode stripped"
#endif
    }
    auto&& ret = traits.at(static_cast<int>(mode));
#if !MEGDNN_ELEMWISE_MODE_ENABLE_ALL
    //! with stripped modes, an unregistered mode has arity 0 and is rejected
    megdnn_assert(ret.arity);
#endif
    //! Some DNN backend OPRS will use proxy OPRS. For example, softmax@cpu Naive imp
    //! will call elemwise OPR. In the model dump stage, we have no information about
    //! this logic, which will lead to the loss of elemwise mode. As a solution, we
    //! record the elemwise mode information by adding the 'midout' case flag in the run
    //! stage.
#define CB_MODE(mode)                                                              \
    case mode:                                                                     \
        MIDOUT_BEGIN(megdnn_common_elemwise_mode, midout_iv(mode)) { return ret; } \
        MIDOUT_END();                                                              \
        break;
    switch (mode) {
        CB_MODE(Mode::RELU);
        CB_MODE(Mode::ABS);
        CB_MODE(Mode::ACOS);
        CB_MODE(Mode::ASIN);
        CB_MODE(Mode::CEIL);
        CB_MODE(Mode::COS);
        CB_MODE(Mode::EXP);
        CB_MODE(Mode::EXPM1);
        CB_MODE(Mode::FLOOR);
        CB_MODE(Mode::LOG);
        CB_MODE(Mode::LOG1P);
        CB_MODE(Mode::NEGATE);
        CB_MODE(Mode::SIGMOID);
        CB_MODE(Mode::SIN);
        CB_MODE(Mode::TANH);
        CB_MODE(Mode::ABS_GRAD);
        CB_MODE(Mode::ADD);
        CB_MODE(Mode::FLOOR_DIV);
        CB_MODE(Mode::MAX);
        CB_MODE(Mode::MIN);
        CB_MODE(Mode::MOD);
        CB_MODE(Mode::MUL);
        CB_MODE(Mode::POW);
        CB_MODE(Mode::SIGMOID_GRAD);
        CB_MODE(Mode::SUB);
        CB_MODE(Mode::SWITCH_GT0);
        CB_MODE(Mode::TANH_GRAD);
        CB_MODE(Mode::TRUE_DIV);
        CB_MODE(Mode::LOG_SUM_EXP);
        CB_MODE(Mode::LT);
        CB_MODE(Mode::LEQ);
        CB_MODE(Mode::EQ);
        CB_MODE(Mode::SHL);
        CB_MODE(Mode::SHR);
        CB_MODE(Mode::COND_LEQ_MOV);
        CB_MODE(Mode::FUSE_MUL_ADD3);
        CB_MODE(Mode::FUSE_MUL_ADD4);
        CB_MODE(Mode::FUSE_ADD_RELU);
        CB_MODE(Mode::FUSE_ADD_SIGMOID);
        CB_MODE(Mode::FUSE_ADD_TANH);
        CB_MODE(Mode::FAST_TANH);
        CB_MODE(Mode::FAST_TANH_GRAD);
        CB_MODE(Mode::ROUND);
        CB_MODE(Mode::RMULH);
        CB_MODE(Mode::ATAN2);
        CB_MODE(Mode::ERF);
        CB_MODE(Mode::ERFINV);
        CB_MODE(Mode::ERFC);
        CB_MODE(Mode::ERFCINV);
        CB_MODE(Mode::H_SWISH);
        CB_MODE(Mode::H_SWISH_GRAD);
        CB_MODE(Mode::FUSE_ADD_H_SWISH);
        CB_MODE(Mode::NOT);
        CB_MODE(Mode::AND);
        CB_MODE(Mode::OR);
        CB_MODE(Mode::XOR);
        CB_MODE(Mode::SILU);
        CB_MODE(Mode::SILU_GRAD);
        CB_MODE(Mode::GELU);
        CB_MODE(Mode::GELU_GRAD);
        CB_MODE(Mode::COND_LT_MOV);
        default:
            //! a mode missing above means gen_header_for_bin_reduce.py
            //! cannot see it; keep this list in sync with the Mode enum
            megdnn_assert(
                    0,
                    "code issue happened!!, please add new elemwise to switch mode.");
            return ret;
#undef CB_MODE
    }
    return ret;
}
void ElemwiseForward::deduce_shape(const TensorShapeArray& src, TensorShape& dst) {
    //! Deduce the broadcasted output shape of a polyadic elemwise operator.
    //! Inputs broadcast right-aligned (numpy-style): trailing axes are
    //! matched and a size-1 axis stretches to the other operand's size.
    //! err() raises megdnn_throw listing all input shapes.
    auto err = [&]() {
        std::string msg("bad input shape for polyadic operator: ");
        bool first = true;
        for (auto&& i : src) {
            if (first)
                first = false;
            else
                msg.append(", ");
            msg.append(i.to_string());
        }
        megdnn_throw(msg);
    };
    dst.ndim = 0;
    for (auto&& cur : src) {
        //! empty (ndim == 0) inputs are invalid
        if (!cur.ndim)
            err();
        //! an uninitialized dst or a scalar dst is replaced wholesale
        if (!dst.ndim || dst.is_scalar())
            dst = cur;
        else if (!cur.is_scalar()) {
            int max_ndim = std::max(cur.ndim, dst.ndim);
            //! walk axes from the innermost (last) outwards
            for (int i = 0; i < max_ndim; ++i) {
                //! NOTE(review): these indices appear to rely on unsigned
                //! wraparound — ndim is presumably an unsigned field, so
                //! when i >= ndim the subtraction wraps and the narrowing
                //! to int yields a negative value, which the >= 0 checks
                //! below treat as "axis absent"; confirm against the
                //! TensorShape definition before touching this arithmetic
                int cur_idx = cur.ndim - i - 1;
                int dst_idx = dst.ndim - i - 1;
                if (cur_idx >= 0 && dst_idx >= 0) {
                    //! both shapes have this axis: sizes must match or one
                    //! of them must be 1 (broadcast)
                    size_t v0 = dst.shape[dst_idx], v1 = cur.shape[cur_idx];
                    if (v0 != v1) {
                        if (v0 > 1 && v1 > 1)
                            err();
                    }
                    int final_idx = std::max(cur_idx, dst_idx);
                    //! a zero-size extent propagates as zero
                    dst.shape[final_idx] = (v0 != 0 && v1 != 0) ? std::max(v0, v1) : 0;
                } else {
                    //! only cur has this axis: copy its extent into dst
                    if (dst_idx < 0) {
                        dst.shape[cur_idx] = cur.shape[cur_idx];
                    }
                }
            }
            dst.ndim = max_ndim;
        }
    }
}
  252. void FormatDeducer::feed(TensorFormat cur) {
  253. // only one kind of non-default format can exist; and in such case the
  254. // layouts with default format must be scalar (checked in deduce_layout)
  255. if (cur == m_default)
  256. return;
  257. if (m_result == m_default) {
  258. m_result = cur;
  259. } else {
  260. megdnn_assert(
  261. m_result == cur, "different input layout formats in elemwise: %s vs %s",
  262. m_result.impl()->to_string().c_str(), cur.impl()->to_string().c_str());
  263. }
  264. }
  265. void ElemwiseForward::deduce_format(const TensorFormatArray& src, TensorFormat& dst) {
  266. FormatDeducer d;
  267. for (auto i : src) {
  268. d.feed(i);
  269. }
  270. dst = d.get();
  271. }
  272. void ElemwiseForward::deduce_layout(const TensorLayoutArray& src, TensorLayout& dst) {
  273. megdnn_assert(src.size() == mode_trait().arity);
  274. DType dtype;
  275. FormatDeducer format_deducer;
  276. for (auto&& i : src) {
  277. if (!dtype.valid()) {
  278. dtype = i.dtype;
  279. dst.format = i.format;
  280. } else {
  281. megdnn_assert(
  282. dtype == i.dtype, "input dtype not unique: get %s and %s",
  283. dtype.name(), i.dtype.name());
  284. }
  285. format_deducer.feed(i.format);
  286. }
  287. dst.format = format_deducer.get();
  288. if (!format_deducer.is_default(dst.format)) {
  289. for (auto&& i : src) {
  290. if (format_deducer.is_default(i.format)) {
  291. megdnn_assert(
  292. i.collapse_contiguous().is_scalar(),
  293. "default format can only be used on scalar, got %s",
  294. i.to_string().c_str());
  295. }
  296. }
  297. }
  298. check_dtype(dtype);
  299. TensorShapeArray src_shp;
  300. for (auto&& i : src)
  301. src_shp.push_back(i);
  302. deduce_shape(src_shp, dst);
  303. dst.dtype = dtype;
  304. dst.init_contiguous_stride();
  305. }
  306. void ElemwiseForward::check_layout_and_broadcast(
  307. const TensorLayoutPtrArray& src, const TensorLayout& dst) {
  308. megdnn_assert(src.size() == mode_trait().arity);
  309. DType dtype;
  310. for (auto i : src) {
  311. if (!dtype.valid()) {
  312. dtype = i->dtype;
  313. } else {
  314. megdnn_assert(dtype == i->dtype);
  315. }
  316. *i = i->broadcast(dst);
  317. }
  318. check_dtype(dtype);
  319. megdnn_assert(dtype == dst.dtype && dst.is_contiguous());
  320. }
  321. void ElemwiseForward::check_dtype(DType dtype) {
  322. megdnn_assert(dtype.valid());
  323. auto&& trait = mode_trait();
  324. switch (dtype.category()) {
  325. case DTypeCategory::FLOAT:
  326. megdnn_assert(
  327. trait.allow_float, "unsupport mode %s for float\n", trait.name);
  328. break;
  329. case DTypeCategory::INT:
  330. megdnn_assert(trait.allow_int, "unsupport mode %s for int\n", trait.name);
  331. break;
  332. case DTypeCategory::BOOL:
  333. megdnn_assert(trait.allow_bool, "unsupport mode %s for bool\n", trait.name);
  334. break;
  335. default:
  336. megdnn_throw("bad dtype");
  337. }
  338. }
  339. // vim: syntax=cpp.doxygen