You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

mul_kernel.cc 7.6 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "host_kernels/mul_kernel.h"
  17. #include <memory>
  18. #include <set>
  19. #include "common/debug/log.h"
  20. #include "common/math/math_util.h"
  21. #include "common/types.h"
  22. #include "common/util.h"
  23. #include "framework/common/debug/ge_log.h"
  24. #include "framework/common/ge_inner_error_codes.h"
  25. #include "graph/common/bcast.h"
  26. #include "graph/utils/type_utils.h"
  27. #include "inc/kernel_factory.h"
namespace ge {
namespace {
// Data types for which this constant-folding Mul kernel is implemented.
// Must stay in sync with the SET_BCAST_COMPUTE_CASE / SET_OUTPUT case lists below.
const std::set<DataType> kMulSupportedType = {DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16,
                                              DT_UINT32, DT_UINT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE};
// Checks whether x * y would overflow for the given data type.
// Each FMK_*_MULCHECK macro performs the type-specific check; on overflow the
// macro is expected to return an error status directly from this function
// (NOTE(review): the macro bodies live in common/math/math_util.h and are not
// visible here -- behavior inferred from the caller, which treats a non-SUCCESS
// result as "result of mul is overflow"; confirm against the macro definitions).
// Types not listed fall through the default case and are reported as SUCCESS.
template <typename T>
Status OverflowCheck(T const &x, T const &y, DataType &type) {
  switch (type) {
    case DT_INT8:
      FMK_INT8_MULCHECK(x, y)
      break;
    case DT_INT16:
      FMK_INT16_MULCHECK(x, y)
      break;
    case DT_INT32:
      FMK_INT32_MULCHECK(x, y)
      break;
    case DT_INT64:
      FMK_INT64_MULCHECK(x, y)
      break;
    case DT_UINT8:
      FMK_UINT8_MULCHECK(x, y)
      break;
    case DT_UINT16:
      FMK_UINT16_MULCHECK(x, y)
      break;
    case DT_UINT32:
      FMK_UINT32_MULCHECK(x, y)
      break;
    case DT_UINT64:
      FMK_UINT64_MULCHECK(x, y)
      break;
    case DT_FLOAT16:
      FMK_FP16_MULCHECK(x, y)
      break;
    case DT_FLOAT:
      FMK_FLOAT_MULCHECK(x, y)
      break;
    case DT_DOUBLE:
      FMK_DOUBLE_MULCHECK(x, y)
      break;
    default:
      break;
  }
  return SUCCESS;
}
// Defines a file-local functor func_<TYPE> computing a * b with an overflow
// pre-check. On overflow it stores the error in `ret` and yields 0; callers
// must inspect `ret` rather than rely on the returned value.
#define DEFINE_FUNC_WITH_STATUS_BY_TYPE(TYPE)                                        \
  std::function<TYPE(TYPE const &, TYPE const &, DataType &, Status &)> func_##TYPE = \
      [](TYPE const &a, TYPE const &b, DataType &type, Status &ret) -> TYPE {         \
    ret = OverflowCheck(a, b, type);                                                  \
    if (ret != SUCCESS) {                                                             \
      GELOGE(PARAM_INVALID, "Result of mul is overflow.");                            \
      return static_cast<TYPE>(0);                                                    \
    }                                                                                 \
    return static_cast<TYPE>(a) * static_cast<TYPE>(b);                               \
  };

// Expands to one switch case running the broadcast multiply for DTYPE; the
// result lands in the member vector y_data_<TYPE>_ (declared in mul_kernel.h).
#define SET_BCAST_COMPUTE_CASE(DTYPE, TYPE)                          \
  case DTYPE:                                                        \
    ret = bcast.BCastComputeCheck(input, y_data_##TYPE##_, func_##TYPE); \
    break;

// Expands to one switch case copying y_data_<TYPE>_ into the output tensor;
// `length` is the byte size of one element of DTYPE.
#define SET_OUTPUT(DTYPE, TYPE)                                                                                      \
  case DTYPE:                                                                                                        \
    (void)output_ptr->SetData(reinterpret_cast<uint8_t *>(y_data_##TYPE##_.data()), y_data_##TYPE##_.size() * length); \
    break;

// [no need to check result]
// Instantiate one checked-multiply functor per supported element type.
DEFINE_FUNC_WITH_STATUS_BY_TYPE(int8_t)
DEFINE_FUNC_WITH_STATUS_BY_TYPE(int16_t)
DEFINE_FUNC_WITH_STATUS_BY_TYPE(int32_t)
DEFINE_FUNC_WITH_STATUS_BY_TYPE(int64_t)
DEFINE_FUNC_WITH_STATUS_BY_TYPE(uint8_t)
DEFINE_FUNC_WITH_STATUS_BY_TYPE(uint16_t)
DEFINE_FUNC_WITH_STATUS_BY_TYPE(uint32_t)
DEFINE_FUNC_WITH_STATUS_BY_TYPE(uint64_t)
DEFINE_FUNC_WITH_STATUS_BY_TYPE(fp16_t)
DEFINE_FUNC_WITH_STATUS_BY_TYPE(float)
DEFINE_FUNC_WITH_STATUS_BY_TYPE(double)
}  // namespace
// Folds a constant Mul node at graph-compile time.
//
// @param op_desc_ptr  descriptor of the Mul op; only its output-0 desc is read.
// @param input        the two constant input tensors (validated by MulCheck).
// @param v_output     receives the single computed output tensor on success.
// @return SUCCESS when the product was computed and appended to v_output;
//         NOT_CHANGED when folding is skipped (validation, unsupported type,
//         or broadcast-compute failure); PARAM_INVALID / MEMALLOC_FAILED on
//         hard errors. NOT_CHANGED leaves the node for runtime execution.
Status MulKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<ConstGeTensorPtr> &input,
                          std::vector<GeTensorPtr> &v_output) {
  GELOGD("MulKernel in");
  if (op_desc_ptr == nullptr) {
    GELOGE(PARAM_INVALID, "Parameter's invalid, input opDescPtr is nullptr.");
    return PARAM_INVALID;
  }
  Status ret = MulCheck(input);
  if (ret != SUCCESS) {
    return ret;
  }
  // MulCheck guarantees both inputs share this data type.
  DataType data_type = input[0]->GetTensorDesc().GetDataType();
  BCast bcast;
  // Each case broadcasts the two inputs and multiplies element-wise into the
  // matching y_data_<TYPE>_ member vector (see SET_BCAST_COMPUTE_CASE above).
  switch (data_type) {
    SET_BCAST_COMPUTE_CASE(DT_INT8, int8_t)
    SET_BCAST_COMPUTE_CASE(DT_INT16, int16_t)
    SET_BCAST_COMPUTE_CASE(DT_INT32, int32_t)
    SET_BCAST_COMPUTE_CASE(DT_INT64, int64_t)
    SET_BCAST_COMPUTE_CASE(DT_UINT8, uint8_t)
    SET_BCAST_COMPUTE_CASE(DT_UINT16, uint16_t)
    SET_BCAST_COMPUTE_CASE(DT_UINT32, uint32_t)
    SET_BCAST_COMPUTE_CASE(DT_UINT64, uint64_t)
    SET_BCAST_COMPUTE_CASE(DT_FLOAT16, fp16_t)
    SET_BCAST_COMPUTE_CASE(DT_FLOAT, float)
    SET_BCAST_COMPUTE_CASE(DT_DOUBLE, double)
    default:
      ret = NOT_CHANGED;
      break;
  }
  if (ret != SUCCESS) {
    GELOGW("BCastCompute fail, data_type: %s, ret: %s", TypeUtils::DataTypeToSerialString(data_type).c_str(),
           GET_ERRORNO_STR(ret).c_str());
    return NOT_CHANGED;
  }
  // Byte size of one element; needed to size the raw output buffer below.
  uint32_t length = 1;
  if (!TypeUtils::GetDataTypeLength(data_type, length)) {
    GELOGW("Can't GetDataTypeLength of data_type: %s", TypeUtils::DataTypeToSerialString(data_type).c_str());
    return NOT_CHANGED;
  }
  GeTensorPtr output_ptr = MakeShared<GeTensor>(op_desc_ptr->GetOutputDesc(0));
  if (output_ptr == nullptr) {
    GELOGE(MEMALLOC_FAILED, "Make shared failed");
    return MEMALLOC_FAILED;
  }
  output_ptr->MutableTensorDesc().SetShape(GeShape(bcast.GetOutputShape()));
  // only return GRAPH_SUCCESS here
  // Copy the computed vector for this type into the output tensor's raw data.
  switch (data_type) {
    SET_OUTPUT(DT_INT8, int8_t)
    SET_OUTPUT(DT_INT16, int16_t)
    SET_OUTPUT(DT_INT32, int32_t)
    SET_OUTPUT(DT_INT64, int64_t)
    SET_OUTPUT(DT_UINT8, uint8_t)
    SET_OUTPUT(DT_UINT16, uint16_t)
    SET_OUTPUT(DT_UINT32, uint32_t)
    SET_OUTPUT(DT_UINT64, uint64_t)
    SET_OUTPUT(DT_FLOAT16, fp16_t)
    SET_OUTPUT(DT_FLOAT, float)
    SET_OUTPUT(DT_DOUBLE, double)
    default:
      break;
  }
  output_ptr->MutableTensorDesc().SetDataType(data_type);
  v_output.push_back(output_ptr);
  GELOGD("MulKernel success");
  return SUCCESS;
}
  170. Status MulKernel::MulCheck(const std::vector<ConstGeTensorPtr> &input) {
  171. // check input number
  172. if (input.size() != static_cast<size_t>(MUL_INPUT_NUM)) {
  173. GELOGI("The number of input for Mul must be %u.", MUL_INPUT_NUM);
  174. return NOT_CHANGED;
  175. }
  176. ConstGeTensorPtr input_x1 = input.at(0);
  177. ConstGeTensorPtr input_x2 = input.at(1);
  178. GE_CHECK_NOTNULL(input_x1);
  179. GE_CHECK_NOTNULL(input_x2);
  180. // check whether there is data in Tensor
  181. if (input_x1->GetData().size() == 0 || input_x2->GetData().size() == 0) {
  182. GELOGI("Check data size fail. x1: %zu, x2: %zu", input_x1->GetData().size(), input_x2->GetData().size());
  183. return NOT_CHANGED;
  184. }
  185. // check whether the data types are the same
  186. DataType type = input_x1->GetTensorDesc().GetDataType();
  187. if (type != input_x2->GetTensorDesc().GetDataType()) {
  188. GELOGI("Data type of inputs for Mul not matched.");
  189. return NOT_CHANGED;
  190. }
  191. // check if input data type is supported
  192. if (kMulSupportedType.find(type) == kMulSupportedType.end()) {
  193. GELOGI("Mul does not support this Data type: %s", TypeUtils::DataTypeToSerialString(type).c_str());
  194. return NOT_CHANGED;
  195. }
  196. return SUCCESS;
  197. }
// Register this kernel with the factory under the MUL op type so the graph
// optimizer can look it up for constant folding.
REGISTER_KERNEL(MUL, MulKernel);
}  // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示