
sub_kernel.cc 6.7 kB

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "host_kernels/sub_kernel.h"

#include <cfloat>
#include <cmath>
#include <memory>

#include "common/debug/log.h"
#include "common/math/math_util.h"
#include "common/op/ge_op_utils.h"
#include "graph/common/bcast.h"
#include "graph/utils/type_utils.h"
#include "inc/kernel_factory.h"

namespace ge {
namespace {
const size_t kSubFirstInput = 0;
const size_t kSubSecondInput = 1;
const size_t kSubFirstOutput = 0;
const size_t kSubOutputSize = 1;
const size_t kSubInputSize = 2;

// Checks whether x - y overflows for the given data type. The FMK_*_SUBCHECK
// macros return a non-SUCCESS status from this function when an overflow is detected.
template <typename T>
Status OverflowCheck(T const &x, T const &y, DataType &data_type) {
  switch (data_type) {
    case DT_INT8:
      FMK_INT8_SUBCHECK(x, y)
      break;
    case DT_INT16:
      FMK_INT16_SUBCHECK(x, y)
      break;
    case DT_INT32:
      FMK_INT32_SUBCHECK(x, y)
      break;
    case DT_INT64:
      FMK_INT64_SUBCHECK(x, y)
      break;
    case DT_UINT8:
      FMK_UINT8_SUBCHECK(x, y)
      break;
    case DT_UINT16:
      FMK_UINT16_SUBCHECK(x, y)
      break;
    case DT_UINT32:
      FMK_UINT32_SUBCHECK(x, y)
      break;
    case DT_UINT64:
      FMK_UINT64_SUBCHECK(x, y)
      break;
    case DT_FLOAT16:
      FMK_FP16_SUBCHECK(x, y)
      break;
    case DT_FLOAT:
      FMK_FLOAT_SUBCHECK(x, y)
      break;
    case DT_DOUBLE:
      FMK_DOUBLE_SUBCHECK(x, y)
      break;
    default:
      break;
  }
  return SUCCESS;
}

// Defines an element-wise subtraction callback (func_<TYPE>) that reports overflow
// through the Status out-parameter.
#define DEFINE_FUNC_WITH_STATUS_BY_TYPE(TYPE)                                          \
  std::function<TYPE(TYPE const &, TYPE const &, DataType &, Status &)> func_##TYPE =  \
    [](TYPE const &x, TYPE const &y, DataType &type, Status &ret) -> TYPE {            \
      ret = OverflowCheck<TYPE>(x, y, type);                                           \
      if (ret != SUCCESS) {                                                            \
        GELOGE(PARAM_INVALID, "Result of sub is overflow.");                           \
        return static_cast<TYPE>(0);                                                   \
      }                                                                                \
      return static_cast<TYPE>(x) - static_cast<TYPE>(y);                              \
    };

// Dispatches the broadcast subtraction for one data type into the matching y_data_<TYPE>_ buffer.
#define SET_BCAST_COMPUTE_CASE(DTYPE, TYPE)                                \
  case DTYPE:                                                              \
    ret = bcast.BCastComputeCheck(input, y_data_##TYPE##_, func_##TYPE);   \
    break;

// Copies the computed result buffer for one data type into the output tensor.
#define SET_OUTPUT(DTYPE, TYPE)                                                                                         \
  case DTYPE:                                                                                                           \
    (void)output_ptr->SetData(reinterpret_cast<uint8_t *>(y_data_##TYPE##_.data()), y_data_##TYPE##_.size() * length); \
    break;

DEFINE_FUNC_WITH_STATUS_BY_TYPE(int8_t)
DEFINE_FUNC_WITH_STATUS_BY_TYPE(int16_t)
DEFINE_FUNC_WITH_STATUS_BY_TYPE(int32_t)
DEFINE_FUNC_WITH_STATUS_BY_TYPE(int64_t)
DEFINE_FUNC_WITH_STATUS_BY_TYPE(uint8_t)
DEFINE_FUNC_WITH_STATUS_BY_TYPE(uint16_t)
DEFINE_FUNC_WITH_STATUS_BY_TYPE(uint32_t)
DEFINE_FUNC_WITH_STATUS_BY_TYPE(uint64_t)
DEFINE_FUNC_WITH_STATUS_BY_TYPE(fp16_t)
DEFINE_FUNC_WITH_STATUS_BY_TYPE(float)
DEFINE_FUNC_WITH_STATUS_BY_TYPE(double)
}  // namespace

Status SubKernel::Compute(const ge::OpDescPtr op_desc_ptr, const std::vector<ge::ConstGeTensorPtr> &input,
                          vector<ge::GeTensorPtr> &v_output) {
  GE_CHECK_NOTNULL(op_desc_ptr);
  // Check the number of inputs and outputs.
  if ((input.size() != kSubInputSize) || (op_desc_ptr->GetOutputsSize() != kSubOutputSize)) {
    GELOGW("The number of input for sub must be %zu.", kSubInputSize);
    return NOT_CHANGED;
  }

  GE_CHECK_NOTNULL(input[kSubFirstInput]);
  GE_CHECK_NOTNULL(input[kSubSecondInput]);
  ConstGeTensorPtr weight0 = input[kSubFirstInput];
  ConstGeTensorPtr weight1 = input[kSubSecondInput];

  // Run the broadcast subtraction for the matching data type.
  Status ret;
  DataType data_type = input[kSubFirstInput]->GetTensorDesc().GetDataType();
  BCast bcast;
  switch (data_type) {
    SET_BCAST_COMPUTE_CASE(DT_INT8, int8_t)
    SET_BCAST_COMPUTE_CASE(DT_INT16, int16_t)
    SET_BCAST_COMPUTE_CASE(DT_INT32, int32_t)
    SET_BCAST_COMPUTE_CASE(DT_INT64, int64_t)
    SET_BCAST_COMPUTE_CASE(DT_UINT8, uint8_t)
    SET_BCAST_COMPUTE_CASE(DT_UINT16, uint16_t)
    SET_BCAST_COMPUTE_CASE(DT_UINT32, uint32_t)
    SET_BCAST_COMPUTE_CASE(DT_UINT64, uint64_t)
    SET_BCAST_COMPUTE_CASE(DT_FLOAT16, fp16_t)
    SET_BCAST_COMPUTE_CASE(DT_FLOAT, float)
    SET_BCAST_COMPUTE_CASE(DT_DOUBLE, double)
    default:
      GELOGI("Sub kernel data type %s not support.", TypeUtils::DataTypeToSerialString(data_type).c_str());
      ret = NOT_CHANGED;
      break;
  }

  if (ret != SUCCESS) {
    GELOGW("BCastCompute fail, data_type:%s, ret:%s", TypeUtils::DataTypeToSerialString(data_type).c_str(),
           GET_ERRORNO_STR(ret).c_str());
    return NOT_CHANGED;
  }

  uint32_t length = 1;
  if (!TypeUtils::GetDataTypeLength(data_type, length)) {
    GELOGW("Can't GetDataTypeLength of data_type: %s", TypeUtils::DataTypeToSerialString(data_type).c_str());
    return NOT_CHANGED;
  }

  auto output_tensor_desc = op_desc_ptr->GetOutputDesc(kSubFirstOutput);
  GeTensorPtr output_ptr = MakeShared<GeTensor>(output_tensor_desc);
  if (output_ptr == nullptr) {
    GELOGW("make_shared ge::GeTensor failed, node name %s.", op_desc_ptr->GetName().c_str());
    return NOT_CHANGED;
  }

  // Record the broadcast output shape, then copy the result buffer into the output tensor.
  output_ptr->MutableTensorDesc().SetShape(GeShape(bcast.GetOutputShape()));
  // SetData only returns GRAPH_SUCCESS here, so its return value is ignored.
  switch (data_type) {
    SET_OUTPUT(DT_INT8, int8_t)
    SET_OUTPUT(DT_INT16, int16_t)
    SET_OUTPUT(DT_INT32, int32_t)
    SET_OUTPUT(DT_INT64, int64_t)
    SET_OUTPUT(DT_UINT8, uint8_t)
    SET_OUTPUT(DT_UINT16, uint16_t)
    SET_OUTPUT(DT_UINT32, uint32_t)
    SET_OUTPUT(DT_UINT64, uint64_t)
    SET_OUTPUT(DT_FLOAT16, fp16_t)
    SET_OUTPUT(DT_FLOAT, float)
    SET_OUTPUT(DT_DOUBLE, double)
    default:
      break;
  }
  output_ptr->MutableTensorDesc().SetDataType(data_type);
  v_output.push_back(output_ptr);
  return SUCCESS;
}

REGISTER_KERNEL(SUB, SubKernel);
}  // namespace ge
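
For reference, here is roughly what the preprocessor produces for a single type. The expansion below is a sketch for int32_t only, and it assumes the per-type result buffers such as y_data_int32_t_ are members of SubKernel declared in sub_kernel.h (not shown in this file):

// Expansion of DEFINE_FUNC_WITH_STATUS_BY_TYPE(int32_t): an element-wise
// subtraction callback that signals overflow through the Status out-parameter.
std::function<int32_t(int32_t const &, int32_t const &, DataType &, Status &)> func_int32_t =
  [](int32_t const &x, int32_t const &y, DataType &type, Status &ret) -> int32_t {
    ret = OverflowCheck<int32_t>(x, y, type);
    if (ret != SUCCESS) {
      GELOGE(PARAM_INVALID, "Result of sub is overflow.");
      return static_cast<int32_t>(0);
    }
    return static_cast<int32_t>(x) - static_cast<int32_t>(y);
  };

// Expansion of SET_BCAST_COMPUTE_CASE(DT_INT32, int32_t) in the first switch:
// BCast applies func_int32_t element-wise with broadcasting and fills y_data_int32_t_.
case DT_INT32:
  ret = bcast.BCastComputeCheck(input, y_data_int32_t_, func_int32_t);
  break;

// Expansion of SET_OUTPUT(DT_INT32, int32_t) in the second switch:
// the raw result bytes (element count * element length) are copied into the output tensor.
case DT_INT32:
  (void)output_ptr->SetData(reinterpret_cast<uint8_t *>(y_data_int32_t_.data()), y_data_int32_t_.size() * length);
  break;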

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module (ME) and the underlying hardware and serves as the bridge between them. GE takes the graph issued by ME as input, performs a series of deep graph-optimization passes, and outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.