You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

maximum_kernel.cc 7.2 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "host_kernels/maximum_kernel.h"
  17. #include <memory>
  18. #include <set>
  19. #include "common/debug/log.h"
  20. #include "common/fp16_t.h"
  21. #include "common/types.h"
  22. #include "common/util.h"
  23. #include "framework/common/debug/ge_log.h"
  24. #include "framework/common/ge_inner_error_codes.h"
  25. #include "graph/common/bcast.h"
  26. #include "graph/utils/type_utils.h"
  27. #include "inc/kernel_factory.h"
  28. namespace ge {
namespace {
// Maximum takes exactly two inputs and produces a single output tensor.
const size_t kMaximumInputNum = 2;
const size_t kMaximumFirstInput = 0;
const size_t kMaximumSecondInput = 1;
const size_t kMaximumFirstOutput = 0;
// Data types this host kernel can constant-fold.
const std::set<DataType> kMaximumSupportedType = {DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
                                                  DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_DOUBLE};

// Defines an element-wise maximum functor named func_<TYPE>, passed to
// BCast::BCastCompute by SET_BCAST_COMPUTE_CASE below.
#define DEFINE_FUNC_BY_TYPE(TYPE)                                                                          \
  std::function<TYPE(TYPE const &, TYPE const &)> func_##TYPE = [](TYPE const &a, TYPE const &b) -> TYPE { \
    return (a > b ? a : b);                                                                                \
  };

// switch-case arm: broadcast-compute the maximum for DTYPE into the local
// vector y_data_<TYPE>. Relies on locals `bcast`, `input`, `ret` being in
// scope at the expansion site (MaximumKernel::Compute).
#define SET_BCAST_COMPUTE_CASE(DTYPE, TYPE)                        \
  case DTYPE:                                                      \
    ret = bcast.BCastCompute(input, y_data_##TYPE, func_##TYPE);   \
    break;

// switch-case arm: copy y_data_<TYPE> into `output_ptr`; `length` is the
// byte size of one element. A SetData failure is only logged, not returned.
#define SET_OUTPUT(DTYPE, TYPE)                                                                                  \
  case DTYPE:                                                                                                    \
    if (output_ptr->SetData(reinterpret_cast<uint8_t *>(y_data_##TYPE.data()), y_data_##TYPE.size() * length) != \
        GRAPH_SUCCESS) {                                                                                         \
      GELOGW("GenData: SetData failed");                                                                         \
    }                                                                                                            \
    break;

// Instantiate one maximum functor per supported element type.
DEFINE_FUNC_BY_TYPE(int8_t)
DEFINE_FUNC_BY_TYPE(int16_t)
DEFINE_FUNC_BY_TYPE(int32_t)
DEFINE_FUNC_BY_TYPE(int64_t)
DEFINE_FUNC_BY_TYPE(uint8_t)
DEFINE_FUNC_BY_TYPE(uint16_t)
DEFINE_FUNC_BY_TYPE(uint32_t)
DEFINE_FUNC_BY_TYPE(uint64_t)
DEFINE_FUNC_BY_TYPE(fp16_t)
DEFINE_FUNC_BY_TYPE(float)
DEFINE_FUNC_BY_TYPE(double)
}  // namespace
// Constant-folds a Maximum node on the host: validates the two input tensors,
// broadcast-computes the element-wise maximum, and appends the result tensor
// to v_output. Returns SUCCESS when folded, NOT_CHANGED when the node must be
// left in the graph (unsupported type, compute failure), PARAM_INVALID /
// FAILED / MEMALLOC_FAILED on bad arguments or allocation failure.
Status MaximumKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<ConstGeTensorPtr> &input,
                              std::vector<GeTensorPtr> &v_output) {
  GELOGD("MaximumKernel in");
  if (op_desc_ptr == nullptr) {
    GELOGE(PARAM_INVALID, "Parameter's invalid, input opDescPtr is nullptr.");
    return PARAM_INVALID;
  }
  Status ret = MaximumCheck(input);
  if (ret != SUCCESS) {
    return ret;
  }

  // One result buffer per supported element type; the SET_BCAST_COMPUTE_CASE
  // and SET_OUTPUT macros select the matching y_data_<type> by token pasting,
  // so these exact names are part of the macro contract.
  std::vector<int8_t> y_data_int8_t;
  std::vector<int16_t> y_data_int16_t;
  std::vector<int32_t> y_data_int32_t;
  std::vector<int64_t> y_data_int64_t;
  std::vector<uint8_t> y_data_uint8_t;
  std::vector<uint16_t> y_data_uint16_t;
  std::vector<uint32_t> y_data_uint32_t;
  std::vector<uint64_t> y_data_uint64_t;
  std::vector<fp16_t> y_data_fp16_t;
  std::vector<float> y_data_float;
  std::vector<double> y_data_double;

  // Defensive re-check; MaximumCheck already guarantees two inputs.
  if (input.empty()) {
    GELOGE(FAILED, "input is empty.");
    return FAILED;
  }
  // Both inputs have the same data type (verified by MaximumCheck).
  DataType data_type = input[kMaximumFirstInput]->GetTensorDesc().GetDataType();
  BCast bcast;
  switch (data_type) {
    SET_BCAST_COMPUTE_CASE(DT_INT8, int8_t)
    SET_BCAST_COMPUTE_CASE(DT_INT16, int16_t)
    SET_BCAST_COMPUTE_CASE(DT_INT32, int32_t)
    SET_BCAST_COMPUTE_CASE(DT_INT64, int64_t)
    SET_BCAST_COMPUTE_CASE(DT_UINT8, uint8_t)
    SET_BCAST_COMPUTE_CASE(DT_UINT16, uint16_t)
    SET_BCAST_COMPUTE_CASE(DT_UINT32, uint32_t)
    SET_BCAST_COMPUTE_CASE(DT_UINT64, uint64_t)
    SET_BCAST_COMPUTE_CASE(DT_FLOAT16, fp16_t)
    SET_BCAST_COMPUTE_CASE(DT_FLOAT, float)
    SET_BCAST_COMPUTE_CASE(DT_DOUBLE, double)
    default:
      ret = NOT_CHANGED;
      break;
  }

  if (ret != SUCCESS) {
    GELOGW("BCastCompute fail, data_type: %s, ret: %s", TypeUtils::DataTypeToSerialString(data_type).c_str(),
           GET_ERRORNO_STR(ret).c_str());
    return NOT_CHANGED;
  }

  // Byte size of one element of data_type; used by SET_OUTPUT to size the copy.
  uint32_t length = 1;
  if (!TypeUtils::GetDataTypeLength(data_type, length)) {
    GELOGW("Can't GetDataTypeLength of data_type: %s", TypeUtils::DataTypeToSerialString(data_type).c_str());
    return NOT_CHANGED;
  }

  GeTensorPtr output_ptr = MakeShared<GeTensor>(op_desc_ptr->GetOutputDesc(kMaximumFirstOutput));
  if (output_ptr == nullptr) {
    GELOGE(MEMALLOC_FAILED, "Make shared failed");
    return MEMALLOC_FAILED;
  }

  // Result shape is the broadcast of the two input shapes.
  output_ptr->MutableTensorDesc().SetShape(GeShape(bcast.GetOutputShape()));
  // only return GRAPH_SUCCESS here
  switch (data_type) {
    SET_OUTPUT(DT_INT8, int8_t)
    SET_OUTPUT(DT_INT16, int16_t)
    SET_OUTPUT(DT_INT32, int32_t)
    SET_OUTPUT(DT_INT64, int64_t)
    SET_OUTPUT(DT_UINT8, uint8_t)
    SET_OUTPUT(DT_UINT16, uint16_t)
    SET_OUTPUT(DT_UINT32, uint32_t)
    SET_OUTPUT(DT_UINT64, uint64_t)
    SET_OUTPUT(DT_FLOAT16, fp16_t)
    SET_OUTPUT(DT_FLOAT, float)
    SET_OUTPUT(DT_DOUBLE, double)
    default:
      break;
  }
  output_ptr->MutableTensorDesc().SetDataType(data_type);
  v_output.push_back(output_ptr);
  GELOGD("MaximumKernel success");
  return SUCCESS;
}
  144. Status MaximumKernel::MaximumCheck(const std::vector<ConstGeTensorPtr> &input) {
  145. // check input number
  146. if (input.size() != kMaximumInputNum) {
  147. GELOGI("The number of input for Maximum must be %zu.", kMaximumInputNum);
  148. return NOT_CHANGED;
  149. }
  150. ConstGeTensorPtr input_x1 = input.at(kMaximumFirstInput);
  151. ConstGeTensorPtr input_x2 = input.at(kMaximumSecondInput);
  152. GE_CHECK_NOTNULL(input_x1);
  153. GE_CHECK_NOTNULL(input_x2);
  154. // check whether there is data in Tensor
  155. if ((input_x1->GetData().size() == 0) || (input_x2->GetData().size() == 0)) {
  156. GELOGI("Check data size fail. x1: %zu, x2: %zu", input_x1->GetData().size(), input_x2->GetData().size());
  157. return NOT_CHANGED;
  158. }
  159. // check whether the data types are the same
  160. DataType type = input_x1->GetTensorDesc().GetDataType();
  161. if (type != input_x2->GetTensorDesc().GetDataType()) {
  162. GELOGI("Data type of inputs for Maximum not matched.");
  163. return NOT_CHANGED;
  164. }
  165. // check if input data type is supported
  166. if (kMaximumSupportedType.find(type) == kMaximumSupportedType.end()) {
  167. GELOGI("Maximum does not support this Data type: %s", TypeUtils::DataTypeToSerialString(type).c_str());
  168. return NOT_CHANGED;
  169. }
  170. return SUCCESS;
  171. }
// Register this kernel with the factory so MAXIMUM ops dispatch to it.
REGISTER_KERNEL(MAXIMUM, MaximumKernel);
  173. } // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示