
greater_kernel.cc 5.2 kB

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "host_kernels/greater_kernel.h"

#include <memory>
#include <vector>

#include "common/debug/log.h"
#include "common/fp16_t.h"
#include "common/types.h"
#include "common/util.h"
#include "framework/common/debug/ge_log.h"
#include "framework/common/ge_inner_error_codes.h"
#include "graph/common/bcast.h"
#include "graph/utils/type_utils.h"
#include "inc/kernel_factory.h"

using domi::Status;
using domi::SUCCESS;

namespace ge {
namespace {
const size_t kGreaterInputNum = 2;

// Defines one comparison lambda func_<TYPE> per supported element type.
#define DEFINE_FUNC_BY_TYPE(TYPE)                                                                                 \
  std::function<uint8_t(TYPE const &, TYPE const &)> func_##TYPE = [](TYPE const &a, TYPE const &b) -> uint8_t { \
    return a > b;                                                                                                 \
  };

// Dispatches the broadcast compute call for the matching DataType enum value.
#define SET_BCAST_COMPUTE_CASE(DTYPE, TYPE)                \
  case DTYPE:                                              \
    ret = bcast.BCastCompute(input, y_data, func_##TYPE);  \
    break;

DEFINE_FUNC_BY_TYPE(int8_t)
DEFINE_FUNC_BY_TYPE(int16_t)
DEFINE_FUNC_BY_TYPE(int32_t)
DEFINE_FUNC_BY_TYPE(int64_t)
DEFINE_FUNC_BY_TYPE(uint8_t)
DEFINE_FUNC_BY_TYPE(uint16_t)
DEFINE_FUNC_BY_TYPE(uint32_t)
DEFINE_FUNC_BY_TYPE(uint64_t)
DEFINE_FUNC_BY_TYPE(fp16_t)
DEFINE_FUNC_BY_TYPE(float)
DEFINE_FUNC_BY_TYPE(double)
DEFINE_FUNC_BY_TYPE(bool)
}  // namespace

Status GreaterKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<ConstGeTensorPtr> &input,
                              std::vector<GeTensorPtr> &v_output) {
  GELOGD("GreaterKernel in");
  if (op_desc_ptr == nullptr) {
    GELOGE(PARAM_INVALID, "Parameter's invalid, Input opDescPtr is nullptr.");
    return PARAM_INVALID;
  }
  Status ret = GreaterCheck(input);
  if (ret != SUCCESS) {
    return ret;
  }

  std::vector<uint8_t> y_data;
  GE_CHECK_NOTNULL(input[0]);
  DataType data_type = input[0]->GetTensorDesc().GetDataType();
  BCast bcast;
  switch (data_type) {
    SET_BCAST_COMPUTE_CASE(DT_INT8, int8_t)
    SET_BCAST_COMPUTE_CASE(DT_INT16, int16_t)
    SET_BCAST_COMPUTE_CASE(DT_INT32, int32_t)
    SET_BCAST_COMPUTE_CASE(DT_INT64, int64_t)
    SET_BCAST_COMPUTE_CASE(DT_UINT8, uint8_t)
    SET_BCAST_COMPUTE_CASE(DT_UINT16, uint16_t)
    SET_BCAST_COMPUTE_CASE(DT_UINT32, uint32_t)
    SET_BCAST_COMPUTE_CASE(DT_UINT64, uint64_t)
    SET_BCAST_COMPUTE_CASE(DT_FLOAT16, fp16_t)
    SET_BCAST_COMPUTE_CASE(DT_FLOAT, float)
    SET_BCAST_COMPUTE_CASE(DT_DOUBLE, double)
    SET_BCAST_COMPUTE_CASE(DT_BOOL, bool)
    default:
      ret = NOT_CHANGED;
      break;
  }

  if (ret != SUCCESS) {
    GELOGW("BCastCompute fail, data_type:%s, ret:%s", TypeUtils::DataTypeToSerialString(data_type).c_str(),
           GET_ERRORNO_STR(ret).c_str());
    return NOT_CHANGED;
  }

  GeTensorPtr output_ptr;
  output_ptr = MakeShared<GeTensor>(op_desc_ptr->GetOutputDesc(0));
  if (output_ptr == nullptr) {
    GELOGE(MEMALLOC_FAILED, "Make shared failed %s.", op_desc_ptr->GetName().c_str());
    return MEMALLOC_FAILED;
  }
  output_ptr->MutableTensorDesc().SetShape(GeShape(bcast.GetOutputShape()));
  // only return GRAPH_SUCCESS here
  GE_CHK_STATUS_RET(output_ptr->SetData(y_data));
  output_ptr->MutableTensorDesc().SetDataType(DT_BOOL);
  v_output.push_back(output_ptr);
  GELOGD("GreaterKernel success");
  return SUCCESS;
}

Status GreaterKernel::GreaterCheck(const std::vector<ConstGeTensorPtr> &input) {
  // check input number
  if (input.size() != kGreaterInputNum) {
    GELOGI("The number of input for greater must be %zu.", kGreaterInputNum);
    return NOT_CHANGED;
  }
  GE_CHECK_NOTNULL(input[0]);
  GE_CHECK_NOTNULL(input[1]);
  ConstGeTensorPtr input_x1 = input.at(0);
  ConstGeTensorPtr input_x2 = input.at(1);
  // check whether there is data in Tensor
  if (input_x1->GetData().size() == 0 || input_x2->GetData().size() == 0) {
    GELOGI("Check data size fail. x1: %zu, x2:%zu", input_x1->GetData().size(), input_x2->GetData().size());
    return NOT_CHANGED;
  }
  // check whether the data types are the same
  if (input_x1->GetTensorDesc().GetDataType() != input_x2->GetTensorDesc().GetDataType()) {
    GELOGI("Data type of inputs for greater not matched.");
    return NOT_CHANGED;
  }
  // check if input data type is supported
  DataType type = input_x1->GetTensorDesc().GetDataType();
  if (greater_supported_type.find(type) == greater_supported_type.end()) {
    GELOGI("Greater does not support this Data type:%s", TypeUtils::DataTypeToSerialString(type).c_str());
    return NOT_CHANGED;
  }
  return SUCCESS;
}

REGISTER_KERNEL(GREATER, GreaterKernel);
}  // namespace ge
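
The DEFINE_FUNC_BY_TYPE and SET_BCAST_COMPUTE_CASE macros above reduce to a simple pattern: one comparison lambda per element type, selected by a switch on the tensor's data type, with shape broadcasting delegated to BCast::BCastCompute. The sketch below is a minimal standalone illustration of that dispatch pattern, not GE code: the DType enum, the Greater helper, and the omission of broadcasting are simplifications introduced here for illustration only.

// Standalone sketch of the per-type lambda + switch dispatch used by GreaterKernel.
#include <cstdint>
#include <functional>
#include <iostream>
#include <vector>

enum class DType { kInt32, kFloat };  // hypothetical stand-in for ge::DataType

// Same shape as DEFINE_FUNC_BY_TYPE: one "greater" lambda per element type.
#define DEFINE_FUNC_BY_TYPE(TYPE)                                  \
  std::function<uint8_t(TYPE const &, TYPE const &)> func_##TYPE = \
      [](TYPE const &a, TYPE const &b) -> uint8_t { return a > b; };

DEFINE_FUNC_BY_TYPE(int32_t)
DEFINE_FUNC_BY_TYPE(float)

// Element-wise compare without broadcasting (BCastCompute additionally
// handles shape broadcasting, which this sketch omits).
template <typename T>
std::vector<uint8_t> Greater(const std::vector<T> &x1, const std::vector<T> &x2,
                             const std::function<uint8_t(T const &, T const &)> &cmp) {
  std::vector<uint8_t> out(x1.size());
  for (size_t i = 0; i < x1.size(); ++i) {
    out[i] = cmp(x1[i], x2[i]);
  }
  return out;
}

int main() {
  std::vector<int32_t> a{1, 5, 3};
  std::vector<int32_t> b{2, 4, 3};
  DType dtype = DType::kInt32;
  std::vector<uint8_t> y;
  switch (dtype) {  // mirrors the SET_BCAST_COMPUTE_CASE dispatch
    case DType::kInt32:
      y = Greater<int32_t>(a, b, func_int32_t);
      break;
    case DType::kFloat:
      break;  // float inputs would be handled analogously via func_float
  }
  for (uint8_t v : y) std::cout << int(v) << ' ';  // prints: 0 1 0
  return 0;
}

Generating one named std::function per type lets a single switch statement cover all twelve supported data types without repeating the comparison logic, which is why the kernel uses macros rather than hand-writing each case.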

The Graph Engine (GE) is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware, serving as the bridge between the two. GE takes the graph delivered by ME as input, applies a series of deep graph optimizations, and outputs a graph that can run efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its computing power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.