You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

range_kernel.cc 6.0 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
#include "host_kernels/range_kernel.h"

#include <cmath>
#include <cstdlib>
#include <memory>
#include <set>

#include "common/debug/log.h"
#include "common/fp16_t.h"
#include "common/types.h"
#include "common/util.h"
#include "framework/common/debug/ge_log.h"
#include "framework/common/ge_inner_error_codes.h"
#include "graph/utils/type_utils.h"
#include "inc/kernel_factory.h"
namespace ge {
namespace {
// Range takes exactly three inputs: start, limit and delta.
constexpr size_t kRangeInputNum = 3;
// Every input must be a scalar, i.e. its shape has zero dimensions.
constexpr uint32_t kRangeDimNum = 0;
// Positions of the operands inside the input tensor vector.
constexpr size_t kStartIndex = 0;
constexpr size_t kLimitIndex = 1;
constexpr size_t kDeltaIndex = 2;
// Data types this folding kernel supports.
const std::set<DataType> kRangeSupportedType = {DT_INT32, DT_FLOAT};
}  // namespace
  36. Status RangeKernel::Compute(const OpDescPtr op_desc_ptr, const std::vector<ConstGeTensorPtr> &input,
  37. std::vector<GeTensorPtr> &v_output) {
  38. GELOGD("RangeKernel in");
  39. if (op_desc_ptr == nullptr) {
  40. GELOGE(PARAM_INVALID, "Parameter's invalid, input opDescPtr is nullptr.");
  41. return PARAM_INVALID;
  42. }
  43. Status ret = RangeCheck(input);
  44. if (ret != SUCCESS) {
  45. return ret;
  46. }
  47. GeTensorPtr output_ptr = MakeShared<GeTensor>(op_desc_ptr->GetOutputDesc(0));
  48. if (output_ptr == nullptr) {
  49. GELOGE(MEMALLOC_FAILED, "Make shared failed");
  50. return MEMALLOC_FAILED;
  51. }
  52. ConstGeTensorPtr start = input.at(kStartIndex);
  53. ConstGeTensorPtr limit = input.at(kLimitIndex);
  54. ConstGeTensorPtr delta = input.at(kDeltaIndex);
  55. DataType data_type = delta->GetTensorDesc().GetDataType();
  56. if (data_type == DT_FLOAT) {
  57. if (GetRange(*reinterpret_cast<const float *>(start->GetData().data()),
  58. *reinterpret_cast<const float *>(limit->GetData().data()),
  59. *reinterpret_cast<const float *>(delta->GetData().data()), output_ptr) != SUCCESS) {
  60. return PARAM_INVALID;
  61. }
  62. } else if (data_type == DT_INT32) {
  63. if (GetRange(*reinterpret_cast<const int32_t *>(start->GetData().data()),
  64. *reinterpret_cast<const int32_t *>(limit->GetData().data()),
  65. *reinterpret_cast<const int32_t *>(delta->GetData().data()), output_ptr) != SUCCESS) {
  66. return PARAM_INVALID;
  67. }
  68. }
  69. output_ptr->MutableTensorDesc().SetDataType(data_type);
  70. v_output.push_back(output_ptr);
  71. return SUCCESS;
  72. }
  73. Status RangeKernel::RangeCheck(const std::vector<ConstGeTensorPtr> &input) {
  74. // check input number
  75. if (input.size() != kRangeInputNum) {
  76. GELOGI("The number of input for Range must be %zu.", kRangeInputNum);
  77. return NOT_CHANGED;
  78. }
  79. ConstGeTensorPtr start = input.at(0);
  80. ConstGeTensorPtr limit = input.at(1);
  81. ConstGeTensorPtr delta = input.at(2);
  82. GE_CHECK_NOTNULL(start);
  83. GE_CHECK_NOTNULL(limit);
  84. GE_CHECK_NOTNULL(delta);
  85. // check whether there is data in Tensor
  86. if (start->GetData().size() == 0 || limit->GetData().size() == 0 || delta->GetData().size() == 0) {
  87. GELOGI("Check data size fail. start: %zu, limit: %zu, delta: %zu", start->GetData().size(), limit->GetData().size(),
  88. delta->GetData().size());
  89. return NOT_CHANGED;
  90. }
  91. // check whether the data types are the same
  92. DataType type = start->GetTensorDesc().GetDataType();
  93. if ((type != limit->GetTensorDesc().GetDataType()) || (type != delta->GetTensorDesc().GetDataType())) {
  94. GELOGI("Data type of inputs for Range not matched.");
  95. return NOT_CHANGED;
  96. }
  97. // check whether are all scalars
  98. size_t range_dim = static_cast<size_t>(kRangeDimNum);
  99. bool all_scalar = (start->GetTensorDesc().MutableShape().GetDimNum() == range_dim) &&
  100. (limit->GetTensorDesc().MutableShape().GetDimNum() == range_dim) &&
  101. (delta->GetTensorDesc().MutableShape().GetDimNum() == range_dim);
  102. if (!all_scalar) {
  103. GELOGI("Inputs for Range are not all scalars.");
  104. return NOT_CHANGED;
  105. }
  106. // check if input data type is supported
  107. if (kRangeSupportedType.find(type) == kRangeSupportedType.end()) {
  108. GELOGI("Range does not support this Data type: %s", TypeUtils::DataTypeToSerialString(type).c_str());
  109. return NOT_CHANGED;
  110. }
  111. return SUCCESS;
  112. }
  113. template <typename T>
  114. Status RangeKernel::GetRange(const T start, const T limit, const T delta, GeTensorPtr &output) {
  115. // check whether start, limit, delta is valid
  116. if (delta == 0) {
  117. GELOGE(PARAM_INVALID, "Requires delta != 0");
  118. return PARAM_INVALID;
  119. }
  120. if (start > limit && delta > 0) {
  121. GELOGE(PARAM_INVALID, "Requires start <= limit when delta > 0");
  122. return PARAM_INVALID;
  123. }
  124. if (start < limit && delta < 0) {
  125. GELOGE(PARAM_INVALID, "Requires start >= limit when delta < 0");
  126. return PARAM_INVALID;
  127. }
  128. int64_t size = (std::is_integral<T>::value ? ((std::abs(limit - start) + std::abs(delta) - 1) / std::abs(delta))
  129. : std::ceil(std::abs((limit - start) / delta)));
  130. output->MutableTensorDesc().SetShape(GeShape()); // when size is 0
  131. if (size > 0) {
  132. unique_ptr<T[]> buf(new (std::nothrow) T[size]);
  133. if (buf == nullptr) {
  134. GELOGE(MEMALLOC_FAILED, "New buf failed.");
  135. return MEMALLOC_FAILED;
  136. }
  137. T val = start;
  138. for (int64_t i = 0; i < size; ++i) {
  139. buf[i] = val;
  140. val += delta;
  141. }
  142. if (output->SetData(reinterpret_cast<uint8_t *>(buf.get()), size * sizeof(T)) != GRAPH_SUCCESS) {
  143. GELOGW("GetRange: SetData failed");
  144. }
  145. output->MutableTensorDesc().SetShape(GeShape({size}));
  146. }
  147. return SUCCESS;
  148. }
// Register this kernel with the factory so RANGE nodes can be folded.
REGISTER_KERNEL(RANGE, RangeKernel);
}  // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示。