
fill_kernel.cc 4.5 kB

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "host_kernels/fill_kernel.h"

#include <memory>
#include <vector>

#include "common/fp16_t.h"
#include "framework/common/ge_inner_error_codes.h"
#include "framework/common/op/ge_op_utils.h"
#include "framework/common/debug/ge_log.h"
#include "host_kernels/kernel_utils.h"
#include "graph/utils/type_utils.h"
#include "inc/kernel_factory.h"
#include "framework/common/types.h"

namespace {
const int kFillInputSize = 2;
const int kFillDimsInputIndex = 0;
const int kFillDataInputIndex = 1;
}  // namespace

namespace ge {
Status FillKernel::Compute(const ge::OpDescPtr op_desc_ptr, const std::vector<ge::ConstGeTensorPtr> &input,
                           std::vector<ge::GeTensorPtr> &v_output) {
  if (input.size() != kFillInputSize) {
    GELOGW("fill input size must be %d", kFillInputSize);
    return NOT_CHANGED;
  }
  if (op_desc_ptr == nullptr) {
    GELOGE(PARAM_INVALID, "Parameter's invalid, Input opDescPtr is nullptr.");
    return PARAM_INVALID;
  }
  GELOGD("FillKernel in, name: %s.", op_desc_ptr->GetName().c_str());
  GE_CHECK_NOTNULL(input.at(kFillDimsInputIndex));
  GE_CHECK_NOTNULL(input.at(kFillDataInputIndex));
  ConstGeTensorPtr dims = input.at(kFillDimsInputIndex);
  ConstGeTensorPtr value = input.at(kFillDataInputIndex);

  // Check if the value is a scalar
  if (value->GetTensorDesc().GetShape().GetDimNum() != 0) {
    GELOGW("value must be a scalar.");
    return NOT_CHANGED;
  }

  auto output_desc = op_desc_ptr->GetOutputDescPtr(0);
  GE_CHECK_NOTNULL(output_desc);
  if (output_desc->GetShape().IsUnknownShape()) {
    GELOGD("Output is unknown shape, [%s] skip FillKernel.", op_desc_ptr->GetName().c_str());
    return NOT_CHANGED;
  }

  GeTensorPtr output_ptr;
  output_ptr = MakeShared<GeTensor>(op_desc_ptr->GetOutputDesc(0));
  if (output_ptr == nullptr) {
    GELOGE(MEMALLOC_FAILED, "make_shared ge::GeTensor failed");
    return MEMALLOC_FAILED;
  }

  int64_t fill_size = 1;
  std::vector<int64_t> vec_dim;
  DataType dim_type = dims->GetTensorDesc().GetDataType();

  // Calculate user input dim
  Status ret = PARAM_INVALID;
  if (dim_type == DT_INT32) {
    ret = KernelUtils::CalcDims<int32_t>(dims, vec_dim, fill_size);
  } else if (dim_type == DT_INT64) {
    ret = KernelUtils::CalcDims<int64_t>(dims, vec_dim, fill_size);
  } else {
    GELOGE(PARAM_INVALID, "dim type must be DT_INT32 or DT_INT64.");
    return PARAM_INVALID;
  }
  if (ret != SUCCESS) {
    GELOGE(ret, "CalcDims failed, dim_type: %s", TypeUtils::DataTypeToSerialString(dim_type).c_str());
    return ret;
  }

  // Generating a sequence of numbers
  DataType data_type = value->GetTensorDesc().GetDataType();
  ret = PARAM_INVALID;
  switch (data_type) {
#define CASE(dtype, type)                                                                                         \
  case dtype:                                                                                                     \
    ret = KernelUtils::GenData(fill_size, *reinterpret_cast<const type *>(value->GetData().data()), output_ptr);  \
    break;
    CASE(DT_FLOAT, float)
    CASE(DT_FLOAT16, fp16_t)
    CASE(DT_INT8, int8_t)
    CASE(DT_INT16, int16_t)
    CASE(DT_UINT16, uint16_t)
    CASE(DT_UINT8, uint8_t)
    CASE(DT_INT32, int32_t)
    CASE(DT_INT64, int64_t)
    CASE(DT_UINT32, uint32_t)
    CASE(DT_UINT64, uint64_t)
    CASE(DT_BOOL, bool)
    CASE(DT_DOUBLE, double)
#undef CASE
    default:
      GELOGW("invalid data type: %s", TypeUtils::DataTypeToSerialString(data_type).c_str());
      return NOT_CHANGED;
  }
  if (ret != SUCCESS) {
    GELOGE(ret, "GenData failed, data_type: %s", TypeUtils::DataTypeToSerialString(data_type).c_str());
    return ret;
  }

  output_ptr->MutableTensorDesc().SetShape(GeShape(vec_dim));
  output_ptr->MutableTensorDesc().SetDataType(DataType(data_type));
  v_output.push_back(output_ptr);
  return SUCCESS;
}

REGISTER_KERNEL(FILL, FillKernel);
}  // namespace ge
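
The helpers KernelUtils::CalcDims and KernelUtils::GenData are defined in host_kernels/kernel_utils.h, which is not shown here. The sketch below is a simplified, self-contained illustration of what they are assumed to do, based only on how they are called above: CalcDims reads the dims tensor as a list of T values and accumulates their product into fill_size, and GenData replicates the scalar fill value fill_size times into the output buffer. Plain std::vector stands in for GeTensor, and the function bodies are illustrative assumptions, not the real GE implementation.

// Minimal sketch of the assumed behaviour of the FillKernel helpers.
#include <cstdint>
#include <iostream>
#include <vector>

// Assumed behaviour of KernelUtils::CalcDims: read each element of the "dims"
// input as type T, record it in vec_dim, and accumulate the element count.
template <typename T>
bool CalcDims(const std::vector<T> &dims_input, std::vector<int64_t> &vec_dim, int64_t &fill_size) {
  fill_size = 1;
  for (const T d : dims_input) {
    if (d < 0) {
      return false;  // negative dimensions are invalid
    }
    vec_dim.push_back(static_cast<int64_t>(d));
    fill_size *= static_cast<int64_t>(d);
  }
  return true;
}

// Assumed behaviour of KernelUtils::GenData: replicate the scalar fill value
// fill_size times into the output buffer.
template <typename T>
void GenData(int64_t fill_size, T value, std::vector<T> &output) {
  output.assign(static_cast<size_t>(fill_size), value);
}

int main() {
  // Fill(dims = [2, 3], value = 7) -> a 2x3 tensor filled with sevens.
  std::vector<int32_t> dims_input = {2, 3};
  std::vector<int64_t> vec_dim;
  int64_t fill_size = 0;
  if (!CalcDims(dims_input, vec_dim, fill_size)) {
    return 1;
  }
  std::vector<int32_t> output;
  GenData<int32_t>(fill_size, 7, output);
  std::cout << "elements: " << output.size() << std::endl;  // prints 6
  return 0;
}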

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware and acts as the bridge between them. GE takes the graph delivered by ME as input, applies a series of deep graph optimizations, and outputs a graph that can run efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.