
pack_kernel.cc 8.3 kB

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "host_kernels/pack_kernel.h"

#include <memory>
#include <vector>

#include "common/debug/log.h"
#include "common/formats/utils/formats_trans_utils.h"
#include "common/ge_inner_error_codes.h"
#include "common/op/ge_op_utils.h"
#include "framework/common/debug/ge_log.h"
#include "graph/debug/ge_attr_define.h"
#include "host_kernels/kernel_utils.h"
#include "graph/utils/type_utils.h"
#include "inc/kernel_factory.h"
#include "framework/common/types.h"

namespace {
const int64_t kShapeItemNumMAX = 2000000000;
}  // namespace

namespace ge {
Status PackKernel::Compute(const ge::OpDescPtr op_desc_ptr, const std::vector<ge::ConstGeTensorPtr> &input,
                           std::vector<ge::GeTensorPtr> &v_output) {
  GELOGI("Pack kernel in.");
  Status validate_ret = ValidateKernelParams(op_desc_ptr, input);
  if (validate_ret != SUCCESS) {
    GELOGW("Pack kernel input is invalid, can not continue compute.");
    return NOT_CHANGED;
  }

  GeShape final_shape;
  ExpandDims(axis_, input, final_shape);

  // generate output
  GeTensorPtr output_ptr = MakeShared<GeTensor>(op_desc_ptr->GetOutputDesc(0));
  if (output_ptr == nullptr) {
    GELOGW("Fail to malloc output.");
    return OUT_OF_MEMORY;
  }
  Status ret = CopyOutputData(final_shape, input, output_ptr);
  if (ret != SUCCESS) {
    GELOGW("Pack inputs failed. Ignore pack kernel.");
    return NOT_CHANGED;
  }
  v_output.push_back(output_ptr);
  return SUCCESS;
}

Status PackKernel::ValidateKernelParams(const ge::OpDescPtr &op_desc_ptr,
                                        const std::vector<ge::ConstGeTensorPtr> &input) {
  if (op_desc_ptr == nullptr) {
    GELOGW("Input opdesc is nullptr.");
    return PARAM_INVALID;
  }
  if (!(AttrUtils::GetInt(op_desc_ptr, PACK_ATTR_NAME_NUM, n_))) {
    n_ = 0;
    GELOGD("Attr %s is not set, default value %ld is used.", PACK_ATTR_NAME_NUM.c_str(), n_);
  }
  if (!(AttrUtils::GetInt(op_desc_ptr, ATTR_NAME_AXIS, axis_))) {
    GELOGW("Attr %s does not exist.", ATTR_NAME_AXIS.c_str());
    return PARAM_INVALID;
  }
  if (input.empty()) {
    GELOGW("The number of input for Pack should be %ld, in fact it is %zu.", n_, input.size());
    return NOT_CHANGED;
  }
  if (input.size() != static_cast<size_t>(n_)) {
    GELOGW("The number of input for Pack should be %ld, in fact it is %zu.", n_, input.size());
    return PARAM_INVALID;
  }
  data_type_ = op_desc_ptr->GetInputDesc(0).GetDataType();
  GeShape shape = op_desc_ptr->GetInputDesc(0).GetShape();
  if (axis_ < 0 || axis_ > (static_cast<int64_t>(shape.GetDimNum()) + 1)) {
    GELOGW("Axis is %ld, which is out of range [0, R+1].", axis_);
    return NOT_CHANGED;
  }

  Status validate_ret = ValidateInputs(op_desc_ptr, input);
  if (validate_ret != SUCCESS) {
    GELOGW("Validate inputs failed. Ignore pack kernel.");
    return NOT_CHANGED;
  }
  return SUCCESS;
}

Status PackKernel::ValidateInputs(const ge::OpDescPtr &op_desc_ptr, const std::vector<ge::ConstGeTensorPtr> &input) {
  GeShape shape;
  for (int64_t i = 0; i < n_; i++) {
    if (input[i] == nullptr) {
      GELOGW("Input %ld of pack kernel %s is null.", i, op_desc_ptr->GetName().c_str());
      return PARAM_INVALID;
    }
    if (i == 0) {
      // get first input shape
      shape = input[0]->GetTensorDesc().GetShape();
    }
    GeTensorDesc tensor_desc = input[i]->GetTensorDesc();
    // check whether the data type of each input matches input 0
    if (tensor_desc.GetDataType() != data_type_) {
      GELOGW("Data type of input %ld for pack does not match, data type should be %s, but actual data type is %s.", i,
             TypeUtils::DataTypeToSerialString(data_type_).c_str(),
             TypeUtils::DataTypeToSerialString(tensor_desc.GetDataType()).c_str());
      return NOT_CHANGED;
    }
    // check whether the shape of each input matches input 0
    auto dst_shape = tensor_desc.GetShape();
    int64_t num = 1;
    for (auto dim : dst_shape.GetDims()) {
      if (dim < 0) {
        GELOGW("Invalid dim %ld in the shape %s.", dim, formats::ShapeToString(shape).c_str());
        return NOT_CHANGED;
      }
      num *= dim;
      if (num > kShapeItemNumMAX) {
        GELOGW("Shape overflow, the total count should be less than %ld!", kShapeItemNumMAX);
        return NOT_CHANGED;
      }
    }
    if (!formats::IsShapeEqual(shape, dst_shape)) {
      GELOGW("Shape of input %ld is not equal with input 0.", i);
      return NOT_CHANGED;
    }
    // check whether the tensor data size is zero
    if (input[i]->GetData().size() == 0 && num != 0) {
      GELOGW("Input %ld does not have value.", i);
      return NOT_CHANGED;
    }
  }
  return SUCCESS;
}

void PackKernel::ExpandDims(const int64_t axis, const std::vector<ge::ConstGeTensorPtr> &input, GeShape &final_shape) {
  // expand dims: insert a new dim of size N at position `axis`.
  vector<int64_t> current_dims = input[0]->GetTensorDesc().GetShape().GetDims();
  vector<int64_t> final_dims;
  final_dims.assign(current_dims.begin(), current_dims.end());
  // assume there are N inputs, each with shape [A,B,C]:
  // if axis = 0, after pack, the output shape should be [N,A,B,C];
  // if axis = 1, after pack, the output shape should be [A,N,B,C];
  // ...
  // if axis = 3, after pack, the output shape should be [A,B,C,N].
  if (axis >= static_cast<int64_t>(final_dims.size())) {
    final_dims.emplace_back(n_);
  } else {
    final_dims.insert(final_dims.begin() + axis, n_);
  }
  final_shape = GeShape(final_dims);
}

Status PackKernel::CopyOutputData(const GeShape &final_shape, const std::vector<ge::ConstGeTensorPtr> &input,
                                  ge::GeTensorPtr &output_ptr) {
  output_ptr->MutableTensorDesc().SetShape(final_shape);
  output_ptr->MutableTensorDesc().SetDataType(DataType(data_type_));
  if (final_shape.GetShapeSize() == 0 && final_shape.GetDims().size() != 0) {
    // a zero appears in the shape list, so the output tensor data is [].
    return SUCCESS;
  }

  int64_t times = 1;
  int64_t unit = 1;
  // calculate data unit: product of the dims behind the pack axis
  for (int64_t i = (axis_ + 1); i < static_cast<int64_t>(final_shape.GetDimNum()); i++) {
    unit *= final_shape.GetDim(static_cast<size_t>(i));
  }
  // calculate copy times: product of the dims in front of the pack axis
  for (int64_t i = 0; i < axis_; i++) {
    times *= final_shape.GetDim(static_cast<size_t>(i));
  }
  GELOGD("Copy output data times is %ld, unit is %ld.", times, unit);

  uint32_t data_size = GetSizeByDataType(data_type_);
  // assume output shape is [A,N,B,C]: times = A, unit = B*C.
  // when copying data from the inputs, we follow the order times*N*unit.
  auto output_size = final_shape.GetShapeSize();
  std::shared_ptr<uint8_t> buf(new (std::nothrow) uint8_t[output_size * data_size], std::default_delete<uint8_t[]>());
  if (buf == nullptr) {
    GELOGW("Malloc buf is null. Ignore pack kernel.");
    return NOT_CHANGED;
  }

  size_t dst_offset = 0;
  size_t src_offset = 0;
  // data copy follows times*N*unit: for each of the `times` slices,
  // copy one `unit` from every input in turn.
  for (int64_t i = 0; i < times; i++) {
    for (int64_t j = 0; j < n_; j++) {
      // input range is already checked above; j is in [0, n_).
      const uint8_t *in_data = input[j]->GetData().data();
      auto ret = memcpy_s(buf.get() + dst_offset, output_size * data_size - dst_offset, in_data + src_offset,
                          data_size * unit);
      if (ret != EOK) {
        GELOGW("Memory copy failed.");
        return NOT_CHANGED;
      }
      dst_offset += data_size * unit;
    }
    src_offset += unit * data_size;
  }
  if (output_ptr->SetData(buf.get(), static_cast<size_t>(output_size * data_size)) != GRAPH_SUCCESS) {
    GELOGW("CopyOutputData: SetData failed.");
  }
  return SUCCESS;
}

REGISTER_KERNEL(PACK, PackKernel);
}  // namespace ge
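
To make the copy order in CopyOutputData concrete, here is a minimal standalone sketch, not part of the kernel: it uses plain C++ with std::memcpy in place of memcpy_s, hard-codes two inputs of shape [2,3] packed along axis = 1 (so times = 2, unit = 3, output shape [2,2,3]), and reproduces the same times*N*unit loop structure.

#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

int main() {
  const int64_t times = 2;  // product of dims in front of the pack axis
  const int64_t n = 2;      // number of packed inputs
  const int64_t unit = 3;   // product of dims behind the pack axis

  std::vector<std::vector<int32_t>> inputs = {
      {0, 1, 2, 3, 4, 5},        // input 0, shape [2,3]
      {10, 11, 12, 13, 14, 15},  // input 1, shape [2,3]
  };
  std::vector<int32_t> output(times * n * unit);

  size_t dst_offset = 0;
  size_t src_offset = 0;
  // for each of the `times` slices, copy one `unit` from every input in turn
  for (int64_t i = 0; i < times; i++) {
    for (int64_t j = 0; j < n; j++) {
      std::memcpy(output.data() + dst_offset, inputs[j].data() + src_offset,
                  unit * sizeof(int32_t));
      dst_offset += unit;
    }
    src_offset += unit;  // advance once per slice, shared by all inputs
  }

  // prints: 0 1 2 10 11 12 3 4 5 13 14 15
  for (int32_t v : output) std::cout << v << " ";
  std::cout << std::endl;
  return 0;
}

Note how src_offset advances once per outer iteration while dst_offset advances on every copy: each output slice interleaves the corresponding slice of every input, which is exactly the [A,N,B,C] layout the kernel's comment describes.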

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware, acting as a bridge between the two. GE takes the graph delivered by ME as input, performs a series of deep graph optimizations on it, and finally outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training/inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.