
pack_kernel.cc 8.2 kB

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "host_kernels/pack_kernel.h"

#include <memory>
#include <vector>

#include "common/debug/log.h"
#include "common/formats/utils/formats_trans_utils.h"
#include "common/ge_inner_error_codes.h"
#include "common/op/ge_op_utils.h"
#include "framework/common/debug/ge_log.h"
#include "graph/debug/ge_attr_define.h"
#include "host_kernels/kernel_utils.h"
#include "graph/utils/type_utils.h"
#include "inc/kernel_factory.h"
namespace {
const int64_t kShapeItemNumMAX = 2000000000;
}  // namespace

namespace ge {
Status PackKernel::Compute(const ge::OpDescPtr op_desc_ptr, const std::vector<ge::ConstGeTensorPtr> &input,
                           std::vector<ge::GeTensorPtr> &v_output) {
  GELOGI("Pack kernel in.");
  Status validate_ret = ValidateKernelParams(op_desc_ptr, input);
  if (validate_ret != SUCCESS) {
    GELOGW("Pack kernel input is invalid, cannot continue compute.");
    return NOT_CHANGED;
  }

  GeShape final_shape;
  ExpandDims(axis_, input, final_shape);

  // generate output
  GeTensorPtr output_ptr = MakeShared<GeTensor>(op_desc_ptr->GetOutputDesc(0));
  if (output_ptr == nullptr) {
    GELOGW("Fail to malloc output.");
    return OUT_OF_MEMORY;
  }
  Status ret = CopyOutputData(final_shape, input, output_ptr);
  if (ret != SUCCESS) {
    GELOGW("Pack inputs failed. Ignore pack kernel.");
    return NOT_CHANGED;
  }
  v_output.push_back(output_ptr);
  return SUCCESS;
}
Status PackKernel::ValidateKernelParams(const ge::OpDescPtr &op_desc_ptr,
                                        const std::vector<ge::ConstGeTensorPtr> &input) {
  if (op_desc_ptr == nullptr) {
    GELOGW("Input opdesc is nullptr.");
    return PARAM_INVALID;
  }
  if (!(AttrUtils::GetInt(op_desc_ptr, PACK_ATTR_NAME_NUM, n_))) {
    n_ = 0;
    GELOGD("Attr %s is not set, default value %ld is used.", PACK_ATTR_NAME_NUM.c_str(), n_);
  }
  if (!(AttrUtils::GetInt(op_desc_ptr, ATTR_NAME_AXIS, axis_))) {
    GELOGW("Attr %s does not exist.", ATTR_NAME_AXIS.c_str());
    return PARAM_INVALID;
  }
  if (input.empty()) {
    GELOGW("The number of inputs for Pack should be %ld, in fact it is %zu.", n_, input.size());
    return NOT_CHANGED;
  }
  if (input.size() != static_cast<size_t>(n_)) {
    GELOGW("The number of inputs for Pack should be %d, in fact it is %zu.", static_cast<int>(n_), input.size());
    return PARAM_INVALID;
  }
  data_type_ = op_desc_ptr->GetInputDesc(0).GetDataType();
  GeShape shape = op_desc_ptr->GetInputDesc(0).GetShape();
  if (axis_ < 0 || axis_ > (static_cast<int64_t>(shape.GetDimNum()) + 1)) {
    GELOGW("Axis is %ld, which is out of range [0, R+1].", axis_);
    return NOT_CHANGED;
  }
  Status validate_ret = ValidateInputs(op_desc_ptr, input);
  if (validate_ret != SUCCESS) {
    GELOGW("Validate inputs failed. Ignore pack kernel.");
    return NOT_CHANGED;
  }
  return SUCCESS;
}
Status PackKernel::ValidateInputs(const ge::OpDescPtr &op_desc_ptr, const std::vector<ge::ConstGeTensorPtr> &input) {
  GeShape shape;
  for (int64_t i = 0; i < n_; i++) {
    if (input[i] == nullptr) {
      GELOGW("Input %ld of pack kernel %s is null.", i, op_desc_ptr->GetName().c_str());
      return PARAM_INVALID;
    }
    if (i == 0) {
      // get first input shape
      shape = input[0]->GetTensorDesc().GetShape();
    }
    GeTensorDesc tensor_desc = input[i]->GetTensorDesc();
    // check whether the data type of each input matches the first one
    if (tensor_desc.GetDataType() != data_type_) {
      GELOGW("Data type of input %ld for pack does not match, data type should be %s, but actual data type is %s.", i,
             TypeUtils::DataTypeToSerialString(data_type_).c_str(),
             TypeUtils::DataTypeToSerialString(tensor_desc.GetDataType()).c_str());
      return NOT_CHANGED;
    }
    // check whether the shape of each input matches the first one
    auto dst_shape = tensor_desc.GetShape();
    int64_t num = 1;
    for (auto dim : dst_shape.GetDims()) {
      if (dim < 0) {
        GELOGW("Invalid dim %ld in the shape %s.", dim, formats::ShapeToString(dst_shape).c_str());
        return NOT_CHANGED;
      }
      num *= dim;
      if (num > kShapeItemNumMAX) {
        GELOGW("Shape overflow, the total count should be less than %ld!", kShapeItemNumMAX);
        return NOT_CHANGED;
      }
    }
    if (!formats::IsShapeEqual(shape, dst_shape)) {
      GELOGW("Shape of input %ld is not equal with input 0.", i);
      return NOT_CHANGED;
    }
    // check whether the tensor data size is zero
    if (input[i]->GetData().size() == 0 && num != 0) {
      GELOGW("Input %ld does not have value.", i);
      return NOT_CHANGED;
    }
  }
  return SUCCESS;
}
void PackKernel::ExpandDims(const int64_t axis, const std::vector<ge::ConstGeTensorPtr> &input, GeShape &final_shape) {
  // expand dims
  vector<int64_t> current_dims = input[0]->GetTensorDesc().GetShape().GetDims();
  vector<int64_t> final_dims;
  final_dims.assign(current_dims.begin(), current_dims.end());

  // expand dim of N
  // assume there are N inputs, each with shape [A,B,C]:
  // if axis = 0, after pack, the output shape should be [N,A,B,C].
  // if axis = 1, after pack, the output shape should be [A,N,B,C].
  // ...
  // if axis = 3, after pack, the output shape should be [A,B,C,N].
  if (axis >= static_cast<int64_t>(final_dims.size())) {
    final_dims.emplace_back(n_);
  } else {
    final_dims.insert(final_dims.begin() + axis, n_);
  }
  final_shape = GeShape(final_dims);
}
Status PackKernel::CopyOutputData(const GeShape &final_shape, const std::vector<ge::ConstGeTensorPtr> &input,
                                  ge::GeTensorPtr &output_ptr) {
  output_ptr->MutableTensorDesc().SetShape(final_shape);
  output_ptr->MutableTensorDesc().SetDataType(DataType(data_type_));
  if (final_shape.GetShapeSize() == 0 && final_shape.GetDims().size() != 0) {
    // a zero in the shape list means the output tensor data is [].
    return SUCCESS;
  }

  int64_t times = 1;
  int64_t unit = 1;
  // calculate data unit: the number of elements after the pack axis
  for (int64_t i = (axis_ + 1); i < static_cast<int64_t>(final_shape.GetDimNum()); i++) {
    unit *= final_shape.GetDim(static_cast<size_t>(i));
  }
  // calculate copy times: the number of elements before the pack axis
  for (int64_t i = 0; i < axis_; i++) {
    times *= final_shape.GetDim(static_cast<size_t>(i));
  }
  GELOGD("Copy output data times is %ld, unit is %ld.", times, unit);

  uint32_t data_size = GetSizeByDataType(data_type_);
  // assume the output shape is [A,N,B,C]; then times = A and unit = B*C.
  // when copying data from the inputs, we follow the pattern times*N*unit.
  auto output_size = final_shape.GetShapeSize();
  std::shared_ptr<uint8_t> buf(new (std::nothrow) uint8_t[output_size * data_size], std::default_delete<uint8_t[]>());
  if (buf == nullptr) {
    GELOGW("Malloc buf failed. Ignore pack kernel.");
    return NOT_CHANGED;
  }

  size_t dst_offset = 0;
  size_t src_offset = 0;
  // in each of the `times` rounds, copy one unit-sized block from each of the N inputs
  for (int64_t i = 0; i < times; i++) {
    for (int64_t j = 0; j < n_; j++) {
      // input range is already checked before; it is [0, n_).
      const uint8_t *in_data = input[j]->GetData().data();
      auto ret = memcpy_s(buf.get() + dst_offset, output_size * data_size - dst_offset, in_data + src_offset,
                          data_size * unit);
      if (ret != EOK) {
        GELOGW("Memory copy failed.");
        return NOT_CHANGED;
      }
      dst_offset += data_size * unit;
    }
    src_offset += unit * data_size;
  }
  if (output_ptr->SetData(buf.get(), static_cast<size_t>(output_size * data_size)) != GRAPH_SUCCESS) {
    GELOGW("CopyOutputData: SetData failed.");
  }
  return SUCCESS;
}
REGISTER_KERNEL(PACK, PackKernel);
}  // namespace ge
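
To see how the shape expansion in ExpandDims and the times/N/unit copy scheme in CopyOutputData play out on concrete data, here is a minimal standalone sketch (not part of pack_kernel.cc; names such as PackDemo are illustrative only). It packs equal-shape int32 buffers along an axis using the same interleaving as the kernel above:

// Standalone sketch: pack `n` equal-shape int32 buffers along `axis`
// using the same times/N/unit copy scheme as CopyOutputData above.
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

std::vector<int32_t> PackDemo(const std::vector<std::vector<int32_t>> &inputs,
                              const std::vector<int64_t> &in_dims, int64_t axis) {
  int64_t n = static_cast<int64_t>(inputs.size());
  // output shape = in_dims with `n` inserted at `axis` (cf. ExpandDims)
  std::vector<int64_t> out_dims(in_dims);
  out_dims.insert(out_dims.begin() + axis, n);
  // times = product of dims before axis, unit = product of dims after axis
  int64_t times = 1, unit = 1;
  for (int64_t i = 0; i < axis; ++i) times *= out_dims[i];
  for (size_t i = axis + 1; i < out_dims.size(); ++i) unit *= out_dims[i];

  std::vector<int32_t> out(static_cast<size_t>(times * n * unit));
  size_t dst = 0, src = 0;
  for (int64_t t = 0; t < times; ++t) {  // one round per leading block
    for (int64_t j = 0; j < n; ++j) {    // one unit from each input per round
      std::memcpy(out.data() + dst, inputs[j].data() + src, unit * sizeof(int32_t));
      dst += unit;
    }
    src += unit;
  }
  return out;
}

int main() {
  // Two inputs of shape [2,3]; axis = 1 gives output shape [2,2,3]:
  // times = 2, unit = 3, so the rows of the two inputs are interleaved.
  std::vector<std::vector<int32_t>> inputs = {{1, 2, 3, 4, 5, 6}, {7, 8, 9, 10, 11, 12}};
  auto out = PackDemo(inputs, {2, 3}, 1);
  for (int32_t v : out) std::cout << v << ' ';  // 1 2 3 7 8 9 4 5 6 10 11 12
  std::cout << '\n';
  return 0;
}

With axis = 0 the same routine degenerates to plain concatenation (times = 1), which is why the kernel only needs the one copy loop.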

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware and serves as the bridge between them. GE takes the graph delivered by ME as input, performs a series of deep graph optimizations, and finally outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training/inference, GE is invoked automatically and is transparent to the user. GE mainly consists of two parts, GE API and GE Core; the detailed architecture diagram is shown below.