
ragged_conversion_ops.h
/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*!
 * \file ragged_conversion_ops.h
 * \brief
 */
#ifndef OPS_BUILT_IN_OP_PROTO_INC_RAGGED_CONVERSION_OPS_H_
#define OPS_BUILT_IN_OP_PROTO_INC_RAGGED_CONVERSION_OPS_H_

#include "graph/operator_reg.h"

namespace ge {
/**
*@brief Converts a RaggedTensor into a SparseTensor with the same values. \n
*@par Inputs:
*Two inputs, including:
*@li rt_nested_splits: A list of at least one Tensor object with the same type
in: int32, int64. The row_splits for the RaggedTensor. It is a dynamic input.
*@li rt_dense_values: A Tensor. The flat_values for the RaggedTensor.
Must be one of the following types: bool, int8, uint8, int16, uint16, int32,
int64, double, float, float16. \n
*@par Attributes:
*@li RAGGED_RANK: An int, the ragged rank, i.e. the number of rt_nested_splits
inputs. Defaults to 1.
*@li Tsplits: An optional attribute of type Type, the data type of
rt_nested_splits. Defaults to DT_INT64. \n
*@par Outputs:
*@li sparse_indices: A Tensor of type int64.
*@li sparse_values: A Tensor. Has the same type as rt_dense_values.
*@li sparse_dense_shape: A Tensor of type int64. \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator RaggedTensorToSparse.
*/
REG_OP(RaggedTensorToSparse)
    .DYNAMIC_INPUT(rt_nested_splits, TensorType({DT_INT32, DT_INT64}))
    .INPUT(rt_dense_values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
                                        DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(sparse_indices, TensorType({DT_INT64}))
    .OUTPUT(sparse_values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
                                       DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(sparse_dense_shape, TensorType({DT_INT64}))
    .ATTR(RAGGED_RANK, Int, 1)
    .ATTR(Tsplits, Type, DT_INT64)
    .OP_END_FACTORY_REG(RaggedTensorToSparse)
/**
*@brief Creates a dense tensor from a ragged tensor, possibly altering its shape. \n
*@par Inputs:
*@li shape: A `Tensor`. Must be one of the following types: `int64`, `int32`.
*@li values: A 1D tensor representing the values of the ragged tensor.
*@li default_value: A `Tensor`. Must have the same type as `values`.
*@li row_partition_tensors: A list of at least one `Tensor` object with the same
type in: `int64`, `int32`. It is a dynamic input. \n
*@par Attributes:
*@li num_row_partition_tensors: The number of row partition tensors.
*@li row_partition_types: A list of `strings`.
The types of the row partition tensors. At present, these can be:
* "ROW_SPLITS": the row_splits tensor from the ragged tensor.
* "VALUE_ROWIDS": the value_rowids tensor from the ragged tensor.
* "FIRST_DIM_SIZE": if value_rowids is used for the first dimension, then it
is preceded by "FIRST_DIM_SIZE". \n
*@par Outputs:
*result: A `Tensor`. Has the same type as `values`.
*/
REG_OP(RaggedTensorToTensor)
    .INPUT(shape, TensorType({DT_INT32, DT_INT64}))
    .INPUT(values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
                               DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16}))
    .INPUT(default_value, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
                                      DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16}))
    .DYNAMIC_INPUT(row_partition_tensors, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(result, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
                                DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16}))
    .REQUIRED_ATTR(num_row_partition_tensors, Int)
    .REQUIRED_ATTR(row_partition_types, ListString)
    .OP_END_FACTORY_REG(RaggedTensorToTensor)
}  // namespace ge

#endif  // OPS_BUILT_IN_OP_PROTO_INC_RAGGED_CONVERSION_OPS_H_
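To make the RaggedTensorToSparse conversion concrete, the following is a minimal standalone C++ sketch of what the op computes for a single rt_nested_splits tensor (RAGGED_RANK = 1). It does not use the GE graph API; the RaggedToSparse and SparseResult names are illustrative only, and float is picked as a representative value type.

// Sketch of the RaggedTensorToSparse semantics for RAGGED_RANK = 1:
// row_splits plus flat values become (row, col) sparse indices, sparse
// values (identical to the flat values) and a dense shape [nrows, max_row_len].
#include <algorithm>
#include <array>
#include <cstdint>
#include <iostream>
#include <vector>

struct SparseResult {
  std::vector<std::array<int64_t, 2>> indices;  // sparse_indices
  std::vector<float> values;                    // sparse_values
  std::array<int64_t, 2> dense_shape;           // sparse_dense_shape
};

SparseResult RaggedToSparse(const std::vector<int64_t> &row_splits,
                            const std::vector<float> &flat_values) {
  SparseResult out;
  out.values = flat_values;
  const int64_t num_rows = static_cast<int64_t>(row_splits.size()) - 1;
  int64_t max_row_len = 0;
  for (int64_t row = 0; row < num_rows; ++row) {
    const int64_t row_len = row_splits[row + 1] - row_splits[row];
    max_row_len = std::max(max_row_len, row_len);
    for (int64_t col = 0; col < row_len; ++col) {
      out.indices.push_back({row, col});  // one index pair per ragged value
    }
  }
  out.dense_shape = {num_rows, max_row_len};
  return out;
}

int main() {
  // Ragged tensor [[1, 2], [], [3, 4, 5]]: row_splits = {0, 2, 2, 5}.
  const SparseResult r = RaggedToSparse({0, 2, 2, 5}, {1, 2, 3, 4, 5});
  for (const auto &idx : r.indices) {
    std::cout << "[" << idx[0] << ", " << idx[1] << "]\n";
  }
  std::cout << "dense_shape = [" << r.dense_shape[0] << ", "
            << r.dense_shape[1] << "]\n";
  return 0;
}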

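Similarly, the sketch below illustrates what RaggedTensorToTensor produces when row_partition_types is {"ROW_SPLITS"}: values are scattered row by row into a dense result of the requested shape, rows longer than the requested width are truncated, and remaining positions are filled with default_value. This is an illustrative standalone program under that assumption, not the op's implementation; RaggedToDense is a made-up helper name.

// Sketch of RaggedTensorToTensor with row_partition_types = {"ROW_SPLITS"}:
// values are placed row by row into a dense [rows, cols] matrix, truncating
// overlong rows and padding missing positions with default_value.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<std::vector<float>> RaggedToDense(
    const std::vector<int64_t> &shape,         // requested dense shape [rows, cols]
    const std::vector<float> &values,          // flat values of the ragged tensor
    float default_value,                       // fill value for empty positions
    const std::vector<int64_t> &row_splits) {  // the single ROW_SPLITS partition
  const int64_t rows = shape[0];
  const int64_t cols = shape[1];
  std::vector<std::vector<float>> dense(rows,
                                        std::vector<float>(cols, default_value));
  const int64_t ragged_rows =
      std::min<int64_t>(rows, static_cast<int64_t>(row_splits.size()) - 1);
  for (int64_t row = 0; row < ragged_rows; ++row) {
    const int64_t row_len =
        std::min<int64_t>(cols, row_splits[row + 1] - row_splits[row]);
    for (int64_t col = 0; col < row_len; ++col) {
      dense[row][col] = values[row_splits[row] + col];
    }
  }
  return dense;
}

int main() {
  // Ragged tensor [[1, 2], [], [3, 4, 5]] rendered into a 3 x 4 dense tensor
  // with 0 as the default value.
  const auto dense = RaggedToDense({3, 4}, {1, 2, 3, 4, 5}, 0.0f, {0, 2, 2, 5});
  for (const auto &row : dense) {
    for (const float v : row) std::cout << v << " ";
    std::cout << "\n";
  }
  return 0;
}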
The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module (ME) and the underlying hardware and serves as the bridge between them: it takes the graph delivered by ME as input, performs a series of deep graph optimizations, and outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its computing power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists of two main parts, GE API and GE Core; the detailed architecture is shown in the diagram below.