ragged_conversion_ops.h
/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*!
 * \file ragged_conversion_ops.h
 * \brief
 */
#ifndef OPS_BUILT_IN_OP_PROTO_INC_RAGGED_CONVERSION_OPS_H_
#define OPS_BUILT_IN_OP_PROTO_INC_RAGGED_CONVERSION_OPS_H_

#include "graph/operator_reg.h"

namespace ge {
/**
*@brief Converts a RaggedTensor into a SparseTensor with the same values. \n
*@par Inputs:
*Two inputs, including:
*@li rt_nested_splits: A list of at least 1 Tensor object with the same type
in: int32, int64. The row_splits of the RaggedTensor. It's a dynamic input.
*@li rt_dense_values: A Tensor. The flat_values of the RaggedTensor.
Must be one of the following types: bool, int8, uint8, int16, uint16, int32,
int64, double, float, float16. \n
*@par Attributes:
*@li RAGGED_RANK: An optional attribute of type int, the number of
rt_nested_splits tensors (the ragged rank). Defaults to 1.
*@li Tsplits: An optional attribute of type Type, the data type of the
row_splits tensors. Defaults to DT_INT64. \n
*@par Outputs:
*@li sparse_indices: A Tensor of type int64.
*@li sparse_values: A Tensor. Has the same type as rt_dense_values.
*@li sparse_dense_shape: A Tensor of type int64. \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator RaggedTensorToSparse.
*/
REG_OP(RaggedTensorToSparse)
    .DYNAMIC_INPUT(rt_nested_splits, TensorType({DT_INT32, DT_INT64}))
    .INPUT(rt_dense_values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
                                        DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(sparse_indices, TensorType({DT_INT64}))
    .OUTPUT(sparse_values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
                                       DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(sparse_dense_shape, TensorType({DT_INT64}))
    .ATTR(RAGGED_RANK, Int, 1)
    .ATTR(Tsplits, Type, DT_INT64)
    .OP_END_FACTORY_REG(RaggedTensorToSparse)
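// Usage sketch (illustrative addition, not part of the original interface):
// builds a RaggedTensorToSparse node for a ragged tensor of ragged rank 1,
// using the accessors that REG_OP conventionally generates for the
// declarations above (create_dynamic_input_*, set_dynamic_input_*,
// set_input_* and set_attr_*). The helper name and the exact accessor
// signatures are assumptions based on the common GE IR pattern.
inline op::RaggedTensorToSparse BuildRaggedToSparseExample(const Operator &row_splits,
                                                           const Operator &flat_values) {
  auto rts = op::RaggedTensorToSparse("ragged_to_sparse_example");
  rts.create_dynamic_input_rt_nested_splits(1);           // one row_splits tensor
  rts.set_dynamic_input_rt_nested_splits(0, row_splits);  // int32/int64 row_splits
  rts.set_input_rt_dense_values(flat_values);             // flat values of the ragged tensor
  rts.set_attr_RAGGED_RANK(1);                            // matches the single splits input
  rts.set_attr_Tsplits(DT_INT64);                         // data type of the splits
  return rts;
}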
/**
*@brief Creates a dense tensor from a ragged tensor, possibly altering its shape. \n
*@par Inputs:
*Four inputs, including:
*@li shape: A `Tensor`. Must be one of the following types: `int64`, `int32`.
*@li values: A 1D tensor representing the values of the ragged tensor.
*@li default_value: A `Tensor`. Must have the same type as `values`.
*@li row_partition_tensors: A list of at least 1 `Tensor` object with the same
type in: `int64`, `int32`. It's a dynamic input. \n
*@par Attributes:
*@li num_row_partition_tensors: A required attribute of type int, the number of
row partition tensors.
*@li row_partition_types: A required list of `strings`.
The types of the row partition tensors. At present, these can be:
* "ROW_SPLITS": the row_splits tensor from the ragged tensor.
* "VALUE_ROWIDS": the value_rowids tensor from the ragged tensor.
* "FIRST_DIM_SIZE": if value_rowids is used for the first dimension, then it
is preceded by "FIRST_DIM_SIZE". \n
*@par Outputs:
*@li result: A `Tensor`. Has the same type as `values`.
*/
REG_OP(RaggedTensorToTensor)
    .INPUT(shape, TensorType({DT_INT32, DT_INT64}))
    .INPUT(values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
                               DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16}))
    .INPUT(default_value, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16,
                                      DT_UINT16, DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16}))
    .DYNAMIC_INPUT(row_partition_tensors, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(result, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
                                DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16}))
    .REQUIRED_ATTR(num_row_partition_tensors, Int)
    .REQUIRED_ATTR(row_partition_types, ListString)
    .OP_END_FACTORY_REG(RaggedTensorToTensor)
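// Usage sketch (illustrative addition, not part of the original interface):
// densifies a ragged tensor described by a single row_splits partition with
// RaggedTensorToTensor, again relying on the conventionally generated
// accessors; the helper name and signatures are assumptions, not guarantees
// made by this header.
inline op::RaggedTensorToTensor BuildRaggedToTensorExample(const Operator &shape,
                                                           const Operator &values,
                                                           const Operator &default_value,
                                                           const Operator &row_splits) {
  auto rtt = op::RaggedTensorToTensor("ragged_to_tensor_example");
  rtt.set_input_shape(shape);                  // target dense shape (int32/int64)
  rtt.set_input_values(values);                // 1-D flat values
  rtt.set_input_default_value(default_value);  // fill value for positions not covered by values
  rtt.create_dynamic_input_row_partition_tensors(1);
  rtt.set_dynamic_input_row_partition_tensors(0, row_splits);
  rtt.set_attr_num_row_partition_tensors(1);
  rtt.set_attr_row_partition_types({"ROW_SPLITS"});  // the single partition is a row_splits tensor
  return rtt;
}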
}  // namespace ge
#endif  // OPS_BUILT_IN_OP_PROTO_INC_RAGGED_CONVERSION_OPS_H_

The Graph Engine (GE) module is a submodule of MindSpore. Its code is implemented in C++, and it sits between the front-end module ME and the underlying hardware, acting as a bridge between the two. GE takes the graph delivered by ME as input, performs a series of deep graph optimizations, and finally outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training/inference, GE is invoked automatically and is transparent to the user. GE mainly consists of two parts, GE API and GE Core; its detailed architecture is shown in the diagram below.