
!1409 update include files 0330

From: @shenwei41
Reviewed-by: @lilongfei15, @wenkai_dist, @ljl0711
Signed-off-by: @lilongfei15
tags/v1.2.0
mindspore-ci-bot committed 4 years ago
commit 0679af1d75
8 changed files with 86 additions and 58 deletions
  1. third_party/fwkacllib/inc/mmpa/mmpa_api.h (+1, -0)
  2. third_party/fwkacllib/inc/mmpa/sub_inc/mmpa_linux.h (+4, -0)
  3. third_party/fwkacllib/inc/mmpa/sub_inc/mmpa_win.h (+4, -0)
  4. third_party/fwkacllib/inc/ops/matrix_calculation_ops.h (+43, -43)
  5. third_party/fwkacllib/inc/ops/nn_pooling_ops.h (+17, -0)
  6. third_party/fwkacllib/inc/ops/selection_ops.h (+9, -9)
  7. third_party/fwkacllib/inc/ops/transformation_ops.h (+2, -6)
  8. third_party/fwkacllib/inc/runtime/dev.h (+6, -0)

third_party/fwkacllib/inc/mmpa/mmpa_api.h (+1, -0)

@@ -56,6 +56,7 @@
 #include <dirent.h>
 #include <getopt.h>
 #include <libgen.h>
+#include <malloc.h>

 #include <linux/types.h>
 #include <linux/hdreg.h>

third_party/fwkacllib/inc/mmpa/sub_inc/mmpa_linux.h (+4, -0)

@@ -550,6 +550,10 @@ MMPA_FUNC_VISIBILITY mmFileHandle mmShmOpen(const CHAR *name, INT32 oflag, mmMod
 MMPA_FUNC_VISIBILITY INT32 mmShmUnlink(const CHAR *name);
 MMPA_FUNC_VISIBILITY VOID *mmMmap(mmFd_t fd, mmSize_t size, mmOfft_t offset, mmFd_t *extra, INT32 prot, INT32 flags);
 MMPA_FUNC_VISIBILITY INT32 mmMunMap(VOID *data, mmSize_t size, mmFd_t *extra);
+
+MMPA_FUNC_VISIBILITY mmSize mmGetPageSize();
+MMPA_FUNC_VISIBILITY VOID *mmAlignMalloc(mmSize mallocSize, mmSize alignSize);
+MMPA_FUNC_VISIBILITY VOID mmAlignFree(VOID *addr);
 #define MMPA_DLL_API

 #ifdef __cplusplus
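The three declarations added above give MMPA a page-size query and aligned heap allocation. A minimal usage sketch, assuming only the declarations in this hunk (the Linux/Windows backing implementation is not part of this commit; the caller below is hypothetical):

#include "mmpa/mmpa_api.h"

// Grab one page-aligned page, report its size, and leave the release
// to the caller. Pair every mmAlignMalloc with mmAlignFree.
static VOID *AllocAlignedPage(mmSize *outSize)
{
    mmSize pageSize = mmGetPageSize();               // e.g. 4096 on most Linux hosts
    VOID *buf = mmAlignMalloc(pageSize, pageSize);   // allocation size, then alignment
    if (buf != NULL && outSize != NULL) {
        *outSize = pageSize;
    }
    return buf;  // free with mmAlignFree(buf)
}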


third_party/fwkacllib/inc/mmpa/sub_inc/mmpa_win.h (+4, -0)

@@ -557,6 +557,10 @@ MMPA_FUNC_VISIBILITY mmFileHandle mmShmOpen(const CHAR *name, INT32 oflag, mmMod
 MMPA_FUNC_VISIBILITY INT32 mmShmUnlink(const CHAR *name);
 MMPA_FUNC_VISIBILITY VOID *mmMmap(mmFd_t fd, mmSize_t size, mmOfft_t offset, mmFd_t *extra, INT32 prot, INT32 flags);
 MMPA_FUNC_VISIBILITY INT32 mmMunMap(VOID *data, mmSize_t size, mmFd_t *extra);
+
+MMPA_FUNC_VISIBILITY mmSize mmGetPageSize();
+MMPA_FUNC_VISIBILITY VOID *mmAlignMalloc(mmSize mallocSize, mmSize alignSize);
+MMPA_FUNC_VISIBILITY VOID mmAlignFree(VOID *addr);
 #ifdef __cplusplus
 #if __cplusplus
 }


third_party/fwkacllib/inc/ops/matrix_calculation_ops.h (+43, -43)

@@ -369,7 +369,7 @@ REG_OP(MatrixSetDiagD)
 * int64, complex64, qint8, quint8, qint32, uint16, complex128, half, uint32,
 * uint64
 *@li indices: An ND Tensor.
-*Must be one of the following types: int32, int64
+*Must be one of the following types: int32 or int64
 *@li updates: An ND Tensor.
 *Must be one of the following types: float16, float32, int8, uint8, double,
 * int64, complex64, qint8, quint8, qint32, uint16, complex128, half, uint32,
@@ -429,7 +429,7 @@ REG_OP(TensorScatterUpdate)
 *@li var: An ND Tensor . \n

 *Must be one of the following types: float16, float32, int32, int8, uint8
-*@li indices: An ND Tensor of type int32 or int64.
+*@li indices: An ND Tensor of type int32 or int64

 *@li updates: An Tensor. format:NCHW, NHWC . \n
@@ -447,10 +447,10 @@ REG_OP(TensorScatterUpdate)
 * Compatible with the TensorFlow operator ScatterAdd.
 */
 REG_OP(ScatterAdd)
-    .INPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
     .INPUT(indices, TensorType::IndexNumberType())
-    .INPUT(updates, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
-    .OUTPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
     .ATTR(use_locking, Bool, false)
     .OP_END_FACTORY_REG(ScatterAdd)
@@ -463,7 +463,7 @@ REG_OP(ScatterAdd)
 *Must be one of the following types: float16, float, int32, int8, uint8

 *@li indices: An ND Tensor.
-*Must be one of the following types: int32
+*Must be one of the following types: int32 or int64
 *@li updates: An ND Tensor.
 *Must be one of the following types: float16, float, int32, int8, uint8
@@ -478,10 +478,10 @@ REG_OP(ScatterAdd)
 * Compatible with the TensorFlow operator ScatterDiv.
 */
 REG_OP(ScatterDiv)
-    .INPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
-    .INPUT(indices, TensorType({DT_INT32}))
-    .INPUT(updates, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
-    .OUTPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .INPUT(indices, TensorType::IndexNumberType())
+    .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
     .ATTR(use_locking, Bool, false)
     .OP_END_FACTORY_REG(ScatterDiv)
@@ -493,7 +493,7 @@ REG_OP(ScatterDiv)
 *@li var: An ND Tensor.
 *Must be one of the following types: float16, float, int32, int8, uint8
 *@li indices: An ND Tensor.
-*Must be one of the following types: int32
+*Must be one of the following types: int32 or int64
 *@li updates: An ND Tensor.
 *Must be one of the following types: float16, float, int32, int8, uint8
 *@par Attributes:
@@ -507,10 +507,10 @@ REG_OP(ScatterDiv)
 * Compatible with the TensorFlow operator ScatterNdAdd.
 */
 REG_OP(ScatterNdAdd)
-    .INPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
     .INPUT(indices, TensorType::IndexNumberType())
-    .INPUT(updates, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
-    .OUTPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
     .ATTR(use_locking, Bool, false)
     .OP_END_FACTORY_REG(ScatterNdAdd)
@@ -550,7 +550,7 @@ REG_OP(TensorScatterAdd)
 *@li var: An ND Tensor.
 *Must be one of the following types: float16, float, int32, int8, uint8
 *@li indices: An ND Tensor.
-*Must be one of the following types: int32, int64
+*Must be one of the following types: int32 or int64
 *@li updates: An ND Tensor.
 *Must be one of the following types: float16, float, int32, int8, uint8
@@ -565,10 +565,10 @@ REG_OP(TensorScatterAdd)
 * Compatible with the TensorFlow operator ScatterNdSub.
 */
 REG_OP(ScatterNdSub)
-    .INPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
     .INPUT(indices, TensorType::IndexNumberType())
-    .INPUT(updates, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
-    .OUTPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
     .ATTR(use_locking, Bool, false)
     .OP_END_FACTORY_REG(ScatterNdSub)
@@ -608,7 +608,7 @@ REG_OP(TensorScatterSub)
 *@li var: An ND Tensor.
 *Must be one of the following types: float16, float, int32, int8, uint8
 *@li indices: An ND Tensor.
-*Must be one of the following types: int32, int64
+*Must be one of the following types: int32 or int64
 *@li updates: An ND Tensor.
 *Must be one of the following types: float16, float, int32, int8, uint8
 *@par Attributes:
@@ -622,10 +622,10 @@ REG_OP(TensorScatterSub)
 * Compatible with the TensorFlow operator ScatterSub.
 */
 REG_OP(ScatterSub)
-    .INPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
     .INPUT(indices, TensorType::IndexNumberType())
-    .INPUT(updates, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
-    .OUTPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
     .ATTR(use_locking, Bool, false)
     .OP_END_FACTORY_REG(ScatterSub)
@@ -796,7 +796,7 @@ REG_OP(ConfusionMatrix)
 *@li var: An ND Tensor.
 *Must be one of the following types: float16, float, int32, int8, uint8
 *@li indices: An ND Tensor.
-*Must be one of the following types: int32
+*Must be one of the following types: int32 or int64
 *@li updates: An ND Tensor . \n

 *Must be one of the following types: float16, float, int32, int8, uint8
@@ -813,7 +813,7 @@ REG_OP(ConfusionMatrix)
 */
 REG_OP(ScatterMul)
     .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
-    .INPUT(indices, TensorType({DT_INT32}))
+    .INPUT(indices, TensorType::IndexNumberType())
     .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
     .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
     .ATTR(use_locking, Bool, false)
@@ -826,13 +826,13 @@ REG_OP(ScatterMul)
 *@par Inputs:
 * Three inputs, including:
 *@li var: An ND Tensor.
-*Must be one of the following types: float16, float, int32
+*Must be one of the following types: float16, float, int32, int8, uint8

 *@li indices: An ND Tensor.
-*Must be one of the following types: int32
+*Must be one of the following types: int32 or int64

 *@li updates: An ND Tensor.
-*Must be one of the following types: float16, float, int32
+*Must be one of the following types: float16, float, int32, int8, uint8

 *@par Attributes:
 *use_locking: An optional bool. Defaults to "False". If "True", the operation
@@ -845,10 +845,10 @@ REG_OP(ScatterMul)
 * Compatible with the TensorFlow operator ScatterMin.
 */
 REG_OP(ScatterMin)
-    .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32}))
-    .INPUT(indices, TensorType({DT_INT32}))
-    .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32}))
-    .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32}))
+    .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .INPUT(indices, TensorType::IndexNumberType())
+    .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
     .ATTR(use_locking, Bool, false)
     .OP_END_FACTORY_REG(ScatterMin)
@@ -859,13 +859,13 @@ REG_OP(ScatterMin)
 * Three inputs, including:
 *@li var: An ND Tensor . \n

-*Must be one of the following types: float16, float, int32
+*Must be one of the following types: float16, float, int32, int8, uint8
 *@li indices: An NCHW, NHWC, or ND Tensor . \n

-*Must be one of the following types: int32
+*Must be one of the following types: int32 or int64
 *@li updates: An NCHW, NHWC, or ND Tensor . \n

-*Must be one of the following types: float16, float, int32
+*Must be one of the following types: float16, float, int32, int8, uint8

 *@par Attributes:
 *use_locking: An optional bool. Defaults to "False".
@@ -878,10 +878,10 @@ REG_OP(ScatterMin)
 * Compatible with the TensorFlow operator ScatterMax.
 */
 REG_OP(ScatterMax)
-    .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32}))
-    .INPUT(indices, TensorType({DT_INT32}))
-    .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32}))
-    .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32}))
+    .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .INPUT(indices, TensorType::IndexNumberType())
+    .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
     .ATTR(use_locking, Bool, false)
     .OP_END_FACTORY_REG(ScatterMax)
@@ -895,7 +895,7 @@ REG_OP(ScatterMax)
 *Must be one of the following types: float16, float, int32, int8, uint8
 *@li indices: An ND Tensor . \n

-*Must be one of the following types: int32
+*Must be one of the following types: int32 or int64
 *@li updates: An ND Tensor . \n

 *Must be one of the following types: float16, float, int32, int8, uint8
@@ -911,10 +911,10 @@ REG_OP(ScatterMax)
 * Compatible with the TensorFlow operator ScatterUpdate.
 */
 REG_OP(ScatterUpdate)
-    .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT8,DT_UINT8}))
-    .INPUT(indices, TensorType({DT_INT32}))
-    .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT8,DT_UINT8}))
-    .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT8,DT_UINT8}))
+    .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .INPUT(indices, TensorType::IndexNumberType())
+    .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
     .ATTR(use_locking, Bool, false)
     .OP_END_FACTORY_REG(ScatterUpdate)
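The pattern repeated through this file replaces hard-coded int32 indices (TensorType({DT_INT32})) with TensorType::IndexNumberType(), i.e. int32 or int64, and widens var/updates to include int8/uint8. For orientation, a host-side sketch of the scatter-add semantics these registrations describe (reference semantics only, not the device kernel; the helper name is ours):

#include <cstdint>
#include <vector>

// var[indices[i]] += updates[i] over a flattened 1-D var.
// Index may be int32_t or int64_t, mirroring IndexNumberType.
template <typename T, typename Index>
void ScatterAddRef(std::vector<T> &var, const std::vector<Index> &indices,
                   const std::vector<T> &updates)
{
    for (size_t i = 0; i < indices.size(); ++i) {
        var[static_cast<size_t>(indices[i])] += updates[i];  // sketch: no bounds checking
    }
}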




third_party/fwkacllib/inc/ops/nn_pooling_ops.h (+17, -0)

@@ -1184,6 +1184,7 @@ REG_OP(MaxPool3DGrad)
     .OUTPUT(y, TensorType::RealNumberType())
     .REQUIRED_ATTR(ksize, ListInt)
     .REQUIRED_ATTR(strides, ListInt)
+    .ATTR(padding, String, "SAME")
     .REQUIRED_ATTR(pads, ListInt)
     .ATTR(data_format, String, "NDHWC")
     .OP_END_FACTORY_REG(MaxPool3DGrad)
@@ -1678,6 +1679,22 @@ REG_OP(MaxPoolWithArgmaxV1)
     .ATTR(dilation, ListInt, {1, 1, 1, 1})
     .ATTR(ceil_mode, Bool, false)
     .OP_END_FACTORY_REG(MaxPoolWithArgmaxV1)
+
+// SubSample
+REG_OP(SubSample)
+    .INPUT(labels, TensorType({DT_INT32}))
+    .OUTPUT(y, TensorType({DT_INT32}))
+    .REQUIRED_ATTR(batch_size_per_images, Int)
+    .REQUIRED_ATTR(positive_fraction, Float)
+    .OP_END_FACTORY_REG(SubSample)
+
+// SubSampleLabels
+REG_OP(SubSampleLabels)
+    .INPUT(labels, TensorType({DT_INT32}))
+    .INPUT(shuffle_matrix, TensorType({DT_INT32}))
+    .OUTPUT(y, TensorType({DT_INT32}))
+    .REQUIRED_ATTR(batch_size_per_images, Int)
+    .REQUIRED_ATTR(positive_fraction, Float)
+    .OP_END_FACTORY_REG(SubSampleLabels)

 } // namespace ge
 #endif  // OPS_BUILT_IN_OP_PROTO_INC_NN_POOLING_OPS_H
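SubSample and SubSampleLabels arrive without doc comments; judging by the attributes they resemble Faster R-CNN style label subsampling (keep batch_size_per_images samples, positive_fraction of them positive). A hedged sketch of that convention, assuming labels encode 1 = positive, 0 = negative, -1 = ignore (an assumption, not confirmed by this commit):

#include <algorithm>
#include <cstdint>
#include <random>
#include <vector>

// Presumed semantics: retain at most fg positives and (batch - fg) negatives;
// every other label becomes -1 (ignored by the loss).
std::vector<int32_t> SubSampleRef(std::vector<int32_t> labels, int32_t batchSizePerImages,
                                  float positiveFraction, std::mt19937 &rng)
{
    std::vector<size_t> pos, neg;
    for (size_t i = 0; i < labels.size(); ++i) {
        if (labels[i] == 1) pos.push_back(i);
        else if (labels[i] == 0) neg.push_back(i);
    }
    std::shuffle(pos.begin(), pos.end(), rng);
    std::shuffle(neg.begin(), neg.end(), rng);
    const size_t fg = std::min(pos.size(),
                               static_cast<size_t>(batchSizePerImages * positiveFraction));
    const size_t bg = std::min(neg.size(), static_cast<size_t>(batchSizePerImages) - fg);
    for (size_t i = fg; i < pos.size(); ++i) labels[pos[i]] = -1;  // drop surplus positives
    for (size_t i = bg; i < neg.size(); ++i) labels[neg[i]] = -1;  // drop surplus negatives
    return labels;
}

SubSampleLabels presumably performs the same selection but takes its sampling order from the shuffle_matrix input instead of an on-device RNG.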

third_party/fwkacllib/inc/ops/selection_ops.h (+9, -9)

@@ -1006,9 +1006,9 @@ REG_OP(TopK)

 *@par Inputs:
 *Inputs including:
-* @li indices: A required index tensor. Must be one of the following types: float32, float16, int32, int8, uint8.
-* @li x: A required slice tensor. Must be one of the following types: float32, float16, int32, int8, uint8.
-* @li shape: A required list of int32, specifying the output shape.
+* @li indices: A required index tensor. Must be one of the following types: int32 or int64.
+* @li x: A required slice tensor. Must be one of the following types: float32, float16, int32, int8, uint8...
+* @li shape: A required list of int32 or int64, specifying the output shape.
 *@par Outputs:
 *y:A output Tensor with same datatype as "updates" . \n
@@ -1019,7 +1019,7 @@ REG_OP(TopK)
 * Compatible with the TensorFlow operator ScatterNd.
 */
 REG_OP(ScatterNd)
-    .INPUT(indices, TensorType::BasicType())
+    .INPUT(indices, TensorType::IndexNumberType())
     .INPUT(x, TensorType::BasicType())
     .INPUT(shape, TensorType::IndexNumberType())
     .OUTPUT(y, TensorType::BasicType())
@@ -1032,11 +1032,11 @@ REG_OP(ScatterNd)
 *@par Inputs:
 *Inputs including:
 * @li indices: A required index tensor. Must be one of the following types:
-* float, float16, int32, int16. format:ND.
+* int32 or int64. format:ND.
 * @li x: A required slice tensor. Must be one of the following types:
-* float, float16, int32, int16. format:ND.
+* float16, float, int32, int8, uint8. format:ND.
 *@par Attributes:
-* @li shape: A required list of int32, specifying the output shape.
+* @li shape: A required list of int32 or int64, specifying the output shape.
 *@par Outputs:
 *y: A Tensor. Has the same type as "x". format:ND . \n
@@ -1051,8 +1051,8 @@ REG_OP(ScatterNd)
 */
 REG_OP(ScatterNdD)
     .INPUT(indices, TensorType::IndexNumberType())
-    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT16}))
-    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT16}))
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8}))
     .REQUIRED_ATTR(shape, ListInt)
     .OP_END_FACTORY_REG(ScatterNdD)
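ScatterNd likewise moves its indices input to IndexNumberType(), and its doc comment now states the index types correctly. As a reminder of the shape contract (indices is [N, rank], updates supplies N values, output is zeros of the requested shape with updates summed in), a compact sketch for the scalar-update case (illustrative helper, not the kernel):

#include <cstdint>
#include <vector>

// out = zeros(shape); out[indices[i]] += updates[i], where indices[i] is a
// rank-length coordinate tuple linearized in row-major order.
template <typename T>
std::vector<T> ScatterNdRef(const std::vector<std::vector<int64_t>> &indices,
                            const std::vector<T> &updates, const std::vector<int64_t> &shape)
{
    int64_t total = 1;
    for (int64_t d : shape) total *= d;
    std::vector<T> out(static_cast<size_t>(total), T(0));
    for (size_t i = 0; i < indices.size(); ++i) {
        int64_t offset = 0;
        for (size_t d = 0; d < shape.size(); ++d) {
            offset = offset * shape[d] + indices[i][d];  // row-major linearization
        }
        out[static_cast<size_t>(offset)] += updates[i];
    }
    return out;
}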




third_party/fwkacllib/inc/ops/transformation_ops.h (+2, -6)

@@ -418,12 +418,8 @@ REG_OP(BatchToSpace)
 * Warning: THIS FUNCTION IS DEPRECATED. Please use BatchToSpace instead.
 */
 REG_OP(BatchToSpaceD)
-    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8,
-        DT_UINT16, DT_UINT32, DT_UINT64, DT_INT8, DT_INT16, DT_COMPLEX64,
-        DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32}))
-    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8,
-        DT_UINT16, DT_UINT32, DT_UINT64, DT_INT8, DT_INT16, DT_COMPLEX64,
-        DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32}))
+    .INPUT(x, TensorType::BasicType())
+    .OUTPUT(y, TensorType::BasicType())
     .REQUIRED_ATTR(block_size, Int)
     .REQUIRED_ATTR(crops, ListInt)
     .OP_END_FACTORY_REG(BatchToSpaceD)


third_party/fwkacllib/inc/runtime/dev.h (+6, -0)

@@ -59,6 +59,7 @@ typedef enum tagRtAicpuDeployType {

 typedef enum tagRtFeatureType {
     FEATURE_TYPE_MEMCPY = 0,
+    FEATURE_TYPE_MEMORY = 1,
     FEATURE_TYPE_RSV
 } rtFeatureType_t;
@@ -72,6 +73,11 @@ typedef enum tagMemcpyInfo {
     MEMCPY_INFO_RSV
 } rtMemcpyInfo_t;

+typedef enum tagMemoryInfo {
+    MEMORY_INFO_TS_4G_LIMITED = 0,
+    MEMORY_INFO_RSV
+} rtMemoryInfo_t;
+
 /**
 * @ingroup dvrt_dev
 * @brief get total device number.
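FEATURE_TYPE_MEMORY together with the new rtMemoryInfo_t reads like a capability probe for a 4G task-scheduler memory limit. A hedged sketch, assuming dev.h's rtGetRtCapability(featureType, featureInfo, &value) query and the usual RT_ERROR_NONE convention (verify both against your CANN version; this commit only adds the enums):

#include <stdint.h>
#include <stdio.h>
#include "runtime/dev.h"

// Ask the runtime whether device memory visible to TS is 4G-limited.
// Returns 1/0 on success, -1 if the capability query fails.
static int IsTs4gLimited(void)
{
    int64_t value = 0;
    rtError_t ret = rtGetRtCapability(FEATURE_TYPE_MEMORY, MEMORY_INFO_TS_4G_LIMITED, &value);
    if (ret != RT_ERROR_NONE) {
        printf("rtGetRtCapability failed: 0x%x\n", (unsigned int)ret);
        return -1;
    }
    return value != 0;  // nonzero => limited (assumed encoding)
}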

