
update include file 0325

tags/v1.2.0
shenwei41 4 years ago
commit 23b471ca2b
3 changed files with 114 additions and 36 deletions

  1. +77 -2  third_party/fwkacllib/inc/ops/nn_pooling_ops.h
  2. +1  -0  third_party/fwkacllib/inc/toolchain/prof_callback.h
  3. +36 -34 third_party/fwkacllib/inc/toolchain/prof_reporter.h

+77 -2  third_party/fwkacllib/inc/ops/nn_pooling_ops.h

@@ -1444,8 +1444,7 @@ REG_OP(MaxPoolV3Grad)

*@par Inputs:
*x: A tensor of shape is 4d, format is support NHWC.
-*filter: A tensor of shape is 3d, the type is same with x,
-and the c dimension is same with x. \n
+*filter: A tensor of shape is 3d, the type is same with x, and the c dimension is same with x. \n

*@par Attributes:
*@li strides: A required list of 4 ints, specifying the stride of the sliding window. The strides of the N and C dimensions are 1.
@@ -1473,6 +1472,82 @@ REG_OP(Dilation2D)
.ATTR(data_format, String, "NHWC")
.OP_END_FACTORY_REG(Dilation2D)


/**
* @brief Performs Dilation2DBackpropFilter on the input. \n

*@par Inputs:
*x: A 4-D tensor. The supported format is NHWC.
*filter: A 3-D tensor of the same type as "x"; its C dimension is the same as that of "x".
*out_backprop: Has the same type and format as input "x". \n

*@par Attributes:
*@li strides: A required list of 4 ints, specifying the stride of the sliding window. The strides of the N and C
dimensions are 1.
*@li rates: A required list of 4 ints. The rates of the N and C dimensions are 1.
*@li padding_mode: An optional string. Defaults to "SAME". The supported values are "SAME" and "VALID".
*@li pads: An optional list of 4 ints. Defaults to {0, 0, 0, 0}.
*@li ceil_mode: An optional bool. Defaults to false.
*@li data_format: An optional string. Defaults to "NHWC". \n

*@par Outputs:
*y: The output tensor. Has the same type and format as input "filter". \n

*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Dilation2DBackpropFilter
*/

REG_OP(Dilation2DBackpropFilter)
.INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
.INPUT(filter,
TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
.INPUT(out_backprop,
TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
.OUTPUT(y,
TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
.REQUIRED_ATTR(strides, ListInt)
.REQUIRED_ATTR(rates, ListInt)
.ATTR(padding_mode, String, "SAME")
.ATTR(pads, ListInt, {0, 0, 0, 0})
.ATTR(ceil_mode, Bool, false)
.ATTR(data_format, String, "NHWC")
.OP_END_FACTORY_REG(Dilation2DBackpropFilter)
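
A minimal graph-construction sketch for the new operator, not part of this commit: it assumes the set_input_*/set_attr_* setters that REG_OP generates in the GE graph API, and the shapes and attribute values are illustrative only.

#include "graph/operator.h"
#include "ops/nn_pooling_ops.h"

// Wires the three inputs and the required/optional attributes together.
ge::Operator BuildDilation2DBackpropFilter(ge::Operator &x, ge::Operator &filter,
                                           ge::Operator &out_backprop) {
  auto op = ge::op::Dilation2DBackpropFilter("dilation2d_bp_filter");
  op.set_input_x(x)                       // 4-D NHWC input
      .set_input_filter(filter)           // 3-D filter, same type as x
      .set_input_out_backprop(out_backprop)
      .set_attr_strides({1, 2, 2, 1})     // N and C strides must be 1
      .set_attr_rates({1, 1, 1, 1})       // N and C rates must be 1
      .set_attr_padding_mode("SAME");     // or "VALID"
  return op;
}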

/**
* @brief Performs Dilation2DBackpropInput on the input. \n

*@par Inputs:
*x: A 4-D tensor. The supported format is NHWC.
*filter: A 3-D tensor of the same type as "x"; its C dimension is the same as that of "x".
*out_backprop: Has the same type and format as input "x". \n

*@par Attributes:
*@li strides: A required list of 4 ints, specifying the stride of the sliding window. The strides of the N and C
dimensions are 1.
*@li rates: A required list of 4 ints. The rates of the N and C dimensions are 1.
*@li padding_mode: An optional string. Defaults to "SAME". The supported values are "SAME" and "VALID".
*@li pads: An optional list of 4 ints. Defaults to {0, 0, 0, 0}.
*@li ceil_mode: An optional bool. Defaults to false.
*@li data_format: An optional string. Defaults to "NHWC". \n

*@par Outputs:
*y: The output tensor. Has the same type and format as input "x". \n

*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Dilation2DBackpropInput
*/

REG_OP(Dilation2DBackpropInput)
.INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
.INPUT(filter,
TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
.INPUT(out_backprop,
TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
.OUTPUT(y,
TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
.REQUIRED_ATTR(strides, ListInt)
.REQUIRED_ATTR(rates, ListInt)
.ATTR(padding_mode, String, "SAME")
.ATTR(pads, ListInt, {0, 0, 0, 0})
.ATTR(ceil_mode, Bool, false)
.ATTR(data_format, String, "NHWC")
.OP_END_FACTORY_REG(Dilation2DBackpropInput)
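
Both backprop ops share Dilation2D's sliding-window geometry. As a reference for how strides, rates, and padding_mode interact, here is a minimal sketch of the per-dimension output-size arithmetic under standard TensorFlow Dilation2D semantics (an assumption based on the compatibility notes above; the pads/ceil_mode path is not covered).

#include <cstdint>

// Effective filter extent once the rate (dilation) is applied.
int64_t EffectiveExtent(int64_t filter, int64_t rate) {
  return filter + (filter - 1) * (rate - 1);
}

// Output spatial size of one dimension for padding_mode "SAME" vs "VALID".
int64_t OutputDim(int64_t in, int64_t filter, int64_t stride, int64_t rate,
                  bool same_padding) {
  if (same_padding) {
    return (in + stride - 1) / stride;   // ceil(in / stride)
  }
  const int64_t eff = EffectiveExtent(filter, rate);
  return (in - eff) / stride + 1;        // valid positions only; requires in >= eff
}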

/**
* @brief Applies a 2D adaptive average pooling over
* an input signal composed of several input planes. \n


+1 -0  third_party/fwkacllib/inc/toolchain/prof_callback.h

@@ -74,6 +74,7 @@ enum MsprofReporterCallbackType {
  MSPROF_REPORTER_REPORT = 0,    // report data
  MSPROF_REPORTER_INIT,          // init reporter
  MSPROF_REPORTER_UNINIT,        // uninit reporter
+ MSPROF_REPORTER_DATA_MAX_LEN,  // data max length for calling report callback
};


/**
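
A hedged sketch of how a caller might use the new enumerator. It assumes the MsprofReporterCallback signature declared in this header (moduleId, type, data, len) and that the MSPROF_REPORTER_DATA_MAX_LEN query writes a uint32_t into the data buffer; the output-parameter convention is an assumption, not something this diff confirms.

#include <cstdint>
#include "toolchain/prof_callback.h"  // MsprofReporterCallback, MsprofReporterCallbackType

// Returns the maximum payload size one report may carry, or 0 on failure.
uint32_t QueryReportDataMaxLen(MsprofReporterCallback callback, uint32_t module_id) {
  uint32_t max_len = 0;
  if (callback(module_id, MSPROF_REPORTER_DATA_MAX_LEN, &max_len, sizeof(max_len)) != 0) {
    return 0;  // query failed; the caller should fall back to a safe default
  }
  return max_len;
}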


+36 -34 third_party/fwkacllib/inc/toolchain/prof_reporter.h

@@ -41,42 +41,44 @@ namespace Engine {
* the Reporter class, used to send data to profiling
*/
class MSVP_PROF_API Reporter {
 public:
  virtual ~Reporter() {}

 public:
  /**
  * @ingroup reporter
  * @name : Report
  * @brief : API of libmsprof; reports data to libmsprof. It is a non-blocking function. \n
             The data is first appended to a cache; if the cache is full, the data is dropped.
  * @param data [IN] const ReporterData * the data sent to libmsprof
  * @retval PROFILING_SUCCESS 0 (success)
  * @retval PROFILING_FAILED -1 (failed)
  *
  * @par depend:
  * @li libmsprof
  * @li prof_reporter.h
  * @since c60
  * @see Flush
  */
  virtual int Report(const ReporterData *data) = 0;

  /**
  * @ingroup reporter
  * @name : Flush
  * @brief : API of libmsprof; notifies libmsprof that sending is over. It is a blocking function. \n
             All cached data is written to file or sent to the host.
  * @retval PROFILING_SUCCESS 0 (success)
  * @retval PROFILING_FAILED -1 (failed)
  *
  * @par depend:
  * @li libmsprof
  * @li prof_reporter.h
  * @since c60
  * @see ProfMgrStop
  */
  virtual int Flush() = 0;

+ virtual uint32_t GetReportDataMaxLen() = 0;
};


} // namespace Engine
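
A hedged usage sketch tying the new GetReportDataMaxLen() to the existing Report()/Flush() contract: split a payload into chunks no larger than the reported maximum, report each chunk, then flush. The Msprof::Engine namespace and the ReporterData fields (tag, deviceId, dataLen, data) are assumed from the rest of this header and may differ in detail.

#include <algorithm>
#include <cstring>
#include "toolchain/prof_reporter.h"

// Splits `total` bytes into chunks no larger than GetReportDataMaxLen(),
// reports each chunk, then flushes the cache.
int ReportChunked(Msprof::Engine::Reporter *reporter, const char *tag,
                  int device_id, unsigned char *buf, size_t total) {
  const size_t max_len = reporter->GetReportDataMaxLen();
  if (max_len == 0) {
    return -1;  // PROFILING_FAILED
  }
  for (size_t off = 0; off < total; off += max_len) {
    Msprof::Engine::ReporterData item{};
    std::strncpy(item.tag, tag, sizeof(item.tag) - 1);   // tag identifies the data stream
    item.deviceId = device_id;
    item.dataLen = std::min(max_len, total - off);
    item.data = buf + off;
    // Report() is non-blocking; it may drop data once the cache is full.
    if (reporter->Report(&item) != 0) {
      return -1;
    }
  }
  // Flush() blocks until cached data is written to file or sent to the host.
  return reporter->Flush();
}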

