
sync-from-trunk-to-blue-zone-1009

pull/187/head
wuweikang taoxiangdong 4 years ago
commit 1a651a4e57
31 changed files with 305 additions and 353 deletions
 1. ge/client/module.mk (+4 -2)
 2. ge/common/profiling/profiling_manager.cc (+0 -2)
 3. ge/executor/module.mk (+1 -0)
 4. ge/graph/build/memory/graph_mem_assigner.cc (+0 -1)
 5. ge/graph/build/memory/graph_mem_assigner.h (+3 -0)
 6. ge/graph/load/new_model_manager/data_dumper.cc (+1 -5)
 7. ge/graph/load/new_model_manager/model_manager.cc (+1 -1)
 8. ge/graph/preprocess/multi_batch_copy_graph.cc (+12 -10)
 9. ge/graph/preprocess/multi_batch_copy_graph.h (+0 -2)
10. inc/framework/common/string_util.h (+2 -0)
11. third_party/fwkacllib/inc/ops/aipp.h (+10 -5)
12. third_party/fwkacllib/inc/ops/elewise_calculation_ops.h (+105 -50)
13. third_party/fwkacllib/inc/ops/functional_ops.h (+10 -10)
14. third_party/fwkacllib/inc/ops/image_ops.h (+4 -3)
15. third_party/fwkacllib/inc/ops/nn_batch_norm_ops.h (+107 -53)
16. third_party/fwkacllib/inc/ops/nn_calculation_ops.h (+0 -3)
17. third_party/fwkacllib/inc/ops/nn_detect_ops.h (+14 -7)
18. third_party/fwkacllib/inc/ops/nn_norm_ops.h (+0 -22)
19. third_party/fwkacllib/inc/ops/nn_pooling_ops.h (+0 -3)
20. third_party/fwkacllib/inc/ops/nn_training_ops.h (+0 -39)
21. third_party/fwkacllib/inc/ops/pad_ops.h (+0 -12)
22. third_party/fwkacllib/inc/ops/ragged_conversion_ops.h (+2 -2)
23. third_party/fwkacllib/inc/ops/random_ops.h (+0 -3)
24. third_party/fwkacllib/inc/ops/reduce_ops.h (+0 -24)
25. third_party/fwkacllib/inc/ops/rnn.h (+2 -1)
26. third_party/fwkacllib/inc/ops/save_ops.h (+1 -1)
27. third_party/fwkacllib/inc/ops/sdca_ops.h (+6 -7)
28. third_party/fwkacllib/inc/ops/selection_ops.h (+2 -50)
29. third_party/fwkacllib/inc/ops/split_combination_ops.h (+4 -11)
30. third_party/fwkacllib/inc/ops/transformation_ops.h (+0 -16)
31. third_party/fwkacllib/inc/toolchain/adx_datadump_server.h (+14 -8)

ge/client/module.mk (+4 -2)

@@ -70,9 +70,10 @@ LOCAL_SHARED_LIBRARIES := \
     libregister \
     libge_compiler \
     libge_common \
-    libmsprof
-
+    libmsprof \
+    stub/libascend_hal
+
+LOCAL_STATIC_LIBRARIES := libmsprofiler

 LOCAL_LDFLAGS := -lrt -ldl

@@ -107,6 +108,7 @@ LOCAL_SHARED_LIBRARIES := \
     libge_common \
     libmsprof

+LOCAL_STATIC_LIBRARIES := libmsprofiler

 LOCAL_LDFLAGS := -lrt -ldl
 LOCAL_CFLAGS += \


ge/common/profiling/profiling_manager.cc (+0 -2)

@@ -58,8 +58,6 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ge::Status ProfilingManager::In
   GELOGI("ProfilingManager::Init job_id:%s", job_id_.c_str());

-
-
   Status ret;
   if (!recv_profiling_config_.empty()) {
     GELOGI("Profiling json config from acl:%s", recv_profiling_config_.c_str());


ge/executor/module.mk (+1 -0)

@@ -92,6 +92,7 @@ local_ge_executor_shared_library := \
     libregister \
     libmsprof \
     liberror_manager \
+    libascend_hal

 local_ge_executor_ldflags := -lrt -ldl \




ge/graph/build/memory/graph_mem_assigner.cc (+0 -1)

@@ -907,7 +907,6 @@ Status GraphMemoryAssigner::ReAssignAtomicMemory(bool is_loop_graph) {
   int64_t atomic_mem_start = static_cast<int64_t>(mem_iter->second.mem_offset_);
   GELOGD("Begin to reAssign atomic memory, atomic address memory start = %ld", atomic_mem_start);

-    for (auto &atomic_node : iter.second) {
     vector<int64_t> mem_offset_end;
     status = AssignAtomicOutputAndWorkspaceMemory(atomic_node, mem_offset_end);
     if (status != SUCCESS) {


ge/graph/build/memory/graph_mem_assigner.h (+3 -0)

@@ -140,6 +140,9 @@ class GraphMemoryAssigner {
   ge::Status FilterAtomicNodesForMemoryAssign(std::map<NodePtr, vector<NodePtr>> &normal_atomic_nodes_map,
                                               std::vector<NodePtr> &connecting_output_atomic_nodes);

+  ge::Status FilterAtomicNodesForMemoryAssign(std::map<NodePtr, vector<NodePtr>> &normal_atomic_nodes_map,
+                                              std::vector<NodePtr> &connecting_output_atomic_nodes);
+
   ge::Status AssignContinuousInputMemory(const ge::NodePtr &node, int64_t &continuous_mem_start,
                                          int64_t &continuous_mem_size, int64_t memory_type);




ge/graph/load/new_model_manager/data_dumper.cc (+1 -5)

@@ -728,11 +728,7 @@ Status DataDumper::BuildTaskInfo(aicpu::dump::OpMappingInfo &op_mapping_info) {
     }
     if (dump_properties_.GetDumpMode() == kDumpInput) {
       if (op_iter.is_task) {
-        Status ret = DumpInput(op_iter, task);
-        if (ret != SUCCESS) {
-          GELOGE(ret, "Dump input failed");
-          return ret;
-        }
+        GE_CHK_STATUS_RET(DumpInput(op_iter, task), "Dump input failed");
      }
      op_mapping_info.mutable_task()->Add(std::move(task));
      continue;
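
Note: GE_CHK_STATUS_RET is the codebase's check-log-and-return helper, so the four-line manual check above collapses into one call with the same behavior. As a rough sketch of what such a macro typically expands to (an illustrative assumption; the real definition lives in GE's debug/log headers, not in this diff):

#define GE_CHK_STATUS_RET(expr, ...)        \
  do {                                      \
    const ge::Status _chk_status = (expr);  \
    if (_chk_status != ge::SUCCESS) {       \
      GELOGE(_chk_status, __VA_ARGS__);     \
      return _chk_status;                   \
    }                                       \
  } while (false)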


ge/graph/load/new_model_manager/model_manager.cc (+1 -1)

@@ -236,7 +236,6 @@ ModelManager::~ModelManager() {
   std::lock_guard<std::mutex> lock(map_mutex_);
   model_map_.clear();
   model_aicpu_kernel_.clear();
-  cust_aicpu_so_.clear();

   GE_IF_BOOL_EXEC(device_count > 0, GE_CHK_RT(rtDeviceReset(0)));
 }

@@ -400,6 +399,7 @@ Status ModelManager::Unload(uint32_t model_id) {
   }
   std::lock_guard<std::mutex> lock(exeception_infos_mutex_);
   exception_infos_.clear();
+  cust_aicpu_so_.clear();
   return SUCCESS;
 }




ge/graph/preprocess/multi_batch_copy_graph.cc (+12 -10)

@@ -40,7 +40,6 @@
 #include "inc/pass_manager.h"
 #include "graph/common/local_context.h"

-using std::map;
 using std::set;
 using std::string;
 using std::vector;

@@ -264,24 +263,27 @@ Status MultiBatchGraphCopyer::Init() {
 }

 Status MultiBatchGraphCopyer::LabelStatus() {
-  map<string, vector<NodePtr>> frame_enters;
-  InitStatus(frame_enters);
-
+  for (const auto &data : origin_data_nodes_) {
+    auto data_shape = NodeUtils::GetOutputDesc(*data, kDataOutIndex).GetShape();
+    if (!IsAllDimsPositive(data_shape.GetDims())) {
+      origin_nodes_status_[data.get()] = kNodeInBatchBranch;
+    }
+  }
   bool changed = true;
   // If anyone of in node is kNodeInBatchBranch, it is also kNodeInBatchBranch
   while (changed) {
     changed = false;
     for (const auto &node : origin_all_nodes_) {
+      auto iter = origin_nodes_status_.find(node.get());
+      if (iter != origin_nodes_status_.end()) {
+        continue;
+      }
       for (auto &in_node : node->GetInAllNodes()) {
         bool is_in_batch = origin_nodes_status_.find(in_node.get()) != origin_nodes_status_.end() &&
                            origin_nodes_status_[in_node.get()] == kNodeInBatchBranch;
         if (is_in_batch) {
-          if (origin_nodes_status_.find(node.get()) == origin_nodes_status_.end() ||
-              origin_nodes_status_[node.get()] != kNodeInBatchBranch) {
-            origin_nodes_status_[node.get()] = kNodeInBatchBranch;
-            ResetEnterStatus(frame_enters, node);
-            changed = true;
-          }
+          origin_nodes_status_[node.get()] = kNodeInBatchBranch;
+          changed = true;
           break;
         }
       }
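
Note: the rewritten LabelStatus() drops the Enter-frame bookkeeping; it now seeds origin_nodes_status_ from data nodes with a non-positive dim and propagates kNodeInBatchBranch to a fixed point. A standalone sketch of that propagation, with simplified stand-in types for GE's NodePtr and status map (illustrative only, not the method itself):

#include <unordered_set>
#include <vector>

struct Node {
  std::vector<const Node *> inputs;
  bool dynamic_data = false;  // stands in for a data node with a non-positive dim
};

// Label every node downstream of a dynamic data node, iterating until stable.
std::unordered_set<const Node *> LabelBatchBranch(const std::vector<Node> &nodes) {
  std::unordered_set<const Node *> in_branch;
  for (const auto &n : nodes) {
    if (n.dynamic_data) in_branch.insert(&n);    // seed, like the first loop above
  }
  bool changed = true;
  while (changed) {
    changed = false;
    for (const auto &n : nodes) {
      if (in_branch.count(&n) != 0) continue;    // mirrors the new early-continue
      for (const Node *in : n.inputs) {
        if (in_branch.count(in) != 0) {          // any labeled in-node labels this node
          in_branch.insert(&n);
          changed = true;
          break;
        }
      }
    }
  }
  return in_branch;
}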


ge/graph/preprocess/multi_batch_copy_graph.h (+0 -2)

@@ -69,8 +69,6 @@ class MultiBatchGraphCopyer {

   // label status for origin_all_nodes_
   Status LabelStatus();
-  void InitStatus(std::map<string, vector<NodePtr>> &frame_enters);
-  void ResetEnterStatus(std::map<string, vector<NodePtr>> &frame_enters, const NodePtr &node);
   // add nodes functions
   Status CreateNewNodes();



inc/framework/common/string_util.h (+2 -0)

@@ -61,8 +61,10 @@ class StringUtils {
   /// @param [in] delim separator
   /// @return string array after segmentation
   ///
+  /*lint -e1077*/
   static std::vector<std::string> Split(const std::string &str, char delim) {
     std::vector<std::string> elems;
+    /*lint +e1077*/

     if (str.empty()) {
       elems.emplace_back("");
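
Note: the function wrapped in the lint suppression is the header-only splitter documented above. A minimal usage sketch, assuming the conventional std::getline-based implementation that the visible empty-string branch suggests (illustrative, not the verbatim header):

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Hypothetical standalone equivalent of ge::StringUtils::Split, for illustration.
static std::vector<std::string> Split(const std::string &str, char delim) {
  std::vector<std::string> elems;
  if (str.empty()) {
    elems.emplace_back("");  // mirrors the branch visible in the hunk
    return elems;
  }
  std::stringstream ss(str);
  std::string item;
  while (std::getline(ss, item, delim)) {
    elems.push_back(item);
  }
  return elems;
}

int main() {
  for (const auto &part : Split("ge/graph/build", '/')) {
    std::cout << part << '\n';  // prints "ge", "graph", "build" on separate lines
  }
  return 0;
}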


third_party/fwkacllib/inc/ops/aipp.h (+10 -5)

@@ -25,16 +25,21 @@

 namespace ge {
 /**
-*@brief Performs AI pre-processing (AIPP) on images including color space conversion (CSC),
-image normalization (by subtracting the mean value or multiplying a factor), image cropping
-(by specifying the crop start and cropping the image to the size required by the neural network), and much more. \n
+*@brief Performs AI pre-processing (AIPP) on images including color space
+conversion (CSC),
+image normalization (by subtracting the mean value or multiplying a factor),
+image cropping
+(by specifying the crop start and cropping the image to the size required by
+the neural network), and much more. \n

 *@par Inputs:
-*@li images: An NCHW or NHWC tensor of type uint8, specifying the input to the data layer.
+*@li images: An NCHW or NHWC tensor of type uint8, specifying the input to the
+data layer.
 *@li params: Dynamic AIPP configuration parameters of type uint8. \n

 *@par Attributes:
-*aipp_config_path: A required string, specifying the path of the AIPP configuration file. \n
+*aipp_config_path: A required string, specifying the path of the AIPP
+configuration file. \n

 *@par Outputs:
 *features: The AIPP-processed output tensor of type float16 or uint8.


third_party/fwkacllib/inc/ops/elewise_calculation_ops.h (+105 -50)

@@ -28,9 +28,10 @@ namespace ge {

 *@par Inputs:
 *Dynamic inputs, including:
-* @li x: A list of Tensor objects, each with same shape and type. The supported types are:
+* @li x: A list of Tensor objects, each with same shape and type. The supported
+types are:
 * float16, float32, double, int32, uint8, int16, int8, complex64, int64,
-* qint8, quint8, qint32, uint16, complex128, uint32, uint64. It's a dynamic input. \n
+* qint8, quint8, qint32, uint16, complex128, uint32, uint64. \n

 *@par Outputs:
 *y: A Tensor. Has the same shape and type as the elements of "x". \n

@@ -121,7 +122,8 @@ REG_OP(MinimumGrad)

 *@par Inputs:
 *One input:
-*x:A Tensor. Must be one of the following types: bool, float16, float, int8, int32, uint32, uint8,
+*x:A Tensor. Must be one of the following types: bool, float16, float, int8,
+int32, uint32, uint8,
 int64, uint64, int16, uint16, double, complex64, complex128, qint8, quint8, qint16, quint16, qint32. \n

 *@par Attributes:

@@ -385,7 +387,8 @@ REG_OP(Sign)

 *@par Inputs:
 *Two inputs, including: \n
-*@li x1: A Tensor. Must be one of the following types: float16, float32, float64, int32, int64, complex64,complex128
+*@li x1: A Tensor. Must be one of the following types: float16, float32,
+float64, int32, int64, complex64,complex128
 *@li x2: A Tensor. Has the same type as "x1". \n

 *@par Outputs:

@@ -484,12 +487,16 @@ REG_OP(Equal)

 *@par Inputs:
 *One input:\n
-*x: A Tensor. Must be one of the following types: float16, float32, double, complex64, complex128. \n
+*x: A Tensor. Must be one of the following types: float16, float32, double,
+complex64, complex128. \n

 *@par Attributes:
-*@li base: An optional attribute of type float32, specifying the base gamma. Defaults to "-1.0".
-*@li scale: An optional attribute of type float32, specifying the scale alpha. Defaults to "1.0".
-*@li shift: An optional attribute of type float32, specifying the shift beta. Defaults to "0.0". \n
+*@li base: An optional attribute of type float32, specifying the base gamma.
+Defaults to "-1.0".
+*@li scale: An optional attribute of type float32, specifying the scale alpha.
+Defaults to "1.0".
+*@li shift: An optional attribute of type float32, specifying the shift beta.
+Defaults to "0.0". \n

 *@par Outputs:
 *y: A Tensor of the same type as "x". \n

@@ -510,7 +517,8 @@ REG_OP(Exp)

 *@par Inputs:
 *One input:
-*x: A Tensor. Must be one of the following types: float16, float32, double, complex64, complex128. \n
+*x: A Tensor. Must be one of the following types: float16, float32, double,
+complex64, complex128. \n

 *@par Outputs:
 *y: A Tensor of the same type as "x". \n

@@ -527,7 +535,9 @@ REG_OP(Expm1)
 *@brief: Computes the reciprocal of "x". \n

 *@par Inputs:\n
-*x: A Tensor. Must be one of the following types: float16, float32, int32, int64, double, complex64, complex128. \n
+*x: A Tensor. Must be one of the following types: float16, float32,
+int32, int64, double,
+complex64, complex128. \n

 *@par Outputs:
 *y: A Tensor. Has the same type as "x". \n

@@ -749,7 +759,8 @@ REG_OP(Xlogy)

 *@par Inputs:
 *One input: \n
-*x: A Tensor. Must be one of the following types: float16, float32, float64, int32, int64, complex64, complex128
+*x: A Tensor. Must be one of the following types: float16, float32, float64,
+int32, int64, complex64, complex128

 *@par Outputs:
 *y: A Tensor. Has the same type as "x". \n

@@ -790,7 +801,8 @@ REG_OP(Rsqrt)
 *
 *@par Inputs:
-* x: A tensor. Must be one of the following types: float16, float32, float64, int32, int64, complex64, complex128.
+* x: A tensor. Must be one of the following types: float16, float32, float64,
+int32, int64, complex64, complex128.
 *
 *@par Outputs:
 * y: A tensor. Has the same type as "x".

@@ -811,7 +823,8 @@ REG_OP(Asin)
 *
 *@par Inputs:
-*@li y: A tensor of type float16, float32, float64, int32, int64, complex64, complex128.
+*@li y: A tensor of type float16, float32, float64,
+int32, int64, complex64, complex128.
 *@li dy: A tensor of the same type as "y".
 *
 *@attention Constraints:

@@ -838,7 +851,8 @@ REG_OP(AsinGrad)
 *
 *@par Inputs:
-* x: A tensor. Must be one of the following types: float16, float32, float64, int32, int64, complex64, complex128.
+* x: A tensor. Must be one of the following types: float16, float32, float64,
+int32, int64, complex64, complex128.
 *
 *@par Outputs:
 * y: A tensor. Has the same type as "x".

@@ -883,7 +897,8 @@ REG_OP(AcosGrad)
 *
 *@par Inputs:
-* x: A tensor. Must be one of the following types: float16, float32, float64, complex64, complex128.
+* x: A tensor. Must be one of the following types: float16, float32, float64,
+complex64, complex128.
 *
 *@attention Constraints:
 * x Given an input tensor, the function computes inverse hyperbolic cosine of every element.\n

@@ -1160,7 +1175,8 @@ REG_OP(FusedMulAdd)
 *
 *@par Inputs:
-*@li x1: A tensor. Must be one of the following types: float16, float32, float64, uint8, int8, int16, int32, int64, complex64, complex128.
+*@li x1: A tensor. Must be one of the following types: float16, float32, float64,
+uint8, int8, int16, int32, int64, complex64, complex128.
 *@li x2: A tensor of the same type as "x1".
 *
 *@attention Constraints:

@@ -1189,7 +1205,8 @@ REG_OP(AddV2)
 *@brief Updates "ref" by adding "value" to it. \n

 *@par Inputs:
-*@li ref: A Tensor. Must be one of the following types: float16, float32, int8, int16, int32, int64, uint8, uint16, uint32, uint64.
+*@li ref: A Tensor. Must be one of the following types: float16, float32, int8,
+int16, int32, int64, uint8, uint16, uint32, uint64.
 *@li value: A Tensor of the same type as "ref". \n

 *@par Attributes:

@@ -1218,12 +1235,14 @@ REG_OP(AssignAdd)
 *@brief Updates "ref" by assigning "value" to it. \n

 *@par Inputs:
-*@li ref: A Tensor. Must be one of the following types: float16, float32, int8, int16, int32, int64, uint8, uint16, uint32, uint64.
+*@li ref: A Tensor. Must be one of the following types: float16, float32, int8, int16,
+int32, int64, uint8, uint16, uint32, uint64.
 *@li value: A Tensor of the same type as "ref". \n

 *@par Attributes:
 *@li validate_shape: An optional bool. Defaults to "true".
-If "true", the operation will validate that the shape of "value" matches the shape of the Tensor being assigned to.
+If "true", the operation will validate that the shape of "value"
+matches the shape of the Tensor being assigned to.
 * If "false", "ref" will take on the shape of "value".
 * This attribute is reserved.
 *@li use_locking: An optional bool. Defaults to True.

@@ -1252,7 +1271,8 @@ REG_OP(Assign)
 *
 *@par Inputs:
-*@li var: A tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8, complex64, int64, qint8, quint8, qint32, uint16, complex128, uint32, uint64
+*@li var: A tensor. Must be one of the following types: float32, float64,
+int32, uint8, int16, int8, complex64, int64, qint8, quint8, qint32, uint16, complex128, uint32, uint64
 *@li value: A tensor of the same type as "var".
 *
 *@par Attributes:

@@ -1644,7 +1664,9 @@ REG_OP(Atan2)
 *
 *@par Inputs:
-*@li x1: A tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8, complex64, int64, qint8, quint8, qint32, uint16, complex128, float16, uint32, uint64
+*@li x1: A tensor. Must be one of the following types: float32, float64, int32,
+uint8, int16, int8, complex64, int64, qint8, quint8, qint32, uint16, complex128,
+float16, uint32, uint64
 *@li x2: A tensor of the same type as "x1".
 *
 *@par Attributes:

@@ -1666,16 +1688,18 @@ REG_OP(ApproximateEqual)
 /**
 *@brief Returns the element-wise sum of a list of tensors.\n
-* AccumulateNV2 performs the same operation as AddN, but does not wait for all of its inputs
-to be ready before beginning to sum.\n This can save memory if inputs are ready at different times,
-since minimum temporary storage is proportional to the output size rather than the inputs size.
-Returns a Tensor of same shape and type as the elements of inputs. \n
+* AccumulateNV2 performs the same operation as AddN, but does not wait for all
+of its inputs to be ready before beginning to sum.\n This can save memory if
+inputs are ready at different times, \n since minimum temporary storage is
+proportional to the output size rather than the inputs size.\n Returns a Tensor
+of same shape and type as the elements of inputs. \n
 *
 *@par Inputs:
 *Dynamic inputs, including:
-* x: A tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8, complex64, int64,
-qint8, quint8, qint32, uint16, complex128, float16, uint32, uint64. It's a dynamic input. \n
+* x: A tensor. Must be one of the following types: float32, float64, int32,
+uint8, int16, int8, complex64, int64, \n qint8, quint8, qint32, uint16,
+complex128, float16, uint32, uint64.
 *
 *@par Outputs:
 * y: A tensor. Has the same type as "x".

@@ -1731,7 +1755,8 @@ REG_OP(FakeQuantWithMinMaxArgs)

 *@par Inputs:
 *Two inputs, including: \n
-*@li gradients: A Tensor of type float32. Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.
+*@li gradients: A Tensor of type float32. Backpropagated gradients
+above the FakeQuantWithMinMaxArgs operation.
 *@li x: A Tensor of type float32. Has the same type and format as "gradients".\n
 * This is the input Tensor of the FakeQuantWithMinMaxArgs operator.\n

@@ -2210,9 +2235,13 @@ REG_OP(BiasAdd)

 *@par Inputs:
 *Two inputs, including:
-*@li x: A Tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8, complex64, int64, qint8, quint8, qint32, bfloat16, uint16, complex128, float16, uint32, uint64.
+*@li x: A Tensor. Must be one of the following types: float32, float64, int32,
+uint8, int16, int8, complex64, int64, qint8, quint8, qint32, bfloat16, uint16,
+complex128, float16, uint32, uint64.
 *format is ND.
-*@li dimension: A Tensor. Must be one of the following types: int32, int64. Must be in the range [-rank(input x), rank(input x)]. Describes which dimension of the input Tensor to reduce across.
+*@li dimension: A Tensor. Must be one of the following types: int32, int64.
+Must be in the range [-rank(input x), rank(input x)]. Describes which dimension
+of the input Tensor to reduce across.
 * The format is ND.
 *@par Attributes:
 *dtype: The output type, either "int32" or "int64". Defaults to "int64". \n

@@ -2286,6 +2315,7 @@ REG_OP(ArgMaxV2)
   .ATTR(dtype, Type, DT_INT64)
   .OP_END_FACTORY_REG(ArgMaxV2)
+
 /**
 *@brief Returns the index with the largest value across axes of a tensor. \n

@@ -2298,15 +2328,16 @@ REG_OP(ArgMaxV2)
 *@li dtype: The output type, either "int32" or "int64". Defaults to "int64". \n

 *@par Outputs:
-*y: A multi-dimensional Tensor of type int32, specifying the index with the largest value. The dimension is one less than that of "x". \n
+*y: A multi-dimensional Tensor of type int32, specifying the index with the
+largest value. The dimension is one less than that of "x". \n

 *@attention Constraints:
 *@li x: If there are multiple maximum values, the index of the first maximum value is used.
-*@li The value range of "dimension" is [-dims, dims - 1]. "dims" is the dimension length of "x". \n
+*@li The value range of "dimension" is [-dims, dims - 1]. "dims" is the
+dimension length of "x". \n

 *@par Third-party framework compatibility
 * Compatible with TensorFlow operator ArgMax.
-*
 * @par Restrictions:
 *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
 */

@@ -2929,9 +2960,13 @@ REG_OP(FusedMulAddN)
 *@li bias: An ND tensor of type float16 or float32. \n

 *@par Attributes:
-*@li axis: An optional int32 used to compute the shape of bias input from the online bottoms. Defaults to "1".
-*@li num_axes: An optional int32 used to compute the shape of bias input from a Caffe model trained offline. Defaults to "1".
-*@li bias_from_blob: An optional bool. If "true", bias is input from a Caffe model trained offline. If "false", bias is input from online bottoms. Defaults to "true". \n
+*@li axis: An optional int32 used to compute the shape of bias input from the
+online bottoms. Defaults to "1".
+*@li num_axes: An optional int32 used to compute the shape of bias input from a
+Caffe model trained offline. Defaults to "1".
+*@li bias_from_blob: An optional bool. If "true", bias is input from a Caffe
+model trained offline. If "false", bias is input from online bottoms. Defaults
+to "true". \n

 *@par Outputs:
 *y: An ND tensor of type float16 or float32. \n

@@ -2939,13 +2974,25 @@ REG_OP(FusedMulAddN)
 *@attention Constraints:\n
 * Assume that the shape length of "x" is "n" and that of "bias" is "m".
 *@li "axis" is within the range [-n, n-1]. num_axes >= -1.
-*@li If "bias_from_blob = true", "num_axes = -1", and "axis >= 0", the ith axis of "bias" and the (i+"axis")th axis of "x" must have the same size (0 <= i < n-axis).\n
-* If "axis < 0", the ith axis of "bias" and the (i+n+"axis")th axis of "x" must have the same size (0 <= i < -axis).
-*@li If "bias_from_blob = true" and "num_axes = 0", "bias" is a scalar with shape length 1 and dimension size 1.
-*@li If "bias_from_blob = true", "num_axes > 0, and "axis >= 0", "axis + num_axes" must be less than or equal to "n" and the ith axis of "bias" and the (i+"axis")th axis of "x" must have the same size (0 <= i < num_axes).\n
-* If "axis < 0", "n + axis + num_axes" must be less than or equal to "n" and the ith axis of "bias" and the (i+n+"axis")th axis of "x" must have the same size (0 <= i < num_axes).
-*@li If "bias_from_blob = false", "bias" is not a scalar, and "axis >= 0","axis + m" must be less than or equal to "n" and the ith axis of "bias" and the (i+"axis")th axis of "x" must have the same size (0 <= i < m).\n
-* If "axis < 0", "n + axis + m" must be less than or equal to "n" and the ith axis of "bias" and the (i+n+"axis")th axis of "x" must have the same size (0 <= i < m).
+*@li If "bias_from_blob = true", "num_axes = -1", and "axis >= 0", the ith axis
+of "bias" and the (i+"axis")th axis of "x" must have the same size (0 <= i <
+n-axis).\n
+* If "axis < 0", the ith axis of "bias" and the (i+n+"axis")th axis of "x" must
+have the same size (0 <= i < -axis).
+*@li If "bias_from_blob = true" and "num_axes = 0", "bias" is a scalar with
+shape length 1 and dimension size 1.
+*@li If "bias_from_blob = true", "num_axes > 0, and "axis >= 0", "axis +
+num_axes" must be less than or equal to "n" and the ith axis of "bias" and the
+(i+"axis")th axis of "x" must have the same size (0 <= i < num_axes).\n
+* If "axis < 0", "n + axis + num_axes" must be less than or equal to "n" and
+the ith axis of "bias" and the (i+n+"axis")th axis of "x" must have the same
+size (0 <= i < num_axes).
+*@li If "bias_from_blob = false", "bias" is not a scalar, and "axis >= 0","axis
++ m" must be less than or equal to "n" and the ith axis of "bias" and the (i
++"axis")th axis of "x" must have the same size (0 <= i < m).\n
+* If "axis < 0", "n + axis + m" must be less than or equal to "n" and the ith
+axis of "bias" and the (i+n+"axis")th axis of "x" must have the same size (0 <=
+i < m).
 *@par Third-party framework compatibility
 * Compatible with the Caffe operator Bias.
 */

@@ -3023,10 +3070,12 @@ REG_OP(FusedMulAddNL2loss)
 *@li x: A Tensor with any format. Must be one of the following types: float16, float32. \n

 *@par Attributes:
-*@li threshold: A required float32. Defaults to "0.0". "x" is compared with "threshold", outputs "1" for inputs above threshold; "0" otherwise. \n
+*@li threshold: A required float32. Defaults to "0.0". "x" is compared with
+"threshold", outputs "1" for inputs above threshold; "0" otherwise. \n

 *@par Outputs:
-*@li y: A Tensor with any format. Has the same type as the input. Must be one of the following types: float16, float32.
+*@li y: A Tensor with any format. Has the same type as the input. Must be one
+of the following types: float16, float32.
 *@par Third-party framework compatibility
 * Compatible with the Caffe operator Threshold.
 */

@@ -3044,11 +3093,16 @@ REG_OP(FusedMulAddNL2loss)
 *@li x: A tensor. Must be one of the following types: float16, float32. \n

 *@par Attributes:
-*@li axis: An optional int. Specify the axis to be cut at the input tensor. If this parameter is not provided, find the topk for each batch. Defaults to 10000
-*@li out_max_val: An optional bool. Whether to output the maximum value. If it is True, the maximum value and index are output, otherwise only the index is output.
+*@li axis: An optional int. Specify the axis to be cut at the input tensor. If
+this parameter is not provided, find the topk for each batch. Defaults to 10000
+*@li out_max_val: An optional bool. Whether to output the maximum value. If it
+is True, the maximum value and index are output, otherwise only the index is
+output.
 * Defaults to False
-*@li topk: An optional int. It means the number of top tok in each axis (the value is greater than or equal to 1), and the value range must be in [1,x.shape(axis)].
-* Defaults to 1
+*@li topk: An optional int. It means the number of top tok in each axis (the
+value is greater than or equal to 1), and the value range must be in [1,x.shape
+(axis)].
+* Defaults to 1 \n

 *@par Outputs:
 *@li indices: A tensor of type float16, float32, int32. The index of the maximum value of the output.

@@ -3168,7 +3222,8 @@ REG_OP(Axpy)
 .OP_END_FACTORY_REG(Axpy)

 /**
-*@brief Creates a criterion that measures the loss given input tensors x1 x2 and a Tensor label y with values 1 or -1. \n
+*@brief Creates a criterion that measures the loss given input tensors x1 x2
+and a Tensor label y with values 1 or -1. \n

 *@par Inputs:
 *@li x1: A ND Tensor with one of the following types: int8, uint8, int32, float16, float32.
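
Note on the AccumulateNV2 doc text above: the memory claim is that summing inputs as they become ready needs only one output-sized accumulator, whereas an AddN-style wait-for-all keeps every input alive before summing starts. A toy C++ illustration of that accumulation pattern (not GE code; names are hypothetical):

#include <vector>

// Sums inputs one at a time into a single output-sized buffer, so peak extra
// storage is one tensor regardless of how many inputs eventually arrive.
std::vector<float> AccumulateAsReady(const std::vector<std::vector<float>> &inputs) {
  if (inputs.empty()) return {};
  std::vector<float> acc(inputs.front().size(), 0.0f);
  for (const auto &in : inputs) {
    for (size_t i = 0; i < in.size(); ++i) {
      acc[i] += in[i];  // each input can be consumed (and freed) on arrival
    }
  }
  return acc;
}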


third_party/fwkacllib/inc/ops/functional_ops.h (+10 -10)

@@ -36,7 +36,7 @@ namespace ge {
 * if "cond" is a numerical scalar, non-zero means True and zero means False;
 * if "cond" is a string scalar, non-empty means True and empty means False;
 * if "cond" is not a scalar, non-empty means True and empty means False.
-*@li input: The input tensors . It's a dynamic input. \n
+*@li input: The input tensors . \n

 *@par Graphs:
 *@li then_branch: A subgraph takes 'input' and returns a list of tensors,

@@ -69,7 +69,7 @@ REG_OP(_If)
 * if "cond" is a numerical scalar, non-zero means True and zero means False;
 * if "cond" is a string scalar, non-empty means True and empty means False;
 * if "cond" is not a scalar, non-empty means True and empty means False.
-*@li input: The input tensors . It's a dynamic input. \n
+*@li input: The input tensors . \n

 *@par Graphs:
 *@li then_branch: A subgraph takes 'input' and returns a list of tensors,

@@ -102,7 +102,7 @@ REG_OP(StatelessIf)
 * if "cond" is a numerical scalar, non-zero means True and zero means False;
 * if "cond" is a string scalar, non-empty means True and empty means False;
 * if "cond" is not a scalar, non-empty means True and empty means False.
-*@li input: The input tensors . It's a dynamic input. \n
+*@li input: The input tensors . \n

 *@par Graphs:
 *@li then_branch: A subgraph takes 'input' and returns a list of tensors,

@@ -129,7 +129,7 @@ REG_OP(If)

 *@par Inputs:
 *@li branch_index: A int32 scalar which determines the selected subgraph.
-*@li input: The input tensors, which will be passed to the subgraph . It's a dynamic input. \n
+*@li input: The input tensors, which will be passed to the subgraph . \n

 *@par Graphs:
 *branches: A list of subgraphs, each of which takes 'input' and returns a list of tensors,

@@ -152,7 +152,7 @@ REG_OP(Case)
 *@brief Cyclic execute the "body" subgraph until the return tensor of "cond" subgraph means False . \n

 *@par Inputs:
-*input: The input tensors . It's a dynamic input. \n
+*input: The input tensors . \n

 *@par Graphs:
 *@li cond: A subgraph takes 'input' and returns a tensor.

@@ -183,7 +183,7 @@ REG_OP(_While)
 *@brief Cyclic execute the "body" subgraph until the return tensor of "cond" subgraph means False . \n

 *@par Inputs:
-*input: The input tensors . It's a dynamic input. \n
+*input: The input tensors . \n

 *@par Graphs:
 *@li cond: A subgraph takes 'input' and returns a tensor.

@@ -215,7 +215,7 @@ REG_OP(While)
 *@brief Cyclic execute the "body" subgraph until the return tensor of "cond" subgraph means False . \n

 *@par Inputs:
-*input: The input tensors . It's a dynamic input. \n
+*input: The input tensors . \n

 *@par Graphs:
 *@li cond: A subgraph takes 'input' and returns a tensor.

@@ -250,7 +250,7 @@ REG_OP(StatelessWhile)
 *@li start: A int32 scalar. The lower bound.
 *@li limit: A int32 scalar. The upper bound.
 *@li delta: A int32 scalar. The step size.
-*@li input: The input tensors, which will be passed to "body" . It's a dynamic input. \n
+*@li input: The input tensors, which will be passed to "body" . \n

 *@par Graphs:
 *body: A subgraph takes 'input' and returns a another list of tensors . \n

@@ -274,7 +274,7 @@ REG_OP(For)
 *@brief Pass the input tensors to the subgraph "f" and return the output tensors . \n

 *@par Inputs:
-*args: The input tensors, which will be passed to "f" . It's a dynamic input. \n
+*args: The input tensors, which will be passed to "f" . \n

 *@par Graphs:
 *f: A subgraph takes 'args' and returns a another list of tensors . \n

@@ -303,7 +303,7 @@ REG_OP(PartitionedCall)
 *@brief Pass the input tensors to the subgraph "f" and return the output tensors . \n

 *@par Inputs:
-*args: The input tensors, which will be passed to "f" . It's a dynamic input. \n
+*args: The input tensors, which will be passed to "f" . \n

 *@par Graphs:
 *f: A subgraph takes 'args' and returns a another list of tensors . \n


third_party/fwkacllib/inc/ops/image_ops.h (+4 -3)

@@ -160,8 +160,10 @@ REG_OP(CropAndResize)
 *@li box_index: A Tensor of type int32. A 1-D tensor of shape [num_boxes] with int32 values in [0, batch) . \n

 *@par Attributes:
-*@li crop_size: list int. [crop_height, crop_width]. All cropped image patches are resized to this size.
-*@li extrapolation_value: An optional float. Defaults to 0. Value used for extrapolation, when applicable.
+*@li crop_size: list int. [crop_height, crop_width]. All cropped image patches
+are resized to this size.
+*@li extrapolation_value: An optional float. Defaults to 0. Value used for
+extrapolation, when applicable.
 *@li method: An optional string from: '"bilinear"'. Defaults to "bilinear" . \n

 *@par Outputs:

@@ -172,7 +174,6 @@ REG_OP(CropAndResize)

 *@par Third-party framework compatibility
 *Compatible with tensorflow CropAndResize operator.
-
 * @par Restrictions:
 * Warning: THIS FUNCTION IS DEPRECATED. Please use CropAndResize instead.
 */


+ 107
- 53
third_party/fwkacllib/inc/ops/nn_batch_norm_ops.h View File

@@ -87,39 +87,58 @@ REG_OP(L2NormalizeGrad)


*@par Inputs: *@par Inputs:
* Five inputs, including: (NHWC, NCHW, or NC1HWC0 supported) * Five inputs, including: (NHWC, NCHW, or NC1HWC0 supported)
*@li x: A 4D or 5D Tensor of type float16 or float32, with format NHWC or NCHW for 4D or NC1HWC0 for 5D.
*@li scale: A Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. Must be 5D
*@li x: A 4D or 5D Tensor of type float16 or float32, with format NHWC or NCHW
for 4D or NC1HWC0 for 5D.
*@li scale: A Tensor of type float32. Must be 1D if input "x" is with format
NHWC or NCHW. Must be 5D
if input "x" is with format NC1HWC0. Specifies the scaling factor. if input "x" is with format NC1HWC0. Specifies the scaling factor.
*@li offset: A Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. Must be 5D *@li offset: A Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. Must be 5D
if input "x" is with format NC1HWC0. Specifies the offset. if input "x" is with format NC1HWC0. Specifies the offset.
*@li mean: A Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. Must be 5D
if input "x" is with format NC1HWC0. Specifies the mean used for inference. Must be "None" if the
*@li mean: A Tensor of type float32. Must be 1D if input "x" is with format
NHWC or NCHW. Must be 5D
if input "x" is with format NC1HWC0. Specifies the mean used for inference.
Must be "None" if the
operation is used for training. operation is used for training.
*@li variance: A Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. Must be
5D if input "x" is with format NC1HWC0. Specifies the variance used for inference. Must be "None"
*@li variance: A Tensor of type float32. Must be 1D if input "x" is with format
NHWC or NCHW. Must be
5D if input "x" is with format NC1HWC0. Specifies the variance used for
inference. Must be "None"
if the operation is used for training . \n if the operation is used for training . \n


*@par Attributes: *@par Attributes:
*@li epsilon: An optional float32, specifying the small value added to variance to avoid dividing by zero. Defaults to "0.0001".
*@li data_format: An optional string, specifying the format of "x". Defaults to "NHWC".
*@li is_training: An optional bool, specifying if the operation is used for training or inference. Defaults to "True" . \n
*@li epsilon: An optional float32, specifying the small value added to variance
to avoid dividing by zero. Defaults to "0.0001".
*@li data_format: An optional string, specifying the format of "x". Defaults to
"NHWC".
*@li is_training: An optional bool, specifying if the operation is used for
training or inference. Defaults to "True" . \n


*@par Outputs: *@par Outputs:
* Five outputs, including: (NHWC, NCHW, or NC1HWC0 supported) * Five outputs, including: (NHWC, NCHW, or NC1HWC0 supported)
*@li y: A 4D or 5D Tensor of type float16 or float32 for the normalized "x", with format NHWC or NCHW for 4D or NC1HWC0 for 5D.
*@li batch_mean: A Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. Must be 5D
*@li y: A 4D or 5D Tensor of type float16 or float32 for the normalized "x",
with format NHWC or NCHW for 4D or NC1HWC0 for 5D.
*@li batch_mean: A Tensor of type float32. Must be 1D if input "x" is with
format NHWC or NCHW. Must be 5D
if input "x" is with format NC1HWC0. Specifies the mean of "x". if input "x" is with format NC1HWC0. Specifies the mean of "x".
*@li batch_variance: A Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW.
*@li batch_variance: A Tensor of type float32. Must be 1D if input "x" is with
format NHWC or NCHW.
Must be 5D if input "x" is with format NC1HWC0. Specifies the variance of "x". Must be 5D if input "x" is with format NC1HWC0. Specifies the variance of "x".
*@li reserve_space_1: An optional Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW.
Must be 5D if input "x" is with format NC1HWC0. Specifies the mean of "x" for gradient computation. Pass "None" to skip this output.
*@li reserve_space_2: An optional Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW.
Must be 5D if input "x" is with format NC1HWC0. Specifies the variance of "x" for gradient computation. Pass "None" to skip this output . \n
*@li reserve_space_1: An optional Tensor of type float32. Must be 1D if input
"x" is with format NHWC or NCHW.
Must be 5D if input "x" is with format NC1HWC0. Specifies the mean of "x" for
gradient computation. Pass "None" to skip this output.
*@li reserve_space_2: An optional Tensor of type float32. Must be 1D if input
"x" is with format NHWC or NCHW.
Must be 5D if input "x" is with format NC1HWC0. Specifies the variance of "x"
for gradient computation. Pass "None" to skip this output . \n


*@attention Constraints: *@attention Constraints:
*@li If the operation is used for inference and outputs "reserve_space_1" and "reserve_space_2" are available,
then "reserve_space_1" has the same value as "mean" and "reserve_space_2" has the same value as "variance".
*@li For Ascend 310, the result accuracy fails to reach 1‰ due to the square root instruction . \n
*@li If the operation is used for inference and outputs "reserve_space_1" and
"reserve_space_2" are available,
then "reserve_space_1" has the same value as "mean" and "reserve_space_2" has
the same value as "variance".
*@li For Ascend 310, the result accuracy fails to reach 1‰ due to the square
root instruction . \n


*@par Third-party framework compatibility *@par Third-party framework compatibility
*@li Compatible with the TensorFlow operator fused_batch_norm. *@li Compatible with the TensorFlow operator fused_batch_norm.
@@ -166,13 +185,17 @@ is used for training or inference. Defaults to "True" . \n
*@li y: A 4D Tensor of type float16 or float32, for the normalized "x". *@li y: A 4D Tensor of type float16 or float32, for the normalized "x".
*@li batch_mean: A 1D Tensor of type float32, for the mean of "x". *@li batch_mean: A 1D Tensor of type float32, for the mean of "x".
*@li batch_variance: A 1D Tensor of type float32, for the variance of "x". *@li batch_variance: A 1D Tensor of type float32, for the variance of "x".
*@li reserve_space_1: A 1D Tensor of type float32, for the mean of "x" for gradient computation.
*@li reserve_space_2: A 1D Tensor of type float32, for the variance of "x" for gradient computation . \n
*@li reserve_space_1: A 1D Tensor of type float32, for the mean of "x" for
gradient computation.
*@li reserve_space_2: A 1D Tensor of type float32, for the variance of "x"
for gradient computation . \n


*@attention Constraints: *@attention Constraints:
*@li If the operation is used for inference, then output "reserve_space_1" *@li If the operation is used for inference, then output "reserve_space_1"
has the same value as "mean" and output "reserve_space_2" has the same value as "variance".
*@li For Ascend 310, the result accuracy fails to reach 1‰ due to the square root instruction . \n
has the same value as "mean" and output "reserve_space_2" has the same value as
"variance".
*@li For Ascend 310, the result accuracy fails to reach 1‰ due to the square
root instruction . \n


*@par Third-party framework compatibility *@par Third-party framework compatibility
* Compatible with the TensorFlow operator fused_batch_norm_v2. * Compatible with the TensorFlow operator fused_batch_norm_v2.
@@ -198,23 +221,34 @@ REG_OP(BatchNormExt2)


*@par Inputs: *@par Inputs:
* Five inputs, including: * Five inputs, including:
*@li y_backprop: A 4D or 5D Tensor of type float16 or float32, with format NHWC, NCHW, or NC1HWC0, for the gradient.
*@li x: A 4D or 5D Tensor of type float16 or float32, with format NHWC, NCHW, or NC1HWC0.
*@li scale: A 4D or 5D Tensor of type float32, with format NHWC, NCHW, or NC1HWC0.
*@li reserve_space_1: A 4D or 5D Tensor of type float32, with format NHWC, NCHW, or NC1HWC0. It is an output of BatchNorm.
*@li reserve_space_2: A 4D or 5D Tensor of type float32, with format NHWC, NCHW, or NC1HWC0. It is an output of BatchNorm . \n
*@li y_backprop: A 4D or 5D Tensor of type float16 or float32, with format
NHWC, NCHW, or NC1HWC0, for the gradient.
*@li x: A 4D or 5D Tensor of type float16 or float32, with format NHWC, NCHW,
or NC1HWC0.
*@li scale: A 4D or 5D Tensor of type float32, with format NHWC, NCHW, or
NC1HWC0.
*@li reserve_space_1: A 4D or 5D Tensor of type float32, with format NHWC,
NCHW, or NC1HWC0. It is an output of BatchNorm.
*@li reserve_space_2: A 4D or 5D Tensor of type float32, with format NHWC,
NCHW, or NC1HWC0. It is an output of BatchNorm . \n


*@par Attributes: *@par Attributes:
*@li epsilon: An optional float32. Defaults to "0.0001". A small float number added to the variance of "x".
*@li epsilon: An optional float32. Defaults to "0.0001". A small float number
added to the variance of "x".
*@li data_format: An optional string. Defaults to "NHWC". *@li data_format: An optional string. Defaults to "NHWC".
*@li is_training: An optional bool. Defaults to "true". Specifies the operation is for training (default) or inference . \n *@li is_training: An optional bool. Defaults to "true". Specifies the operation is for training (default) or inference . \n


*@par Outputs: *@par Outputs:
*@li x_backprop: A Tensor of type float16 or float32, with format NHWC, NCHW, or NC1HWC0, for the offset of "x".
*@li scale_backprop: A Tensor of type float32, with format NHWC, NCHW, or NC1HWC0, for the offset of "scale".
*@li *offset_backprop: A Tensor of type float32, with format NHWC, NCHW, or NC1HWC0, for the offset of "offset".
*@li *reserve_space_4: A Tensor of type float32, with shape NHWC, NCHW, or NC1HWC0. Pass "None" to skip this output.
*@li *reserve_space_5: A Tensor of type float32, with shape NHWC, NCHW, or NC1HWC0. Pass "None" to skip this output . \n
*@li x_backprop: A Tensor of type float16 or float32, with format NHWC, NCHW,
or NC1HWC0, for the offset of "x".
*@li scale_backprop: A Tensor of type float32, with format NHWC, NCHW, or
NC1HWC0, for the offset of "scale".
*@li *offset_backprop: A Tensor of type float32, with format NHWC, NCHW, or
NC1HWC0, for the offset of "offset".
*@li *reserve_space_4: A Tensor of type float32, with shape NHWC, NCHW, or
NC1HWC0. Pass "None" to skip this output.
*@li *reserve_space_5: A Tensor of type float32, with shape NHWC, NCHW, or
NC1HWC0. Pass "None" to skip this output . \n


*@attention Constraints: *@attention Constraints:
* The preceding layer of this operator must be operator BatchNorm . \n * The preceding layer of this operator must be operator BatchNorm . \n
@@ -244,21 +278,28 @@ REG_OP(BatchNormGrad)


*@par Inputs: *@par Inputs:
* Five inputs, including: * Five inputs, including:
*@li y_backprop: A 4D Tensor of type float16 or float32, with format NHWC or NCHW, for the gradient.
*@li y_backprop: A 4D Tensor of type float16 or float32, with format NHWC or
NCHW, for the gradient.
*@li x: A 4D Tensor of type float16 or float32, with format NHWC or NCHW. *@li x: A 4D Tensor of type float16 or float32, with format NHWC or NCHW.
*@li scale: A 4D Tensor of type float32, with format NHWC or NCHW. *@li scale: A 4D Tensor of type float32, with format NHWC or NCHW.
*@li reserve_space_1: A 4D Tensor of type float32, with format NHWC or NCHW. It is an output of BatchNormExt2.
*@li reserve_space_2: A 4D Tensor of type float32, with format NHWC or NCHW. It is an output of BatchNormExt2 . \n
*@li reserve_space_1: A 4D Tensor of type float32, with format NHWC or NCHW. It
is an output of BatchNormExt2.
*@li reserve_space_2: A 4D Tensor of type float32, with format NHWC or NCHW. It
is an output of BatchNormExt2 . \n


*@par Attributes: *@par Attributes:
*@li epsilon: A required float32. A small float number added to the variance of "x". *@li epsilon: A required float32. A small float number added to the variance of "x".
*@li data_format: A required string for the format. *@li data_format: A required string for the format.
*@li is_training: A required bool for specifying the operation is for training (true) or inference (false) . \n
*@li is_training: A required bool for specifying the operation is for training
(true) or inference (false) . \n


*@par Outputs: *@par Outputs:
*@li x_backprop: A Tensor of type float16 or float32, with format NHWC or NCHW, for the offset of "x".
*@li scale_backprop: A Tensor of type float32, with format NHWC or NCHW, for the offset of "scale".
*@li offset_backprop: A Tensor of type float32, with format NHWC or NCHW, for the offset of "offset".
*@li x_backprop: A Tensor of type float16 or float32, with format NHWC or NCHW,
for the offset of "x".
*@li scale_backprop: A Tensor of type float32, with format NHWC or NCHW, for
the offset of "scale".
*@li offset_backprop: A Tensor of type float32, with format NHWC or NCHW, for
the offset of "offset".
*@li reserve_space_3: A Tensor of type float32, with format NHWC or NCHW. *@li reserve_space_3: A Tensor of type float32, with format NHWC or NCHW.
*@li reserve_space_4: A Tensor of type float32, with format NHWC or NCHW . \n *@li reserve_space_4: A Tensor of type float32, with format NHWC or NCHW . \n


@@ -290,14 +331,18 @@ REG_OP(BatchNormGradExt2)
*@brief Performs batch normalization . \n *@brief Performs batch normalization . \n


*@par Inputs: *@par Inputs:
*@li x: A 4D or 5D Tensor of type float16 or float32, with format NHWC or NCHW for 4D or NC1HWC0 for 5D.
*@li mean: A Tensor of type float32 or float16. Must be 1D if input "x" Specifies the mean used for inference.
*@li variance: A Tensor of type float32 or float16 . Must be 1D if input "x" Specifies the variance used for inference.
*@li x: A 4D or 5D Tensor of type float16 or float32, with format NHWC or NCHW
for 4D or NC1HWC0 for 5D.
*@li mean: A Tensor of type float32 or float16. Must be 1D if input "x"
Specifies the mean used for inference.
*@li variance: A Tensor of type float32 or float16 . Must be 1D if input "x"
Specifies the variance used for inference.
*@li momentum: A Tensor,represents the mean and the variance's scale factor *@li momentum: A Tensor,represents the mean and the variance's scale factor
*@li scale: An optional tensor of type float16 or float32, no use *@li scale: An optional tensor of type float16 or float32, no use
*@li offset: An optional tensor of type float16 or float32, no use *@li offset: An optional tensor of type float16 or float32, no use
*@par Attributes: *@par Attributes:
*@li epsilon: An optional float32, specifying the small value added to variance to avoid dividing by zero. Defaults to "0.00001".
*@li epsilon: An optional float32, specifying the small value added to variance
to avoid dividing by zero. Defaults to "0.00001".
*@li use_global_stats: mean inference mode , only can be "True". *@li use_global_stats: mean inference mode , only can be "True".
*@li mode: An optional input, not use *@li mode: An optional input, not use
*@par Outputs: *@par Outputs:
@@ -315,16 +360,20 @@ REG_OP(BNInference)
.ATTR(use_global_stats, Bool,true) .ATTR(use_global_stats, Bool,true)
.ATTR(mode, Int,1) .ATTR(mode, Int,1)
.OP_END_FACTORY_REG(BNInference) .OP_END_FACTORY_REG(BNInference)

/** /**
*@brief aicpu batch normalization host . \n *@brief aicpu batch normalization host . \n


*@par Inputs: *@par Inputs:


*@li mean: A Tensor of type float32 or float16. Must be 1D if input "x" Specifies the mean used for inference.
*@li variance: A Tensor of type float32 or float16 . Must be 1D if input "x" Specifies the variance used for inference.
*@li mean: A Tensor of type float32 or float16. Must be 1D if input "x"
Specifies the mean used for inference.
*@li variance: A Tensor of type float32 or float16 . Must be 1D if input "x"
Specifies the variance used for inference.
*@li momentum: An optional float, mean and variance's Scale factor *@li momentum: An optional float, mean and variance's Scale factor
*@par Attributes: *@par Attributes:
*@li epsilon: An optional float32, specifying the small value added to variance to avoid dividing by zero. Defaults to "0.00001".
*@li epsilon: An optional float32, specifying the small value added to variance
to avoid dividing by zero. Defaults to "0.00001".
*@li use_global_stats: mean inference mode , only can be "True". *@li use_global_stats: mean inference mode , only can be "True".
*@li mode: An optional attr, not use *@li mode: An optional attr, not use
*@par Outputs: *@par Outputs:
@@ -348,14 +397,19 @@ REG_OP(BnHost)
*@brief Performs batch normalization . \n *@brief Performs batch normalization . \n


*@par Inputs: *@par Inputs:
*@li x: A 4D or 5D Tensor of type float16 or float32, with format NHWC or NCHW for 4D or NC1HWC0 for 5D.
*@li mean: A Tensor of type float32 or float16. Must be 1D if input "x" Specifies the mean used for inference.
*@li variance: A Tensor of type float32 or float16 . Must be 1D if input "x" Specifies the variance used for inference.
*@li x: A 4D or 5D Tensor of type float16 or float32, with format NHWC or NCHW
for 4D or NC1HWC0 for 5D.
*@li mean: A Tensor of type float32 or float16. Must be 1D if input "x" is
with format NHWC or NCHW. Specifies the mean used for inference.
*@li variance: A Tensor of type float32 or float16. Must be 1D if input "x" is
with format NHWC or NCHW. Specifies the variance used for inference.
*@li scale: An optional tensor of type float16 or float32, not used *@li scale: An optional tensor of type float16 or float32, not used
*@li offset: An optional tensor of type float16 or float32, not used *@li offset: An optional tensor of type float16 or float32, not used
*@par Attributes: *@par Attributes:
*@li momentum: An optional float32 num, represents the mean and the variance's scale factor
*@li epsilon: An optional float32, specifying the small value added to variance to avoid dividing by zero. Defaults to "0.00001".
*@li momentum: An optional float32, representing the scale factor of the mean
and variance
*@li epsilon: An optional float32, specifying the small value added to variance
to avoid dividing by zero. Defaults to "0.00001".
*@li use_global_stats: means inference mode; can only be "True". *@li use_global_stats: means inference mode; can only be "True".
*@li mode: An optional attr, not used *@li mode: An optional attr, not used
*@par Outputs: *@par Outputs:


+ 0
- 3
third_party/fwkacllib/inc/ops/nn_calculation_ops.h View File

@@ -310,9 +310,6 @@ REG_OP(DepthwiseConv2DBackpropInputD)
* @par Third-party framework compatibility * @par Third-party framework compatibility
* @li Compatible with the TensorFlow operator DepthwiseConv2D. * @li Compatible with the TensorFlow operator DepthwiseConv2D.
* @li Compatible with the Caffe operator DepthwiseConv2D. * @li Compatible with the Caffe operator DepthwiseConv2D.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/ */
REG_OP(DepthwiseConv2D) REG_OP(DepthwiseConv2D)
.INPUT(x, TensorType({DT_FLOAT16, DT_INT8})) .INPUT(x, TensorType({DT_FLOAT16, DT_INT8}))


+ 14
- 7
third_party/fwkacllib/inc/ops/nn_detect_ops.h View File

@@ -158,18 +158,25 @@ REG_OP(Iou)
*@par Inputs: *@par Inputs:
* Three inputs, including: * Three inputs, including:
*@li ydiff: A 5HD gradient input of type float32. *@li ydiff: A 5HD gradient input of type float32.
*@li rois: ROI position. A 2D Tensor of float32 with shape (N, 5). "N" indicates the number of ROIs,
the value "5" indicates the indexes of images where the ROIs are located, "x0", "x1", "y0", and "y1".
*@li rois_n: An optional input, specifying the number of valid ROIs. This parameter is reserved . \n
*@li rois: ROI position. A 2D Tensor of float32 with shape (N, 5). "N"
indicates the number of ROIs, and the value "5" covers the index of the image
where each ROI is located plus the coordinates "x0", "x1", "y0", and "y1".
*@li rois_n: An optional input, specifying the number of valid ROIs. This
parameter is reserved . \n


*@par Attributes: *@par Attributes:
*@li xdiff_shape: A required list of 4 ints, obtained based on the shape of "features" of ROIAlign. *@li xdiff_shape: A required list of 4 ints, obtained based on the shape of "features" of ROIAlign.
*@li pooled_width: A required attribute of type int, specifying the W dimension. *@li pooled_width: A required attribute of type int, specifying the W dimension.
*@li pooled_height: A required attribute of type int, specifying the H dimension. *@li pooled_height: A required attribute of type int, specifying the H dimension.
*@li spatial_scale: A required attribute of type float, specifying the scaling ratio of "features" to the original image.
*@li sample_num: An optional attribute of type int, specifying the horizontal and vertical
sampling frequency of each output. If this attribute is set to "0", the sampling frequency is
equal to the rounded up value of "rois", which is a floating point number. Defaults to "2" . \n
*@li spatial_scale: A required attribute of type float, specifying the scaling
ratio of "features" to the original image.
*@li sample_num: An optional attribute of type int, specifying the horizontal
and vertical
sampling frequency of each output. If this attribute is set to "0", the
sampling frequency is
equal to the rounded up value of "rois", which is a floating point number.
Defaults to "2" . \n


*@par Outputs: *@par Outputs:
*xdiff: Gradient added to input "features". Has the same 5HD shape as input "features". *xdiff: Gradient added to input "features". Has the same 5HD shape as input "features".
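As a reading aid for the "sample_num" fallback described in the attributes above, here is a hedged sketch of the usual ROIAlign rule, assuming the common implementation in which a value of 0 falls back to the rounded-up bin extent; the helper name is hypothetical, not part of this header.

#include <cmath>

// Hypothetical helper: per-bin sampling count along one axis. "roi_extent" is
// the ROI height or width after applying spatial_scale; "pooled_dim" is
// pooled_height or pooled_width.
int roi_align_samples(int sample_num, float roi_extent, int pooled_dim) {
  if (sample_num > 0) return sample_num;  // explicit sampling frequency
  return static_cast<int>(std::ceil(roi_extent / pooled_dim));  // ceil fallback
}

For example, a ROI spanning 13.2 scaled pixels pooled into 7 bins would sample ceil(13.2 / 7) = 2 points per bin along that axis.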


+ 0
- 22
third_party/fwkacllib/inc/ops/nn_norm_ops.h View File

@@ -924,29 +924,7 @@ REG_OP(InstanceNormV2)
.ATTR(epsilon, Float, 0.00001) .ATTR(epsilon, Float, 0.00001)
.OP_END_FACTORY_REG(InstanceNormV2) .OP_END_FACTORY_REG(InstanceNormV2)


/**
*@brief Performs instance normalization for inference.

*@par Inputs:\n
* Five inputs, including: (NC1HWC0 supported)
*@li x: A Tensor of type float16 or float32.
*@li gamma: A [N, C1, 1, 1, C0] Tensor of type float32, for the scaling gamma.
*@li beta: A [N, C1, 1, 1, C0] Tensor of type float32, for the scaling beta.
*@li mean: A [N, C1, 1, 1, C0] Tensor of type float32, for the mean.
*@li variance: A [N, C1, 1, 1, C0] Tensor of type float32, for the variance.
*@li variance_sqrt: A [N, C1, 1, 1, C0] Tensor of type float32, for the variance_sqrt.

*@par Outputs:\n
*y: A Tensor of type float16 or float32 for the normalized "x".
*batch_mean: A Tensor of type float32 for the result mean.
*batch_variance: A Tensor of type float32 for the result variance.

*@attention Constraints:
*For Ascend 310, the result accuracy fails to reach 1‰ due to the square root instruction.


* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use INInferV2 instead.
*/
REG_OP(INInferV2D) REG_OP(INInferV2D)
.INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
.OPTIONAL_INPUT(gamma, TensorType({DT_FLOAT})) .OPTIONAL_INPUT(gamma, TensorType({DT_FLOAT}))


+ 0
- 3
third_party/fwkacllib/inc/ops/nn_pooling_ops.h View File

@@ -168,9 +168,6 @@ REG_OP(AvgPoolV2)


*@par Third-party framework compatibility *@par Third-party framework compatibility
* Compatible with the TensorFlow operator AvgPool3D. * Compatible with the TensorFlow operator AvgPool3D.
*
* @par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/ */
REG_OP(AvgPool3D) REG_OP(AvgPool3D)
.INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE})) .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE}))


+ 0
- 39
third_party/fwkacllib/inc/ops/nn_training_ops.h View File

@@ -111,9 +111,6 @@ REG_OP(ApplyAdaMax)
* *
*@par Third-party framework compatibility *@par Third-party framework compatibility
*Compatible with the TensorFlow operator ApplyAdaMax. *Compatible with the TensorFlow operator ApplyAdaMax.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ApplyAdaMax instead.
*/ */
REG_OP(ApplyAdaMaxD) REG_OP(ApplyAdaMaxD)
.INPUT(var, TensorType::NumberType()) .INPUT(var, TensorType::NumberType())
@@ -352,9 +349,6 @@ REG_OP(ApplyMomentum)
* accum: A mutable tensor. Has the same type as input "accum". * accum: A mutable tensor. Has the same type as input "accum".
*@par Third-party framework compatibility *@par Third-party framework compatibility
*Compatible with the TensorFlow operator ApplyMomentum. *Compatible with the TensorFlow operator ApplyMomentum.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ApplyMomentum instead.
*/ */


REG_OP(ApplyMomentumD) REG_OP(ApplyMomentumD)
@@ -681,9 +675,6 @@ REG_OP(ApplyPowerSign)
* *
*@par Third-party framework compatibility *@par Third-party framework compatibility
*Compatible with the TensorFlow operator ApplyPowerSign. *Compatible with the TensorFlow operator ApplyPowerSign.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ApplyPowerSign instead.
*/ */
REG_OP(ApplyPowerSignD) REG_OP(ApplyPowerSignD)
.INPUT(var, TensorType::NumberType()) .INPUT(var, TensorType::NumberType())
@@ -804,9 +795,6 @@ REG_OP(ApplyAddSign)


*@par Third-party framework compatibility *@par Third-party framework compatibility
* Compatible with the TensorFlow operator ApplyAddSign. * Compatible with the TensorFlow operator ApplyAddSign.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ApplyAddSign instead.
*/ */
REG_OP(ApplyAddSignD) REG_OP(ApplyAddSignD)
.INPUT(var, TensorType::NumberType()) .INPUT(var, TensorType::NumberType())
@@ -928,9 +916,6 @@ REG_OP(ApplyCenteredRMSProp)


*@par Third-party framework compatibility *@par Third-party framework compatibility
*Compatible with the TensorFlow operator ApplyCenteredRMSPropD. *Compatible with the TensorFlow operator ApplyCenteredRMSPropD.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ApplyCenteredRMSProp instead.
*/ */
REG_OP(ApplyCenteredRMSPropD) REG_OP(ApplyCenteredRMSPropD)
.INPUT(var, TensorType::NumberType()) .INPUT(var, TensorType::NumberType())
@@ -1049,9 +1034,6 @@ REG_OP(ApplyAdagrad)
* *
*@par Third-party framework compatibility *@par Third-party framework compatibility
*Compatible with the TensorFlow operator ApplyAdagrad. *Compatible with the TensorFlow operator ApplyAdagrad.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ApplyAdagrad instead.
*/ */
REG_OP(ApplyAdagradD) REG_OP(ApplyAdagradD)
.INPUT(var, TensorType::NumberType()) .INPUT(var, TensorType::NumberType())
@@ -1236,9 +1218,6 @@ REG_OP(ApplyAdagradDA)


*@par Third-party framework compatibility *@par Third-party framework compatibility
*Compatible with the TensorFlow operator ApplyAdagradDA. *Compatible with the TensorFlow operator ApplyAdagradDA.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ApplyAdagradDA instead.
*/ */
REG_OP(ApplyAdagradDAD) REG_OP(ApplyAdagradDAD)
.INPUT(var, TensorType::NumberType()) .INPUT(var, TensorType::NumberType())
@@ -1496,9 +1475,6 @@ REG_OP(ApplyProximalAdagrad)


*@par Third-party framework compatibility *@par Third-party framework compatibility
*Compatible with the TensorFlow operator ApplyProximalAdagradD. *Compatible with the TensorFlow operator ApplyProximalAdagradD.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ApplyProximalAdagrad instead.
*/ */
REG_OP(ApplyProximalAdagradD) REG_OP(ApplyProximalAdagradD)
.INPUT(var, TensorType::NumberType()) .INPUT(var, TensorType::NumberType())
@@ -1592,9 +1568,6 @@ REG_OP(SparseApplyProximalAdagrad)


*@par Third-party framework compatibility *@par Third-party framework compatibility
*Compatible with the TensorFlow operator SparseApplyProximalAdagrad. *Compatible with the TensorFlow operator SparseApplyProximalAdagrad.

* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use SparseApplyProximalAdagrad instead.
*/ */
REG_OP(SparseApplyProximalAdagradD) REG_OP(SparseApplyProximalAdagradD)
.INPUT(var, TensorType::NumberType()) .INPUT(var, TensorType::NumberType())
@@ -1681,9 +1654,6 @@ REG_OP(ApplyFtrl)


*@par Third-party framework compatibility *@par Third-party framework compatibility
*Compatible with the TensorFlow operator ApplyFtrl. *Compatible with the TensorFlow operator ApplyFtrl.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ApplyFtrl instead.
*/ */
REG_OP(ApplyFtrlD) REG_OP(ApplyFtrlD)
.INPUT(var, TensorType::NumberType()) .INPUT(var, TensorType::NumberType())
@@ -1775,9 +1745,6 @@ REG_OP(ApplyFtrlV2)


*@par Third-party framework compatibility *@par Third-party framework compatibility
*Compatible with the TensorFlow operator ApplyFtrlV2. *Compatible with the TensorFlow operator ApplyFtrlV2.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ApplyFtrlV2 instead.
*/ */
REG_OP(ApplyFtrlV2D) REG_OP(ApplyFtrlV2D)
.INPUT(var, TensorType::NumberType()) .INPUT(var, TensorType::NumberType())
@@ -1890,9 +1857,6 @@ REG_OP(ApplyAdam)


*@par Third-party framework compatibility *@par Third-party framework compatibility
*Compatible with the TensorFlow operator ApplyAdam. *Compatible with the TensorFlow operator ApplyAdam.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ApplyAdam instead.
*/ */
REG_OP(ApplyAdamD) REG_OP(ApplyAdamD)
.INPUT(var, TensorType::NumberType()) .INPUT(var, TensorType::NumberType())
@@ -1981,9 +1945,6 @@ REG_OP(ApplyAdadelta)


*@par Third-party framework compatibility *@par Third-party framework compatibility
* Compatible with the TensorFlow operator ApplyAdadelta. * Compatible with the TensorFlow operator ApplyAdadelta.

* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ApplyAdadelta instead.
*/ */
REG_OP(ApplyAdadeltaD) REG_OP(ApplyAdadeltaD)
.INPUT(var, TensorType::NumberType()) .INPUT(var, TensorType::NumberType())


+ 0
- 12
third_party/fwkacllib/inc/ops/pad_ops.h View File

@@ -65,9 +65,6 @@ REG_OP(Fill)
* *
*@par Outputs: *@par Outputs:
* y: A tensor. Has the same type as "value". * y: A tensor. Has the same type as "value".
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use Fill instead.
*/ */
REG_OP(FillD) REG_OP(FillD)
.INPUT(value, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, .INPUT(value, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16,
@@ -125,9 +122,6 @@ REG_OP(BroadcastTo)
* *
*@par Third-party framework compatibility *@par Third-party framework compatibility
*Compatible with the TensorFlow operator BroadcastTo. *Compatible with the TensorFlow operator BroadcastTo.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use BroadcastTo instead.
*/ */
REG_OP(BroadcastToD) REG_OP(BroadcastToD)
.INPUT(x, TensorType::BasicType()) .INPUT(x, TensorType::BasicType())
@@ -175,9 +169,6 @@ REG_OP(Pad)


*@par Third-party framework compatibility: *@par Third-party framework compatibility:
* Compatible with TensorFlow operator Pad. * Compatible with TensorFlow operator Pad.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use Pad instead.
*/ */
REG_OP(PadD) REG_OP(PadD)
.INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8, DT_FLOAT})) .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8, DT_FLOAT}))
@@ -272,9 +263,6 @@ REG_OP(PadV3D)
*@see Diag() *@see Diag()
*@par Third-party framework compatibility *@par Third-party framework compatibility
* Compatible with the TensorFlow operator Diag. * Compatible with the TensorFlow operator Diag.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use Diag instead.
*/ */
REG_OP(DiagD) REG_OP(DiagD)
.INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))


+ 2
- 2
third_party/fwkacllib/inc/ops/ragged_conversion_ops.h View File

@@ -30,7 +30,7 @@ namespace ge {
*@par Inputs: *@par Inputs:
*Two inputs, including: *Two inputs, including:
*@li rt_nested_splits: A list of at least 1 Tensor objects with the same type *@li rt_nested_splits: A list of at least 1 Tensor objects with the same type
in: int32, int64. The row_splits for the RaggedTensor. It's a dynamic input.
in: int32, int64. The row_splits for the RaggedTensor.
*@li rt_dense_values: A Tensor. The flat_values for the RaggedTensor *@li rt_dense_values: A Tensor. The flat_values for the RaggedTensor
Must be one of the following types: bool, int8, int16, uint16, int32, Must be one of the following types: bool, int8, int16, uint16, int32,
int64, double, float, float16 . \n int64, double, float, float16 . \n
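Because the row_splits convention is easy to misread, a hedged sketch of the mapping performed for a single nesting level follows, assuming the usual ragged-tensor layout in which row i owns values[row_splits[i] .. row_splits[i+1]); the function name is illustrative, not part of this header.

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

// Expand row_splits into per-value (row, col) sparse indices.
// Example: row_splits {0, 2, 2, 5} yields rows of sizes 2, 0 and 3.
std::vector<std::pair<int64_t, int64_t>> ragged_to_sparse_indices(
    const std::vector<int64_t>& row_splits) {
  std::vector<std::pair<int64_t, int64_t>> indices;
  for (std::size_t row = 0; row + 1 < row_splits.size(); ++row) {
    for (int64_t v = row_splits[row]; v < row_splits[row + 1]; ++v) {
      indices.emplace_back(static_cast<int64_t>(row), v - row_splits[row]);
    }
  }
  return indices;
}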
@@ -66,7 +66,7 @@ REG_OP(RaggedTensorToSparse)
*@li values:A 1D tensor representing the values of the ragged tensor. *@li values:A 1D tensor representing the values of the ragged tensor.
*@li default_value:A `Tensor`. Must have the same type as `values`. *@li default_value:A `Tensor`. Must have the same type as `values`.
*@li row_partition_tensors:A list of at least 1 `Tensor` objects with the same *@li row_partition_tensors:A list of at least 1 `Tensor` objects with the same
type in: `int64`, `int32` . It's a dynamic input.\n
type in: `int64`, `int32` .\n


*@par Attributes: *@par Attributes:
*@li num_row_partition_tensors:Number of row partition tensors. *@li num_row_partition_tensors:Number of row partition tensors.


+ 0
- 3
third_party/fwkacllib/inc/ops/random_ops.h View File

@@ -374,9 +374,6 @@ REG_OP(DropOutGenMask)


*@par Third-party framework compatibility *@par Third-party framework compatibility
* Compatible with the TensorFlow operator lin_space. * Compatible with the TensorFlow operator lin_space.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use LinSpace instead.
*/ */
REG_OP(LinSpaceD) REG_OP(LinSpaceD)
.INPUT(assist, TensorType({DT_FLOAT})) .INPUT(assist, TensorType({DT_FLOAT}))


+ 0
- 24
third_party/fwkacllib/inc/ops/reduce_ops.h View File

@@ -353,9 +353,6 @@ REG_OP(ReduceSum)


*@par Third-party framework compatibility *@par Third-party framework compatibility
* Compatible with the TensorFlow operator Sum. * Compatible with the TensorFlow operator Sum.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceSum instead.
*/ */
REG_OP(ReduceSumD) REG_OP(ReduceSumD)
.INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
@@ -381,9 +378,6 @@ REG_OP(ReduceSumD)


*@par Third-party framework compatibility *@par Third-party framework compatibility
* Compatible with the TensorFlow operator ReduceAll. * Compatible with the TensorFlow operator ReduceAll.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceAll instead.
*/ */
REG_OP(ReduceAllD) REG_OP(ReduceAllD)
.INPUT(x, TensorType({DT_BOOL})) .INPUT(x, TensorType({DT_BOOL}))
@@ -459,9 +453,6 @@ REG_OP(ReduceProd)


*@par Third-party framework compatibility *@par Third-party framework compatibility
* Compatible with the TensorFlow operator ReduceProd. * Compatible with the TensorFlow operator ReduceProd.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceProd instead.
*/ */
REG_OP(ReduceProdD) REG_OP(ReduceProdD)
.INPUT(x,TensorType({DT_FLOAT, DT_UINT8, DT_INT8, DT_INT32, DT_FLOAT16})) .INPUT(x,TensorType({DT_FLOAT, DT_UINT8, DT_INT8, DT_INT32, DT_FLOAT16}))
@@ -516,9 +507,6 @@ REG_OP(ReduceMean)


*@par Third-party framework compatibility: *@par Third-party framework compatibility:
* Compatible with the TensorFlow operator ReduceMean. * Compatible with the TensorFlow operator ReduceMean.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceMean instead.
*/ */
REG_OP(ReduceMeanD) REG_OP(ReduceMeanD)
.INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
@@ -573,9 +561,6 @@ REG_OP(ReduceMax)


*@par Third-party framework compatibility *@par Third-party framework compatibility
* Compatible with TensorFlow operator Max. * Compatible with TensorFlow operator Max.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceMax instead.
*/ */
REG_OP(ReduceMaxD) REG_OP(ReduceMaxD)
.INPUT(x, TensorType({DT_FLOAT, DT_UINT8, DT_INT8, .INPUT(x, TensorType({DT_FLOAT, DT_UINT8, DT_INT8,
@@ -630,9 +615,6 @@ REG_OP(ReduceMin)


*@par Third-party framework compatibility *@par Third-party framework compatibility
* Compatible with the TensorFlow operator reduce_min. * Compatible with the TensorFlow operator reduce_min.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceMin instead.
*/ */
REG_OP(ReduceMinD) REG_OP(ReduceMinD)
.INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT8,DT_UINT8})) .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT8,DT_UINT8}))
@@ -699,9 +681,6 @@ REG_OP(ReduceAny)
* *
*@par Third-party framework compatibility *@par Third-party framework compatibility
*Compatible with the TensorFlow operator reduce_any. *Compatible with the TensorFlow operator reduce_any.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceAny instead.
*/ */
REG_OP(ReduceAnyD) REG_OP(ReduceAnyD)
.INPUT(x, TensorType({DT_BOOL})) .INPUT(x, TensorType({DT_BOOL}))
@@ -787,9 +766,6 @@ REG_OP(EuclideanNorm)


*@par Third-party framework compatibility *@par Third-party framework compatibility
* Compatible with the TensorFlow operator EuclideanNorm. * Compatible with the TensorFlow operator EuclideanNorm.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use EuclideanNorm instead.
*/ */
REG_OP(EuclideanNormD) REG_OP(EuclideanNormD)
.INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_FLOAT16})) .INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_FLOAT16}))


+ 2
- 1
third_party/fwkacllib/inc/ops/rnn.h View File

@@ -92,6 +92,7 @@ REG_OP(DynamicLSTM)
.OUTPUT(output_h, TensorType({DT_FLOAT32})) .OUTPUT(output_h, TensorType({DT_FLOAT32}))
.OP_END_FACTORY_REG(DynamicLSTM) .OP_END_FACTORY_REG(DynamicLSTM)



/** /**
*@brief: DynamicRNNGrad calculation. *@brief: DynamicRNNGrad calculation.
*@par Inputs: *@par Inputs:
@@ -126,7 +127,7 @@ REG_OP(DynamicLSTM)
*@li keep_prob:A float identifying the keep prob in the op. Default to 1. *@li keep_prob:A float identifying the keep prob in the op. Default to 1.
*@li cell_clip:A float identifying the cell clip in the op. Default to -1. *@li cell_clip:A float identifying the cell clip in the op. Default to -1.
*@li num_proj:An integer identifying the num projection in the op. Default to 0. *@li num_proj:An integer identifying the num projection in the op. Default to 0.
*@li time_major:A bool identifying the time major in the op. Default to false.
*@li time_major:A bool identifying the time major in the op. Default to true.
*@li activation:A string identifying the type of activation function in the op. Default to "tanh". Only tanh is currently supported. *@li activation:A string identifying the type of activation function in the op. Default to "tanh". Only tanh is currently supported.
*@li forget_bias:A float identifying the forget bias in the op. Default to 0. *@li forget_bias:A float identifying the forget bias in the op. Default to 0.
*@li is_training:A bool identifying whether training is performed in the op. Default to true. *@li is_training:A bool identifying whether training is performed in the op. Default to true.


+ 1
- 1
third_party/fwkacllib/inc/ops/save_ops.h View File

@@ -28,7 +28,7 @@ namespace ge {
/** /**
*@brief Mark which tensors need to be saved to the ckpt file. *@brief Mark which tensors need to be saved to the ckpt file.
*@par Inputs: *@par Inputs:
*tensors: A list of input tensor.It's a dynamic input.
*tensors: A list of input tensors.
*@par Restrictions: *@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/ */


+ 6
- 7
third_party/fwkacllib/inc/ops/sdca_ops.h View File

@@ -35,16 +35,16 @@ namespace ge {
*rate . \n *rate . \n


*@par Inputs: *@par Inputs:
*@li sparse_example_indices: a list of vectors which contain example indices.It's a dynamic input.
*@li sparse_feature_indices: a list of vectors which contain feature indices.It's a dynamic input.
*@li sparse_feature_values: a list of vectors which contains feature value associated with each feature group.It's a dynamic input.
*@li dense_features: a list of matrices which contains the dense feature values.It's a dynamic input.
*@li sparse_example_indices: a list of vectors which contain example indices.
*@li sparse_feature_indices: a list of vectors which contain feature indices.
*@li sparse_feature_values: a list of vectors which contain the feature values associated with each feature group.
*@li dense_features: a list of matrices which contain the dense feature values.
*@li example_weights: a vector which contains the weight associated with each example. *@li example_weights: a vector which contains the weight associated with each example.
*@li example_labels: a vector which contains the label/target associated with each example. *@li example_labels: a vector which contains the label/target associated with each example.
*@li sparse_indices: a list of vectors where each value is the indices which has *@li sparse_indices: a list of vectors where each value is the indices which has
*corresponding weights in sparse_weights. This field maybe omitted for the dense approach.It's a dynamic input.
*corresponding weights in sparse_weights. This field may be omitted for the dense approach.
*@li sparse_weights: a list of vectors where each value is the weight associated with a sparse feature group. *@li sparse_weights: a list of vectors where each value is the weight associated with a sparse feature group.
*@li dense_weights: a list of vectors where the values are the weights associated with a dense feature group.It's a dynamic input.
*@li dense_weights: a list of vectors where the values are the weights associated with a dense feature group.
*@li example_state_data: a list of vectors containing the example state data. *@li example_state_data: a list of vectors containing the example state data.
*@li loss_type: Type of the primal loss. Currently SdcaSolver supports logistic, squared and hinge losses. *@li loss_type: Type of the primal loss. Currently SdcaSolver supports logistic, squared and hinge losses.
*@li l1: Symmetric l1 regularization strength. *@li l1: Symmetric l1 regularization strength.
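For orientation, the regularized objective such SDCA solvers minimize can be written in its standard primal form (notation conventional, not taken from this header; \ell is the logistic, squared, or hinge loss selected by loss_type over n examples):

P(w) = \frac{1}{n} \sum_{i=1}^{n} \ell(w^\top x_i, y_i) + l1 \lVert w \rVert_1 + \frac{l2}{2} \lVert w \rVert_2^2

Here "l1" and its "l2" counterpart are the symmetric regularization strengths listed among the inputs.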
@@ -61,7 +61,6 @@ namespace ge {
*@par Third-party framework compatibility *@par Third-party framework compatibility
* Compatible with tensorflow SdcaOptimizerV2 operator. * Compatible with tensorflow SdcaOptimizerV2 operator.
*/ */

REG_OP(SdcaOptimizerV2) REG_OP(SdcaOptimizerV2)
.DYNAMIC_INPUT(sparse_example_indices, TensorType({DT_INT64})) .DYNAMIC_INPUT(sparse_example_indices, TensorType({DT_INT64}))
.DYNAMIC_INPUT(sparse_feature_indices, TensorType({DT_INT64})) .DYNAMIC_INPUT(sparse_feature_indices, TensorType({DT_INT64}))


+ 2
- 50
third_party/fwkacllib/inc/ops/selection_ops.h View File

@@ -79,9 +79,6 @@ REG_OP(Range)


*@see Range() *@see Range()
*@since V100R001C33 *@since V100R001C33
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use Range instead.
*/ */
REG_OP(RangeD) REG_OP(RangeD)
.INPUT(x, TensorType({DT_FLOAT,DT_INT32})) .INPUT(x, TensorType({DT_FLOAT,DT_INT32}))
@@ -226,9 +223,6 @@ REG_OP(GatherV2)


*@par Third-party framework compatibility *@par Third-party framework compatibility
* Compatible with the TensorFlow operator GatherV2. * Compatible with the TensorFlow operator GatherV2.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use GatherV2 instead.
*/ */
REG_OP(GatherV2D) REG_OP(GatherV2D)
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_UINT32, DT_INT8, DT_UINT8, .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_UINT32, DT_INT8, DT_UINT8,
@@ -331,9 +325,6 @@ REG_OP(StridedSlice)


*@par Third-party framework compatibility *@par Third-party framework compatibility
* Compatible with the TensorFlow operator StridedSlice. * Compatible with the TensorFlow operator StridedSlice.

* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use StridedSlice instead.
*/ */
REG_OP(StridedSliceD) REG_OP(StridedSliceD)
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_UINT8, DT_INT8, .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_UINT8, DT_INT8,
@@ -389,9 +380,6 @@ REG_OP(StridedSliceD)


*@par Third-party framework compatibility *@par Third-party framework compatibility
* Compatible with the TensorFlow operator StridedSliceGradD. * Compatible with the TensorFlow operator StridedSliceGradD.

* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use StridedSliceGrad instead.
*/ */
REG_OP(StridedSliceGradD) REG_OP(StridedSliceGradD)
.INPUT(dy, TensorType::BasicType()) .INPUT(dy, TensorType::BasicType())
@@ -503,9 +491,6 @@ REG_OP(UnsortedSegmentSum)


*@par Third-party framework compatibility *@par Third-party framework compatibility
* Compatible with the TensorFlow operator UnsortedSegmentSum. * Compatible with the TensorFlow operator UnsortedSegmentSum.

* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use UnsortedSegmentSum instead.
*/ */
REG_OP(UnsortedSegmentSumD) REG_OP(UnsortedSegmentSumD)
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8, DT_UINT8})) .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8, DT_UINT8}))
@@ -730,9 +715,6 @@ REG_OP(OneHot)


*@par Third-party framework compatibility: *@par Third-party framework compatibility:
* Compatible with the TensorFlow operator OneHot. * Compatible with the TensorFlow operator OneHot.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use OneHot instead.
*/ */
REG_OP(OneHotD) REG_OP(OneHotD)
.INPUT(x, TensorType({DT_UINT8, DT_INT32})) .INPUT(x, TensorType({DT_UINT8, DT_INT32}))
@@ -808,7 +790,7 @@ REG_OP(SliceD)
* @li assist_seq: A 1D tensor of type float16. * @li assist_seq: A 1D tensor of type float16.
* with size of 2N, where "N" is the last dimension. * with size of 2N, where "N" is the last dimension.
* The first N numbers are indices, and the next N numbers are the deviation of casting * The first N numbers are indices, and the next N numbers are the deviation of casting
* int32 to float16. \n
* float16 to int32 . \n


* @par Attributes: * @par Attributes:
* @li k: A required int that is at least 0, specifying the number of top elements * @li k: A required int that is at least 0, specifying the number of top elements
@@ -817,7 +799,7 @@ REG_OP(SliceD)
* If true, the resulting "k" elements will be sorted by the values in descending * If true, the resulting "k" elements will be sorted by the values in descending
* order. * order.
* @li dim: An optional int. Defaults to -1. For reserved use. * @li dim: An optional int. Defaults to -1. For reserved use.
* @li largest: An optional bool. Defaults to true. For reserved use. \n
* @li largest: An optional bool. Defaults to true. For reserved use.


* @par Outputs: * @par Outputs:
* @li values: A Tensor, specifying the sorted data. Has the same type as "input". * @li values: A Tensor, specifying the sorted data. Has the same type as "input".
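To make the 2N layout of "assist_seq" concrete, here is a hedged host-side sketch of how such a buffer could be built, assuming the scheme in which the exact int32 index equals the stored float16 index plus the stored deviation. "float16_round" only approximates a half-precision round-trip (round-to-nearest, ignoring ties-to-even and overflow); all names here are illustrative, not part of this header.

#include <cmath>
#include <cstdint>
#include <vector>

// Approximate rounding of a positive float to the nearest float16 value.
static float float16_round(float v) {
  if (v == 0.0f) return 0.0f;
  const int e = static_cast<int>(std::floor(std::log2(v)));
  const float ulp = std::ldexp(1.0f, e - 10);  // float16 keeps 10 fraction bits
  return std::round(v / ulp) * ulp;
}

// First N entries: indices as float16; next N: the casting deviation, so that
// i == static_cast<int32_t>(assist[i] + assist[n + i]) for every index i.
std::vector<float> make_assist_seq(int32_t n) {
  std::vector<float> assist(2 * n);
  for (int32_t i = 0; i < n; ++i) {
    const float as_half = float16_round(static_cast<float>(i));
    assist[i] = as_half;
    assist[n + i] = static_cast<float>(i) - as_half;
  }
  return assist;
}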
@@ -1280,9 +1262,6 @@ REG_OP(InplaceUpdate)


*@par Third-party framework compatibility *@par Third-party framework compatibility
*Compatible with the TensorFlow operator InplaceUpdate. *Compatible with the TensorFlow operator InplaceUpdate.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use InplaceUpdate instead.
*/ */
REG_OP(InplaceUpdateD) REG_OP(InplaceUpdateD)
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32})) .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
@@ -1335,9 +1314,6 @@ REG_OP(InplaceAdd)


*@par Third-party framework compatibility *@par Third-party framework compatibility
*Compatible with the TensorFlow operator InplaceAdd. *Compatible with the TensorFlow operator InplaceAdd.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use InplaceAdd instead.
*/ */
REG_OP(InplaceAddD) REG_OP(InplaceAddD)
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32})) .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
@@ -1389,9 +1365,6 @@ REG_OP(InplaceSub)


*@par Third-party framework compatibility *@par Third-party framework compatibility
*Compatible with the TensorFlow operator InplaceSub. *Compatible with the TensorFlow operator InplaceSub.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use InplaceSub instead.
*/ */
REG_OP(InplaceSubD) REG_OP(InplaceSubD)
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32})) .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
@@ -1443,9 +1416,6 @@ REG_OP(ScatterNonAliasingAdd)
* @par Outputs: * @par Outputs:
* y: A Tensor of type RealNumberType . \n * y: A Tensor of type RealNumberType . \n


* @attention Constraints:
* @li segment_ids must be non-negative tensor.

* @see UnsortedSegmentSum(), UnsortedSegmentProd(), * @see UnsortedSegmentSum(), UnsortedSegmentProd(),


* @par Third-party framework compatibility * @par Third-party framework compatibility
@@ -1473,9 +1443,6 @@ REG_OP(UnsortedSegmentMin)
* @par Outputs: * @par Outputs:
* y: A Tensor.Must have the same type as input "x" . \n * y: A Tensor.Must have the same type as input "x" . \n


* @attention Constraints:
* @li segment_ids must be non-negative tensor.

* @see UnsortedSegmentProdD(), UnsortedSegmentSumD(), * @see UnsortedSegmentProdD(), UnsortedSegmentSumD(),
* *
* @par Restrictions: * @par Restrictions:
@@ -1501,9 +1468,6 @@ REG_OP(UnsortedSegmentMinD)
* @par Outputs: * @par Outputs:
* y: A Tensor of type RealNumberType . \n * y: A Tensor of type RealNumberType . \n


* @attention Constraints:
* @li segment_ids must be non-negative tensor.

* @see UnsortedSegmentSum(), UnsortedSegmentProd(), * @see UnsortedSegmentSum(), UnsortedSegmentProd(),


* @par Third-party framework compatibility * @par Third-party framework compatibility
@@ -1531,9 +1495,6 @@ REG_OP(UnsortedSegmentMax)
* @par Outputs: * @par Outputs:
* y: A Tensor.Must have the same type as input "x" . \n * y: A Tensor.Must have the same type as input "x" . \n


* @attention Constraints:
* @li segment_ids must be non-negative tensor.

* @see UnsortedSegmentProdD(), * @see UnsortedSegmentProdD(),
* *
* @par Restrictions: * @par Restrictions:
@@ -1558,9 +1519,6 @@ REG_OP(UnsortedSegmentMaxD)
* @par Outputs: * @par Outputs:
* y: A Tensor of type NumberType . \n * y: A Tensor of type NumberType . \n


* @attention Constraints:
* @li segment_ids must be non-negative tensor.

* @see UnsortedSegmentSum(), UnsortedSegmentMin(), * @see UnsortedSegmentSum(), UnsortedSegmentMin(),


* @par Third-party framework compatibility * @par Third-party framework compatibility
@@ -1592,9 +1550,6 @@ REG_OP(UnsortedSegmentProd)
* @li segment_ids must be non-negative tensor. * @li segment_ids must be non-negative tensor.


* @see UnsortedSegmentMinD() * @see UnsortedSegmentMinD()
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use UnsortedSegmentProd instead.
*/ */
REG_OP(UnsortedSegmentProdD) REG_OP(UnsortedSegmentProdD)
.INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT16})) .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT16}))
@@ -1910,9 +1865,6 @@ REG_OP(CumulativeLogsumexp)
*y: A Tensor. Has the same type as "x". *y: A Tensor. Has the same type as "x".
*@par Third-party framework compatibility *@par Third-party framework compatibility
* Compatible with the TensorFlow operator Cumsum. * Compatible with the TensorFlow operator Cumsum.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use CumulativeLogsumexp instead.
*/ */
REG_OP(CumulativeLogsumexpD) REG_OP(CumulativeLogsumexpD)
.INPUT(x, TensorType({DT_DOUBLE, DT_FLOAT, DT_FLOAT16})) .INPUT(x, TensorType({DT_DOUBLE, DT_FLOAT, DT_FLOAT16}))


+ 4
- 11
third_party/fwkacllib/inc/ops/split_combination_ops.h View File

@@ -75,9 +75,6 @@ REG_OP(Split)


*@par Third-party framework compatibility *@par Third-party framework compatibility
* Compatible with the TensorFlow operator Split. * Compatible with the TensorFlow operator Split.

* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use Split instead.
*/ */
REG_OP(SplitD) REG_OP(SplitD)
.INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
@@ -144,9 +141,6 @@ Under the caffe framework, the conversion of slice_point through the cut point t
Under the Caffe framework, size_splits or axis is transformed to split_dim. Only one can take effect. Under the Caffe framework, size_splits or axis is transformed to split_dim. Only one can take effect.
*@par Third-party framework compatibility *@par Third-party framework compatibility
* Compatible with the TensorFlow operator SplitV. * Compatible with the TensorFlow operator SplitV.

* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use SplitV instead.
*/ */
REG_OP(SplitVD) REG_OP(SplitVD)
.INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
@@ -164,8 +158,7 @@ REG_OP(SplitVD)
* Two inputs, including: * Two inputs, including:
* @li values: A list of Tensors. Must be one of the following types: int8, int16, int32, * @li values: A list of Tensors. Must be one of the following types: int8, int16, int32,
* int64, uint8, uint16, uint32, uint64, float16, float32. * int64, uint8, uint16, uint32, uint64, float16, float32.
* Tensors to be concatenated. All must have size 1 in the first dimension and same shape.
* It's a dynamic input.
* Tensors to be concatenated. All must have size 1 in the first dimension and same shape.
* @li shape: A Tensor of the same type as "x". * @li shape: A Tensor of the same type as "x".
* The final shape of the result. Should be equal to the shapes of any input * The final shape of the result. Should be equal to the shapes of any input
* but with the number of input values in the first dimension . \n * but with the number of input values in the first dimension . \n
@@ -314,7 +307,7 @@ REG_OP(Concat)


*@par Inputs: *@par Inputs:
* x: A list of N Tensors. Must be one of the following types: int8, int16, int32, * x: A list of N Tensors. Must be one of the following types: int8, int16, int32,
* int64, uint8, uint16, uint32, uint64, float16, float32, bool . It's a dynamic input. \n
* int64, uint8, uint16, uint32, uint64, float16, float32, bool . \n


*@par Attributes: *@par Attributes:
*@li axis: An optional int, default value is 0. *@li axis: An optional int, default value is 0.
@@ -340,7 +333,7 @@ REG_OP(Pack)
*@par Inputs: *@par Inputs:
*Two inputs, including: *Two inputs, including:
* @li concat_dim: A Tensor of type int32. * @li concat_dim: A Tensor of type int32.
* @li x: A list of 1D Tensor objects of type int32 . It's a dynamic input. \n
* @li x: A list of 1D Tensor objects of type int32 . \n


*@par Attributes: *@par Attributes:
*N: A required int . \n *N: A required int . \n
@@ -364,7 +357,7 @@ REG_OP(ConcatOffset)
*@par Inputs: *@par Inputs:
*Two inputs, including: *Two inputs, including:
* @li concat_dim: A Tensor of type int32. * @li concat_dim: A Tensor of type int32.
* @li x: A list of 1D Tensor objects of type int32 . It's a dynamic input. \n
* @li x: A list of 1D Tensor objects of type int32 . \n


*@par Attributes: *@par Attributes:
*@li Concat_dim: A required int. Must be within the rank of input "x". *@li Concat_dim: A required int. Must be within the rank of input "x".
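To make the semantics concrete: a ConcatOffset-style op reports, for each input, where that input starts in the concatenated output. A hedged sketch of the concat-dimension component follows, assuming the usual running-sum semantics (in the real op each output is a full shape vector that is zero outside "concat_dim"); the function name is illustrative.

#include <cstddef>
#include <cstdint>
#include <vector>

// Offsets along the concat dimension: input i starts at the sum of the
// sizes of inputs 0..i-1 along that dimension.
std::vector<int32_t> concat_offsets(const std::vector<int32_t>& sizes_along_dim) {
  std::vector<int32_t> offsets(sizes_along_dim.size(), 0);
  for (std::size_t i = 1; i < sizes_along_dim.size(); ++i) {
    offsets[i] = offsets[i - 1] + sizes_along_dim[i - 1];
  }
  return offsets;
}

For example, sizes {2, 3, 4} along the concat dimension give offsets {0, 2, 5}.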


+ 0
- 16
third_party/fwkacllib/inc/ops/transformation_ops.h View File

@@ -235,12 +235,8 @@ REG_OP(BatchToSpaceND)
*@par Outputs: *@par Outputs:
*y: A Tensor with format NC1HWC0. Has the same type as input "x". *y: A Tensor with format NC1HWC0. Has the same type as input "x".



*@par Third-party framework compatibility *@par Third-party framework compatibility
* Compatible with the TensorFlow operator BatchToSpaceND. * Compatible with the TensorFlow operator BatchToSpaceND.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use BatchToSpaceND instead.
*/ */
REG_OP(BatchToSpaceNDD) REG_OP(BatchToSpaceNDD)
.INPUT(x, TensorType::BasicType()) .INPUT(x, TensorType::BasicType())
@@ -287,9 +283,6 @@ REG_OP(SpaceToBatchND)


*@par Third-party framework compatibility *@par Third-party framework compatibility
* Compatible with the TensorFlow operator SpaceToBatchND. * Compatible with the TensorFlow operator SpaceToBatchND.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use SpaceToBatchND instead.
*/ */
REG_OP(SpaceToBatchNDD) REG_OP(SpaceToBatchNDD)
.INPUT(x, TensorType::BasicType()) .INPUT(x, TensorType::BasicType())
@@ -411,9 +404,6 @@ REG_OP(BatchToSpace)


*@par Third-party framework compatibility *@par Third-party framework compatibility
* Compatible with the TensorFlow operator BatchToSpace. * Compatible with the TensorFlow operator BatchToSpace.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use BatchToSpace instead.
*/ */
REG_OP(BatchToSpaceD) REG_OP(BatchToSpaceD)
.INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8, .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8,
@@ -467,9 +457,6 @@ REG_OP(SpaceToBatch)
*y: A Tensor. Has the same type as input "x". *y: A Tensor. Has the same type as input "x".
*@par Third-party framework compatibility *@par Third-party framework compatibility
* Compatible with the TensorFlow operator SpaceToBatch. * Compatible with the TensorFlow operator SpaceToBatch.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use SpaceToBatch instead.
*/ */
REG_OP(SpaceToBatchD) REG_OP(SpaceToBatchD)
.INPUT(x, TensorType::BasicType()) .INPUT(x, TensorType::BasicType())
@@ -598,9 +585,6 @@ REG_OP(ExtractVolumePatches)


*@par Outputs: *@par Outputs:
*y: A Tensor. Has the same type as "x". *y: A Tensor. Has the same type as "x".
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ConfusionTranspose instead.
*/ */
REG_OP(ConfusionTransposeD) REG_OP(ConfusionTransposeD)
.INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,


+ 14
- 8
third_party/fwkacllib/inc/toolchain/adx_datadump_server.h View File

@@ -1,12 +1,18 @@
/** /**
* @file adx_datadump_server.h
*
* Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/


#ifndef ADX_DATADUMP_SERVER_H #ifndef ADX_DATADUMP_SERVER_H
#define ADX_DATADUMP_SERVER_H #define ADX_DATADUMP_SERVER_H

