diff --git a/ge/client/module.mk b/ge/client/module.mk
index 1a304cbf..476841c9 100644
--- a/ge/client/module.mk
+++ b/ge/client/module.mk
@@ -70,9 +70,10 @@ LOCAL_SHARED_LIBRARIES := \
     libregister \
     libge_compiler \
     libge_common \
-    libmsprof
-
+    libmsprof \
+    stub/libascend_hal
+
+LOCAL_STATIC_LIBRARIES := libmsprofiler
 
 LOCAL_LDFLAGS := -lrt -ldl
 
@@ -107,6 +108,7 @@ LOCAL_SHARED_LIBRARIES := \
     libge_common \
     libmsprof
 
+LOCAL_STATIC_LIBRARIES := libmsprofiler
 LOCAL_LDFLAGS := -lrt -ldl
 
 LOCAL_CFLAGS += \
diff --git a/ge/common/profiling/profiling_manager.cc b/ge/common/profiling/profiling_manager.cc
index e21bcb25..5ed95562 100644
--- a/ge/common/profiling/profiling_manager.cc
+++ b/ge/common/profiling/profiling_manager.cc
@@ -58,8 +58,6 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ge::Status ProfilingManager::In
   GELOGI("ProfilingManager::Init job_id:%s", job_id_.c_str());
 
-
-  Status ret;
   if (!recv_profiling_config_.empty()) {
     GELOGI("Profiling json config from acl:%s", recv_profiling_config_.c_str());
diff --git a/ge/executor/module.mk b/ge/executor/module.mk
index 309feb10..a543f36b 100755
--- a/ge/executor/module.mk
+++ b/ge/executor/module.mk
@@ -92,6 +92,7 @@ local_ge_executor_shared_library := \
     libregister \
     libmsprof \
     liberror_manager \
+    libascend_hal
 
 local_ge_executor_ldflags := -lrt -ldl \
diff --git a/ge/graph/build/memory/graph_mem_assigner.cc b/ge/graph/build/memory/graph_mem_assigner.cc
index ef78f254..f4674a07 100755
--- a/ge/graph/build/memory/graph_mem_assigner.cc
+++ b/ge/graph/build/memory/graph_mem_assigner.cc
@@ -907,7 +907,6 @@ Status GraphMemoryAssigner::ReAssignAtomicMemory(bool is_loop_graph) {
     int64_t atomic_mem_start = static_cast<int64_t>(mem_iter->second.mem_offset_);
     GELOGD("Begin to reAssign atomic memory, atomic address memory start = %ld", atomic_mem_start);
-
     for (auto &atomic_node : iter.second) {
       vector<int64_t> mem_offset_end;
       status = AssignAtomicOutputAndWorkspaceMemory(atomic_node, mem_offset_end);
       if (status != SUCCESS) {
diff --git a/ge/graph/build/memory/graph_mem_assigner.h b/ge/graph/build/memory/graph_mem_assigner.h
index 8ac166fe..da694e78 100755
--- a/ge/graph/build/memory/graph_mem_assigner.h
+++ b/ge/graph/build/memory/graph_mem_assigner.h
@@ -140,6 +140,9 @@ class GraphMemoryAssigner {
   ge::Status FilterAtomicNodesForMemoryAssign(std::map<NodePtr, std::vector<NodePtr>> &normal_atomic_nodes_map,
                                               std::vector<NodePtr> &connecting_output_atomic_nodes);
 
+  ge::Status FilterAtomicNodesForMemoryAssign(std::map<NodePtr, std::vector<NodePtr>> &normal_atomic_nodes_map,
+                                              std::vector<NodePtr> &connecting_output_atomic_nodes);
+
   ge::Status AssignContinuousInputMemory(const ge::NodePtr &node, int64_t &continuous_mem_start,
                                          int64_t &continuous_mem_size, int64_t memory_type);
diff --git a/ge/graph/load/new_model_manager/data_dumper.cc b/ge/graph/load/new_model_manager/data_dumper.cc
index c6100129..cbd27c7d 100644
--- a/ge/graph/load/new_model_manager/data_dumper.cc
+++ b/ge/graph/load/new_model_manager/data_dumper.cc
@@ -728,11 +728,7 @@ Status DataDumper::BuildTaskInfo(aicpu::dump::OpMappingInfo &op_mapping_info) {
     }
     if (dump_properties_.GetDumpMode() == kDumpInput) {
       if (op_iter.is_task) {
-        Status ret = DumpInput(op_iter, task);
-        if (ret != SUCCESS) {
-          GELOGE(ret, "Dump input failed");
-          return ret;
-        }
+        GE_CHK_STATUS_RET(DumpInput(op_iter, task), "Dump input failed");
       }
       op_mapping_info.mutable_task()->Add(std::move(task));
       continue;
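The data_dumper.cc hunk above folds a four-line check-log-return pattern into the GE_CHK_STATUS_RET convenience macro. As a rough sketch of what such a macro typically expands to (an illustrative reimplementation, not GE's actual definition, which lives in the framework's debug headers):

    // Hypothetical sketch in the spirit of GE_CHK_STATUS_RET.
    // Assumes ge::Status, ge::SUCCESS and a printf-style GELOGE logger in scope.
    #define CHK_STATUS_RET_SKETCH(expr, msg)   \
      do {                                     \
        const ge::Status _chk_status = (expr); \
        if (_chk_status != ge::SUCCESS) {      \
          GELOGE(_chk_status, msg);            \
          return _chk_status;                  \
        }                                      \
      } while (false)

Under that reading the replacement is behavior-preserving: the status is still checked, logged with the same message, and propagated on failure.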
diff --git a/ge/graph/load/new_model_manager/model_manager.cc b/ge/graph/load/new_model_manager/model_manager.cc
index 8d4cd01e..ac906c67 100755
--- a/ge/graph/load/new_model_manager/model_manager.cc
+++ b/ge/graph/load/new_model_manager/model_manager.cc
@@ -236,7 +236,6 @@ ModelManager::~ModelManager() {
   std::lock_guard<std::mutex> lock(map_mutex_);
   model_map_.clear();
   model_aicpu_kernel_.clear();
-  cust_aicpu_so_.clear();
 
   GE_IF_BOOL_EXEC(device_count > 0, GE_CHK_RT(rtDeviceReset(0)));
 }
@@ -400,6 +399,7 @@ Status ModelManager::Unload(uint32_t model_id) {
   }
   std::lock_guard<std::mutex> lock(exeception_infos_mutex_);
   exception_infos_.clear();
+  cust_aicpu_so_.clear();
   return SUCCESS;
 }
diff --git a/ge/graph/preprocess/multi_batch_copy_graph.cc b/ge/graph/preprocess/multi_batch_copy_graph.cc
index 78c55dec..c0ba89f4 100644
--- a/ge/graph/preprocess/multi_batch_copy_graph.cc
+++ b/ge/graph/preprocess/multi_batch_copy_graph.cc
@@ -40,7 +40,6 @@
 #include "inc/pass_manager.h"
 #include "graph/common/local_context.h"
 
-using std::map;
 using std::set;
 using std::string;
 using std::vector;
@@ -264,24 +263,27 @@ Status MultiBatchGraphCopyer::Init() {
 }
 
 Status MultiBatchGraphCopyer::LabelStatus() {
-  map<string, vector<NodePtr>> frame_enters;
-  InitStatus(frame_enters);
-
+  for (const auto &data : origin_data_nodes_) {
+    auto data_shape = NodeUtils::GetOutputDesc(*data, kDataOutIndex).GetShape();
+    if (!IsAllDimsPositive(data_shape.GetDims())) {
+      origin_nodes_status_[data.get()] = kNodeInBatchBranch;
+    }
+  }
   bool changed = true;
   // If anyone of in node is kNodeInBatchBranch, it is also kNodeInBatchBranch
   while (changed) {
     changed = false;
     for (const auto &node : origin_all_nodes_) {
+      auto iter = origin_nodes_status_.find(node.get());
+      if (iter != origin_nodes_status_.end()) {
+        continue;
+      }
       for (auto &in_node : node->GetInAllNodes()) {
         bool is_in_batch = origin_nodes_status_.find(in_node.get()) != origin_nodes_status_.end() &&
                            origin_nodes_status_[in_node.get()] == kNodeInBatchBranch;
         if (is_in_batch) {
-          if (origin_nodes_status_.find(node.get()) == origin_nodes_status_.end() ||
-              origin_nodes_status_[node.get()] != kNodeInBatchBranch) {
-            origin_nodes_status_[node.get()] = kNodeInBatchBranch;
-            ResetEnterStatus(frame_enters, node);
-            changed = true;
-          }
+          origin_nodes_status_[node.get()] = kNodeInBatchBranch;
+          changed = true;
           break;
         }
       }
     }
diff --git a/ge/graph/preprocess/multi_batch_copy_graph.h b/ge/graph/preprocess/multi_batch_copy_graph.h
index edd79ada..f8aa6ab4 100644
--- a/ge/graph/preprocess/multi_batch_copy_graph.h
+++ b/ge/graph/preprocess/multi_batch_copy_graph.h
@@ -69,8 +69,6 @@ class MultiBatchGraphCopyer {
   // label status for origin_all_nodes_
   Status LabelStatus();
 
-  void InitStatus(std::map<std::string, std::vector<NodePtr>> &frame_enters);
-  void ResetEnterStatus(std::map<std::string, std::vector<NodePtr>> &frame_enters, const NodePtr &node);
 
   // add nodes functions
   Status CreateNewNodes();
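The LabelStatus() rewrite above drops the per-frame Enter bookkeeping (InitStatus/ResetEnterStatus) in favor of a plain fixed-point propagation: Data nodes whose output shape contains a non-positive (dynamic) dimension are seeded as kNodeInBatchBranch, and the label then spreads to any node with an already-labelled input until a full pass makes no change; the new early continue skips nodes that are already labelled. A self-contained sketch of the same fixed point, with illustrative names in place of GE's graph types:

    #include <map>
    #include <set>
    #include <vector>

    // Nodes are ints; in_edges maps a node to its input nodes; "seeds" stands
    // in for the dynamic-shape Data nodes. Returns the labelled set.
    std::set<int> LabelBatchBranch(const std::map<int, std::vector<int>> &in_edges,
                                   const std::set<int> &seeds) {
      std::set<int> in_branch(seeds);
      bool changed = true;
      while (changed) {
        changed = false;
        for (const auto &node : in_edges) {
          if (in_branch.count(node.first) != 0) {
            continue;  // already labelled; mirrors the new early-continue
          }
          for (int in_node : node.second) {
            if (in_branch.count(in_node) != 0) {
              in_branch.insert(node.first);
              changed = true;
              break;
            }
          }
        }
      }
      return in_branch;
    }

Each outer pass either labels at least one new node or terminates, so the loop runs at most as many passes as there are nodes.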
diff --git a/inc/framework/common/string_util.h b/inc/framework/common/string_util.h
index 47e80e75..3d7f6488 100644
--- a/inc/framework/common/string_util.h
+++ b/inc/framework/common/string_util.h
@@ -61,8 +61,10 @@ class StringUtils {
   /// @param [in] delim separator
   /// @return string array after segmentation
   ///
+  /*lint -e1077*/
   static std::vector<std::string> Split(const std::string &str, char delim) {
     std::vector<std::string> elems;
+    /*lint +e1077*/
 
     if (str.empty()) {
       elems.emplace_back("");
diff --git a/third_party/fwkacllib/inc/ops/aipp.h b/third_party/fwkacllib/inc/ops/aipp.h
index 478f6c83..dbd80a09 100644
--- a/third_party/fwkacllib/inc/ops/aipp.h
+++ b/third_party/fwkacllib/inc/ops/aipp.h
@@ -25,16 +25,21 @@ namespace ge {
 /**
-*@brief Performs AI pre-processing (AIPP) on images including color space conversion (CSC),
-image normalization (by subtracting the mean value or multiplying a factor), image cropping
-(by specifying the crop start and cropping the image to the size required by the neural network), and much more. \n
+*@brief Performs AI pre-processing (AIPP) on images including color space
+conversion (CSC),
+image normalization (by subtracting the mean value or multiplying a factor),
+image cropping
+(by specifying the crop start and cropping the image to the size required by
+the neural network), and much more. \n
 
 *@par Inputs:
-*@li images: An NCHW or NHWC tensor of type uint8, specifying the input to the data layer.
+*@li images: An NCHW or NHWC tensor of type uint8, specifying the input to the
+data layer.
 *@li params: Dynamic AIPP configuration parameters of type uint8. \n
 
 *@par Attributes:
-*aipp_config_path: A required string, specifying the path of the AIPP configuration file. \n
+*aipp_config_path: A required string, specifying the path of the AIPP
+configuration file. \n
 
 *@par Outputs:
 *features: The AIPP-processed output tensor of type float16 or uint8.
diff --git a/third_party/fwkacllib/inc/ops/elewise_calculation_ops.h b/third_party/fwkacllib/inc/ops/elewise_calculation_ops.h
index 07fab272..6c7904a6 100644
--- a/third_party/fwkacllib/inc/ops/elewise_calculation_ops.h
+++ b/third_party/fwkacllib/inc/ops/elewise_calculation_ops.h
@@ -28,9 +28,10 @@ namespace ge {
 *@par Inputs:
 *Dynamic inputs, including:
-* @li x: A list of Tensor objects, each with same shape and type. The supported types are:
+* @li x: A list of Tensor objects, each with same shape and type. The supported
+types are:
 * float16, float32, double, int32, uint8, int16, int8, complex64, int64,
-* qint8, quint8, qint32, uint16, complex128, uint32, uint64. It's a dynamic input. \n
+* qint8, quint8, qint32, uint16, complex128, uint32, uint64. \n
 
 *@par Outputs:
 *y: A Tensor. Has the same shape and type as the elements of "x". \n
@@ -121,7 +122,8 @@ REG_OP(MinimumGrad)
 
 *@par Inputs:
 *One input:
-*x:A Tensor. Must be one of the following types: bool, float16, float, int8, int32, uint32, uint8,
+*x:A Tensor. Must be one of the following types: bool, float16, float, int8,
+int32, uint32, uint8,
 int64, uint64, int16, uint16, double, complex64, complex128, qint8, quint8, qint16, quint16, qint32. \n
 
 *@par Attributes:
@@ -385,7 +387,8 @@ REG_OP(Sign)
 
 *@par Inputs:
 *Two inputs, including: \n
-*@li x1: A Tensor. Must be one of the following types: float16, float32, float64, int32, int64, complex64,complex128
+*@li x1: A Tensor. Must be one of the following types: float16, float32,
+ float64, int32, int64, complex64, complex128
 *@li x2: A Tensor. Has the same type as "x1". \n
 
 *@par Outputs:
@@ -484,12 +487,16 @@ REG_OP(Equal)
 
 *@par Inputs:
 *One input:\n
-*x: A Tensor. Must be one of the following types: float16, float32, double, complex64, complex128. \n
+*x: A Tensor. Must be one of the following types: float16, float32, double,
+complex64, complex128. \n
 
 *@par Attributes:
-*@li base: An optional attribute of type float32, specifying the base gamma. Defaults to "-1.0".
-*@li scale: An optional attribute of type float32, specifying the scale alpha. Defaults to "1.0".
-*@li shift: An optional attribute of type float32, specifying the shift beta. Defaults to "0.0". \n
+*@li base: An optional attribute of type float32, specifying the base gamma.
+Defaults to "-1.0".
+*@li scale: An optional attribute of type float32, specifying the scale alpha.
+Defaults to "1.0".
+*@li shift: An optional attribute of type float32, specifying the shift beta.
+Defaults to "0.0". \n
 
 *@par Outputs:
 *y: A Tensor of the same type as "x". \n
@@ -510,7 +517,8 @@ REG_OP(Exp)
 
 *@par Inputs:
 *One input:
-*x: A Tensor. Must be one of the following types: float16, float32, double, complex64, complex128. \n
+*x: A Tensor. Must be one of the following types: float16, float32, double,
+complex64, complex128. \n
 
 *@par Outputs:
 *y: A Tensor of the same type as "x". \n
@@ -527,7 +535,9 @@ REG_OP(Expm1)
 *@brief: Computes the reciprocal of "x". \n
 
 *@par Inputs:\n
-*x: A Tensor. Must be one of the following types: float16, float32, int32, int64, double, complex64, complex128. \n
+*x: A Tensor. Must be one of the following types: float16, float32,
+int32, int64, double,
+complex64, complex128. \n
 
 *@par Outputs:
 *y: A Tensor. Has the same type as "x". \n
@@ -749,7 +759,8 @@ REG_OP(Xlogy)
 
 *@par Inputs:
 *One input: \n
-*x: A Tensor. Must be one of the following types: float16, float32, float64, int32, int64, complex64, complex128
+*x: A Tensor. Must be one of the following types: float16, float32, float64,
+int32, int64, complex64, complex128
 
 *@par Outputs:
 *y: A Tensor. Has the same type as "x". \n
@@ -790,7 +801,8 @@ REG_OP(Rsqrt)
 *
 *@par Inputs:
-* x: A tensor. Must be one of the following types: float16, float32, float64, int32, int64, complex64, complex128.
+* x: A tensor. Must be one of the following types: float16, float32, float64,
+int32, int64, complex64, complex128.
 *
 *@par Outputs:
 * y: A tensor. Has the same type as "x".
@@ -811,7 +823,8 @@ REG_OP(Asin)
 *
 *@par Inputs:
-*@li y: A tensor of type float16, float32, float64, int32, int64, complex64, complex128.
+*@li y: A tensor of type float16, float32, float64,
+int32, int64, complex64, complex128.
 *@li dy: A tensor of the same type as "y".
 *
 *@attention Constraints:
@@ -838,7 +851,8 @@ REG_OP(AsinGrad)
 *
 *@par Inputs:
-* x: A tensor. Must be one of the following types: float16, float32, float64, int32, int64, complex64, complex128.
+* x: A tensor. Must be one of the following types: float16, float32, float64,
+int32, int64, complex64, complex128.
 *
 *@par Outputs:
 * y: A tensor. Has the same type as "x".
@@ -883,7 +897,8 @@ REG_OP(AcosGrad)
 *
 *@par Inputs:
-* x: A tensor. Must be one of the following types: float16, float32, float64, complex64, complex128.
+* x: A tensor. Must be one of the following types: float16, float32, float64,
+ complex64, complex128.
 *
 *@attention Constraints:
 * x Given an input tensor, the function computes inverse hyperbolic cosine of every element.\n
@@ -1160,7 +1175,8 @@ REG_OP(FusedMulAdd)
 *
 *@par Inputs:
-*@li x1: A tensor. Must be one of the following types: float16, float32, float64, uint8, int8, int16, int32, int64, complex64, complex128.
+*@li x1: A tensor. Must be one of the following types: float16, float32, float64,
+uint8, int8, int16, int32, int64, complex64, complex128.
 *@li x2: A tensor of the same type as "x1".
 *
 *@attention Constraints:
@@ -1189,7 +1205,8 @@ REG_OP(AddV2)
 
 *@brief Updates "ref" by adding "value" to it. \n
 
 *@par Inputs:
-*@li ref: A Tensor. Must be one of the following types: float16, float32, int8, int16, int32, int64, uint8, uint16, uint32, uint64.
+*@li ref: A Tensor. Must be one of the following types: float16, float32, int8,
+int16, int32, int64, uint8, uint16, uint32, uint64.
 *@li value: A Tensor of the same type as "ref". \n
 
 *@par Attributes:
@@ -1218,12 +1235,14 @@ REG_OP(AssignAdd)
 
 *@brief Updates "ref" by assigning "value" to it. \n
 
 *@par Inputs:
-*@li ref: A Tensor. Must be one of the following types: float16, float32, int8, int16, int32, int64, uint8, uint16, uint32, uint64.
+*@li ref: A Tensor. Must be one of the following types: float16, float32, int8, int16,
+int32, int64, uint8, uint16, uint32, uint64.
 *@li value: A Tensor of the same type as "ref". \n
 
 *@par Attributes:
 *@li validate_shape: An optional bool. Defaults to "true".
-    If "true", the operation will validate that the shape of "value" matches the shape of the Tensor being assigned to.
+    If "true", the operation will validate that the shape of "value"
+    matches the shape of the Tensor being assigned to.
 *   If "false", "ref" will take on the shape of "value".
 *   This attribute is reserved.
 *@li use_locking: An optional bool. Defaults to True.
@@ -1252,7 +1271,8 @@ REG_OP(Assign)
 *
 *@par Inputs:
-*@li var: A tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8, complex64, int64, qint8, quint8, qint32, uint16, complex128, uint32, uint64
+*@li var: A tensor. Must be one of the following types: float32, float64,
+int32, uint8, int16, int8, complex64, int64, qint8, quint8, qint32, uint16, complex128, uint32, uint64
 *@li value: A tensor of the same type as "var".
 *
 *@par Attributes:
@@ -1644,7 +1664,9 @@ REG_OP(Atan2)
 *
 *@par Inputs:
-*@li x1: A tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8, complex64, int64, qint8, quint8, qint32, uint16, complex128, float16, uint32, uint64
+*@li x1: A tensor. Must be one of the following types: float32, float64, int32,
+ uint8, int16, int8, complex64, int64, qint8, quint8, qint32, uint16, complex128,
+float16, uint32, uint64
 *@li x2: A tensor of the same type as "x1".
 *
 *@par Attributes:
@@ -1666,16 +1688,18 @@ REG_OP(ApproximateEqual)
 
 /**
 *@brief Returns the element-wise sum of a list of tensors.\n
-* AccumulateNV2 performs the same operation as AddN, but does not wait for all of its inputs
-to be ready before beginning to sum.\n This can save memory if inputs are ready at different times,
-since minimum temporary storage is proportional to the output size rather than the inputs size.
- Returns a Tensor of same shape and type as the elements of inputs. \n
+* AccumulateNV2 performs the same operation as AddN, but does not wait for all
+of its inputs to be ready before beginning to sum.\n This can save memory if
+inputs are ready at different times, \n since minimum temporary storage is
+proportional to the output size rather than the inputs size.\n Returns a Tensor
+of same shape and type as the elements of inputs. \n
 *
 *@par Inputs:
 *Dynamic inputs, including:
-* x: A tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8, complex64, int64,
-qint8, quint8, qint32, uint16, complex128, float16, uint32, uint64. It's a dynamic input. \n
+* x: A tensor. Must be one of the following types: float32, float64, int32,
+uint8, int16, int8, complex64, int64, \n qint8, quint8, qint32, uint16,
+complex128, float16, uint32, uint64.
 *
 *@par Outputs:
 * y: A tensor. Has the same type as "x".
@@ -1731,7 +1755,8 @@ REG_OP(FakeQuantWithMinMaxArgs)
 
 *@par Inputs:
 *Two inputs, including: \n
-*@li gradients: A Tensor of type float32. Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.
+*@li gradients: A Tensor of type float32. Backpropagated gradients
+above the FakeQuantWithMinMaxArgs operation.
 *@li x: A Tensor of type float32. Has the same type and format as "gradients".\n
 * This is the input Tensor of the FakeQuantWithMinMaxArgs operator.\n
 
@@ -2210,9 +2235,13 @@ REG_OP(BiasAdd)
 
 *@par Inputs:
 *Two inputs, including:
-*@li x: A Tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8, complex64, int64, qint8, quint8, qint32, bfloat16, uint16, complex128, float16, uint32, uint64.
+*@li x: A Tensor. Must be one of the following types: float32, float64, int32,
+uint8, int16, int8, complex64, int64, qint8, quint8, qint32, bfloat16, uint16,
+complex128, float16, uint32, uint64.
 *format is ND.
-*@li dimension: A Tensor. Must be one of the following types: int32, int64. Must be in the range [-rank(input x), rank(input x)]. Describes which dimension of the input Tensor to reduce across.
+*@li dimension: A Tensor. Must be one of the following types: int32, int64.
+Must be in the range [-rank(input x), rank(input x)]. Describes which dimension
+of the input Tensor to reduce across.
 * The format is ND.
 *@par Attributes:
 *dtype: The output type, either "int32" or "int64". Defaults to "int64". \n
@@ -2286,6 +2315,7 @@ REG_OP(ArgMaxV2)
     .ATTR(dtype, Type, DT_INT64)
     .OP_END_FACTORY_REG(ArgMaxV2)
 
+
 /**
 *@brief Returns the index with the largest value across axes of a tensor. \n
@@ -2298,15 +2328,16 @@ REG_OP(ArgMaxV2)
 *@li dtype: The output type, either "int32" or "int64". Defaults to "int64". \n
 
 *@par Outputs:
-*y: A multi-dimensional Tensor of type int32, specifying the index with the largest value. The dimension is one less than that of "x". \n
+*y: A multi-dimensional Tensor of type int32, specifying the index with the
+largest value. The dimension is one less than that of "x". \n
 
 *@attention Constraints:
 *@li x: If there are multiple maximum values, the index of the first maximum value is used.
-*@li The value range of "dimension" is [-dims, dims - 1]. "dims" is the dimension length of "x". \n
+*@li The value range of "dimension" is [-dims, dims - 1]. "dims" is the
+dimension length of "x". \n
 
 *@par Third-party framework compatibility
 * Compatible with TensorFlow operator ArgMax.
-*
 * @par Restrictions:
 *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
 */
@@ -2929,9 +2960,13 @@ REG_OP(FusedMulAddN)
 *@li bias: An ND tensor of type float16 or float32. \n
 
 *@par Attributes:
-*@li axis: An optional int32 used to compute the shape of bias input from the online bottoms. Defaults to "1".
-*@li num_axes: An optional int32 used to compute the shape of bias input from a Caffe model trained offline. Defaults to "1".
-*@li bias_from_blob: An optional bool. If "true", bias is input from a Caffe model trained offline. If "false", bias is input from online bottoms. Defaults to "true". \n
+*@li axis: An optional int32 used to compute the shape of bias input from the
+online bottoms. Defaults to "1".
+*@li num_axes: An optional int32 used to compute the shape of bias input from a
+Caffe model trained offline. Defaults to "1".
+*@li bias_from_blob: An optional bool. If "true", bias is input from a Caffe
+model trained offline. If "false", bias is input from online bottoms. Defaults
+to "true". \n
 
 *@par Outputs:
 *y: An ND tensor of type float16 or float32. \n
 
@@ -2939,13 +2974,25 @@ REG_OP(FusedMulAddN)
 *@attention Constraints:\n
 * Assume that the shape length of "x" is "n" and that of "bias" is "m".
 *@li "axis" is within the range [-n, n-1]. num_axes >= -1.
-*@li If "bias_from_blob = true", "num_axes = -1", and "axis >= 0", the ith axis of "bias" and the (i+"axis")th axis of "x" must have the same size (0 <= i < n-axis).\n
-* If "axis < 0", the ith axis of "bias" and the (i+n+"axis")th axis of "x" must have the same size (0 <= i < -axis).
-*@li If "bias_from_blob = true" and "num_axes = 0", "bias" is a scalar with shape length 1 and dimension size 1. -*@li If "bias_from_blob = true", "num_axes > 0, and "axis >= 0", "axis + num_axes" must be less than or equal to "n" and the ith axis of "bias" and the (i+"axis")th axis of "x" must have the same size (0 <= i < num_axes).\n -* If "axis < 0", "n + axis + num_axes" must be less than or equal to "n" and the ith axis of "bias" and the (i+n+"axis")th axis of "x" must have the same size (0 <= i < num_axes). -*@li If "bias_from_blob = false", "bias" is not a scalar, and "axis >= 0","axis + m" must be less than or equal to "n" and the ith axis of "bias" and the (i+"axis")th axis of "x" must have the same size (0 <= i < m).\n -* If "axis < 0", "n + axis + m" must be less than or equal to "n" and the ith axis of "bias" and the (i+n+"axis")th axis of "x" must have the same size (0 <= i < m). +*@li If "bias_from_blob = true", "num_axes = -1", and "axis >= 0", the ith axis +of "bias" and the (i+"axis")th axis of "x" must have the same size (0 <= i < +n-axis).\n +* If "axis < 0", the ith axis of "bias" and the (i+n+"axis")th axis of "x" must +have the same size (0 <= i < -axis). +*@li If "bias_from_blob = true" and "num_axes = 0", "bias" is a scalar with +shape length 1 and dimension size 1. +*@li If "bias_from_blob = true", "num_axes > 0, and "axis >= 0", "axis + +num_axes" must be less than or equal to "n" and the ith axis of "bias" and the +(i+"axis")th axis of "x" must have the same size (0 <= i < num_axes).\n +* If "axis < 0", "n + axis + num_axes" must be less than or equal to "n" and +the ith axis of "bias" and the (i+n+"axis")th axis of "x" must have the same +size (0 <= i < num_axes). +*@li If "bias_from_blob = false", "bias" is not a scalar, and "axis >= 0","axis ++ m" must be less than or equal to "n" and the ith axis of "bias" and the (i ++"axis")th axis of "x" must have the same size (0 <= i < m).\n +* If "axis < 0", "n + axis + m" must be less than or equal to "n" and the ith +axis of "bias" and the (i+n+"axis")th axis of "x" must have the same size (0 <= +i < m). *@par Third-party framework compatibility * Compatible with the Caffe operator Bias. */ @@ -3023,10 +3070,12 @@ REG_OP(FusedMulAddNL2loss) *@li x: A Tensor with any format. Must be one of the following types: float16, float32. \n *@par Attributes: -*@li threshold: A required float32. Defaults to "0.0". "x" is compared with "threshold", outputs "1" for inputs above threshold; "0" otherwise. \n +*@li threshold: A required float32. Defaults to "0.0". "x" is compared with +"threshold", outputs "1" for inputs above threshold; "0" otherwise. \n *@par Outputs: -*@li y: A Tensor with any format. Has the same type as the input. Must be one of the following types: float16, float32. +*@li y: A Tensor with any format. Has the same type as the input. Must be one +of the following types: float16, float32. *@par Third-party framework compatibility * Compatible with the Caffe operator Threshold. */ @@ -3044,11 +3093,16 @@ REG_OP(FusedMulAddNL2loss) *@li x: A tensor. Must be one of the following types: float16, float32. \n *@par Attributes: -*@li axis: An optional int. Specify the axis to be cut at the input tensor. If this parameter is not provided, find the topk for each batch. Defaults to 10000 -*@li out_max_val: An optional bool. Whether to output the maximum value. If it is True, the maximum value and index are output, otherwise only the index is output. +*@li axis: An optional int. 
@@ -3023,10 +3070,12 @@ REG_OP(FusedMulAddNL2loss)
 *@li x: A Tensor with any format. Must be one of the following types: float16, float32. \n
 
 *@par Attributes:
-*@li threshold: A required float32. Defaults to "0.0". "x" is compared with "threshold", outputs "1" for inputs above threshold; "0" otherwise. \n
+*@li threshold: A required float32. Defaults to "0.0". "x" is compared with
+"threshold", outputs "1" for inputs above threshold; "0" otherwise. \n
 
 *@par Outputs:
-*@li y: A Tensor with any format. Has the same type as the input. Must be one of the following types: float16, float32.
+*@li y: A Tensor with any format. Has the same type as the input. Must be one
+of the following types: float16, float32.
 *@par Third-party framework compatibility
 * Compatible with the Caffe operator Threshold.
 */
@@ -3044,11 +3093,16 @@ REG_OP(Threshold)
 *@li x: A tensor. Must be one of the following types: float16, float32. \n
 
 *@par Attributes:
-*@li axis: An optional int. Specify the axis to be cut at the input tensor. If this parameter is not provided, find the topk for each batch. Defaults to 10000
-*@li out_max_val: An optional bool. Whether to output the maximum value. If it is True, the maximum value and index are output, otherwise only the index is output.
+*@li axis: An optional int. Specify the axis to be cut at the input tensor. If
+this parameter is not provided, find the top k for each batch. Defaults to 10000
+*@li out_max_val: An optional bool. Whether to output the maximum value. If it
+is True, the maximum value and index are output, otherwise only the index is
+output.
 * Defaults to False
-*@li topk: An optional int. It means the number of top tok in each axis (the value is greater than or equal to 1), and the value range must be in [1,x.shape(axis)].
-* Defaults to 1
+*@li topk: An optional int. It means the number of top k in each axis (the
+value is greater than or equal to 1), and the value range must be in
+[1,x.shape(axis)].
+* Defaults to 1 \n
 
 *@par Outputs:
 *@li indices: A tensor of type float16, float32, int32. The index of the maximum value of the output.
@@ -3168,7 +3222,8 @@ REG_OP(Axpy)
     .OP_END_FACTORY_REG(Axpy)
 
 /**
-*@brief Creates a criterion that measures the loss given input tensors x1 x2 and a Tensor label y with values 1 or -1. \n
+*@brief Creates a criterion that measures the loss given input tensors x1 x2
+and a Tensor label y with values 1 or -1. \n
 
 *@par Inputs:
 *@li x1: A ND Tensor with one of the following types: int8, uint8, int32, float16, float32.
diff --git a/third_party/fwkacllib/inc/ops/functional_ops.h b/third_party/fwkacllib/inc/ops/functional_ops.h
index b09ac058..07cf57a0 100644
--- a/third_party/fwkacllib/inc/ops/functional_ops.h
+++ b/third_party/fwkacllib/inc/ops/functional_ops.h
@@ -36,7 +36,7 @@ namespace ge {
  *        if "cond" is a numerical scalar, non-zero means True and zero means False;
  *        if "cond" is a string scalar, non-empty means True and empty means False;
  *        if "cond" is not a scalar, non-empty means True and empty means False.
- *@li input: The input tensors . It's a dynamic input. \n
+ *@li input: The input tensors . \n
 
 *@par Graphs:
 *@li then_branch: A subgraph takes 'input' and returns a list of tensors,
@@ -69,7 +69,7 @@ REG_OP(_If)
  *        if "cond" is a numerical scalar, non-zero means True and zero means False;
  *        if "cond" is a string scalar, non-empty means True and empty means False;
  *        if "cond" is not a scalar, non-empty means True and empty means False.
- *@li input: The input tensors . It's a dynamic input. \n
+ *@li input: The input tensors . \n
 
 *@par Graphs:
 *@li then_branch: A subgraph takes 'input' and returns a list of tensors,
@@ -102,7 +102,7 @@ REG_OP(StatelessIf)
  *        if "cond" is a numerical scalar, non-zero means True and zero means False;
  *        if "cond" is a string scalar, non-empty means True and empty means False;
  *        if "cond" is not a scalar, non-empty means True and empty means False.
- *@li input: The input tensors . It's a dynamic input. \n
+ *@li input: The input tensors . \n
 
 *@par Graphs:
 *@li then_branch: A subgraph takes 'input' and returns a list of tensors,
@@ -129,7 +129,7 @@ REG_OP(If)
 
 *@par Inputs:
 *@li branch_index: A int32 scalar which determines the selected subgraph.
- *@li input: The input tensors, which will be passed to the subgraph . It's a dynamic input. \n
+ *@li input: The input tensors, which will be passed to the subgraph . \n
 
 *@par Graphs:
 *branches: A list of subgraphs, each of which takes 'input' and returns a list of tensors,
@@ -152,7 +152,7 @@ REG_OP(Case)
 
 *@brief Cyclic execute the "body" subgraph until the return tensor of "cond" subgraph means False . \n
 
 *@par Inputs:
- *input: The input tensors . It's a dynamic input. \n
+ *input: The input tensors . \n
 
 *@par Graphs:
 *@li cond: A subgraph takes 'input' and returns a tensor.
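The cond convention repeated across the If/While family above (numerical scalar: non-zero is True; string scalar: non-empty is True; non-scalar: non-empty is True) can be condensed into a small sketch; the tensor type here is a hypothetical stand-in, not a GE class:

    #include <cstdint>
    #include <string>
    #include <vector>

    struct CondTensorSketch {
      std::vector<int64_t> dims;   // empty => scalar
      bool is_string = false;      // string scalar vs. numerical scalar
      double numeric = 0.0;        // value when a numerical scalar
      std::string text;            // value when a string scalar
      size_t element_count = 0;    // total elements when not a scalar
    };

    bool CondIsTrue(const CondTensorSketch &cond) {
      if (cond.dims.empty()) {  // scalar case
        return cond.is_string ? !cond.text.empty() : cond.numeric != 0.0;
      }
      return cond.element_count != 0;  // non-scalar: non-empty means True
    }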
@@ -183,7 +183,7 @@ REG_OP(_While)
 
 *@brief Cyclic execute the "body" subgraph until the return tensor of "cond" subgraph means False . \n
 
 *@par Inputs:
- *input: The input tensors . It's a dynamic input. \n
+ *input: The input tensors . \n
 
 *@par Graphs:
 *@li cond: A subgraph takes 'input' and returns a tensor.
@@ -215,7 +215,7 @@ REG_OP(While)
 
 *@brief Cyclic execute the "body" subgraph until the return tensor of "cond" subgraph means False . \n
 
 *@par Inputs:
- *input: The input tensors . It's a dynamic input. \n
+ *input: The input tensors . \n
 
 *@par Graphs:
 *@li cond: A subgraph takes 'input' and returns a tensor.
@@ -250,7 +250,7 @@ REG_OP(StatelessWhile)
 
 *@li start: A int32 scalar. The lower bound.
 *@li limit: A int32 scalar. The upper bound.
 *@li delta: A int32 scalar. The step size.
- *@li input: The input tensors, which will be passed to "body" . It's a dynamic input. \n
+ *@li input: The input tensors, which will be passed to "body" . \n
 
 *@par Graphs:
 *body: A subgraph takes 'input' and returns another list of tensors . \n
@@ -274,7 +274,7 @@ REG_OP(For)
 
 *@brief Pass the input tensors to the subgraph "f" and return the output tensors . \n
 
 *@par Inputs:
- *args: The input tensors, which will be passed to "f" . It's a dynamic input. \n
+ *args: The input tensors, which will be passed to "f" . \n
 
 *@par Graphs:
 *f: A subgraph takes 'args' and returns another list of tensors . \n
@@ -303,7 +303,7 @@ REG_OP(PartitionedCall)
 
 *@brief Pass the input tensors to the subgraph "f" and return the output tensors . \n
 
 *@par Inputs:
- *args: The input tensors, which will be passed to "f" . It's a dynamic input. \n
+ *args: The input tensors, which will be passed to "f" . \n
 
 *@par Graphs:
 *f: A subgraph takes 'args' and returns another list of tensors . \n
diff --git a/third_party/fwkacllib/inc/ops/image_ops.h b/third_party/fwkacllib/inc/ops/image_ops.h
index a29c8553..5f7aee41 100644
--- a/third_party/fwkacllib/inc/ops/image_ops.h
+++ b/third_party/fwkacllib/inc/ops/image_ops.h
@@ -160,8 +160,10 @@ REG_OP(CropAndResize)
 *@li box_index: A Tensor of type int32. A 1-D tensor of shape [num_boxes] with int32 values in [0, batch) . \n
 
 *@par Attributes:
-*@li crop_size: list int. [crop_height, crop_width]. All cropped image patches are resized to this size.
-*@li extrapolation_value: An optional float. Defaults to 0. Value used for extrapolation, when applicable.
+*@li crop_size: list int. [crop_height, crop_width]. All cropped image patches
+are resized to this size.
+*@li extrapolation_value: An optional float. Defaults to 0. Value used for
+extrapolation, when applicable.
 *@li method: An optional string from: '"bilinear"'. Defaults to "bilinear" . \n
 
 *@par Outputs:
@@ -172,7 +174,6 @@ REG_OP(CropAndResize)
 
 *@par Third-party framework compatibility
 *Compatible with tensorflow CropAndResize operator.
-*
 * @par Restrictions:
 * Warning: THIS FUNCTION IS DEPRECATED. Please use CropAndResize instead.
 */
diff --git a/third_party/fwkacllib/inc/ops/nn_batch_norm_ops.h b/third_party/fwkacllib/inc/ops/nn_batch_norm_ops.h
index a35cee03..848e9f86 100644
--- a/third_party/fwkacllib/inc/ops/nn_batch_norm_ops.h
+++ b/third_party/fwkacllib/inc/ops/nn_batch_norm_ops.h
@@ -87,39 +87,58 @@ REG_OP(L2NormalizeGrad)
 
 *@par Inputs:
 * Five inputs, including: (NHWC, NCHW, or NC1HWC0 supported)
-*@li x: A 4D or 5D Tensor of type float16 or float32, with format NHWC or NCHW for 4D or NC1HWC0 for 5D.
-*@li scale: A Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. Must be 5D
+*@li x: A 4D or 5D Tensor of type float16 or float32, with format NHWC or NCHW
+for 4D or NC1HWC0 for 5D.
+*@li scale: A Tensor of type float32. Must be 1D if input "x" is with format
+NHWC or NCHW. Must be 5D
 if input "x" is with format NC1HWC0. Specifies the scaling factor.
 *@li offset: A Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW.
 Must be 5D if input "x" is with format NC1HWC0. Specifies the offset.
-*@li mean: A Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. Must be 5D
-if input "x" is with format NC1HWC0. Specifies the mean used for inference. Must be "None" if the
+*@li mean: A Tensor of type float32. Must be 1D if input "x" is with format
+NHWC or NCHW. Must be 5D
+if input "x" is with format NC1HWC0. Specifies the mean used for inference.
+Must be "None" if the
 operation is used for training.
-*@li variance: A Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. Must be
-5D if input "x" is with format NC1HWC0. Specifies the variance used for inference. Must be "None"
+*@li variance: A Tensor of type float32. Must be 1D if input "x" is with format
+NHWC or NCHW. Must be
+5D if input "x" is with format NC1HWC0. Specifies the variance used for
+inference. Must be "None"
 if the operation is used for training . \n
 
 *@par Attributes:
-*@li epsilon: An optional float32, specifying the small value added to variance to avoid dividing by zero. Defaults to "0.0001".
-*@li data_format: An optional string, specifying the format of "x". Defaults to "NHWC".
-*@li is_training: An optional bool, specifying if the operation is used for training or inference. Defaults to "True" . \n
+*@li epsilon: An optional float32, specifying the small value added to variance
+to avoid dividing by zero. Defaults to "0.0001".
+*@li data_format: An optional string, specifying the format of "x". Defaults to
+"NHWC".
+*@li is_training: An optional bool, specifying if the operation is used for
+training or inference. Defaults to "True" . \n
 
 *@par Outputs:
 * Five outputs, including: (NHWC, NCHW, or NC1HWC0 supported)
-*@li y: A 4D or 5D Tensor of type float16 or float32 for the normalized "x", with format NHWC or NCHW for 4D or NC1HWC0 for 5D.
-*@li batch_mean: A Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. Must be 5D
+*@li y: A 4D or 5D Tensor of type float16 or float32 for the normalized "x",
+with format NHWC or NCHW for 4D or NC1HWC0 for 5D.
+*@li batch_mean: A Tensor of type float32. Must be 1D if input "x" is with
+format NHWC or NCHW. Must be 5D
 if input "x" is with format NC1HWC0. Specifies the mean of "x".
-*@li batch_variance: A Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW.
+*@li batch_variance: A Tensor of type float32. Must be 1D if input "x" is with
+format NHWC or NCHW.
 Must be 5D if input "x" is with format NC1HWC0. Specifies the variance of "x".
-*@li reserve_space_1: An optional Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW.
-Must be 5D if input "x" is with format NC1HWC0. Specifies the mean of "x" for gradient computation. Pass "None" to skip this output.
-*@li reserve_space_2: An optional Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW.
-Must be 5D if input "x" is with format NC1HWC0. Specifies the variance of "x" for gradient computation. Pass "None" to skip this output . \n
+*@li reserve_space_1: An optional Tensor of type float32. Must be 1D if input
+"x" is with format NHWC or NCHW.
+Must be 5D if input "x" is with format NC1HWC0. Specifies the mean of "x" for
+gradient computation. Pass "None" to skip this output.
+*@li reserve_space_2: An optional Tensor of type float32. Must be 1D if input
+"x" is with format NHWC or NCHW.
+Must be 5D if input "x" is with format NC1HWC0. Specifies the variance of "x"
+for gradient computation. Pass "None" to skip this output . \n
 
 *@attention Constraints:
-*@li If the operation is used for inference and outputs "reserve_space_1" and "reserve_space_2" are available,
-then "reserve_space_1" has the same value as "mean" and "reserve_space_2" has the same value as "variance".
-*@li For Ascend 310, the result accuracy fails to reach 1‰ due to the square root instruction . \n
+*@li If the operation is used for inference and outputs "reserve_space_1" and
+"reserve_space_2" are available,
+then "reserve_space_1" has the same value as "mean" and "reserve_space_2" has
+the same value as "variance".
+*@li For Ascend 310, the result accuracy fails to reach 1‰ due to the square
+root instruction . \n
 
 *@par Third-party framework compatibility
 *@li Compatible with the TensorFlow operator fused_batch_norm.
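For orientation while reading the reflowed input/output lists above and below: in inference mode the op applies the standard batch-normalization transform per channel, y = scale * (x - mean) / sqrt(variance + epsilon) + offset, with "mean" and "variance" taken from the supplied tensors; in training mode the same transform uses the statistics computed over the current batch, which are also emitted as "batch_mean" and "batch_variance".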
@@ -166,13 +185,17 @@ is used for training or inference. Defaults to "True" . \n
 *@li y: A 4D Tensor of type float16 or float32, for the normalized "x".
 *@li batch_mean: A 1D Tensor of type float32, for the mean of "x".
 *@li batch_variance: A 1D Tensor of type float32, for the variance of "x".
-*@li reserve_space_1: A 1D Tensor of type float32, for the mean of "x" for gradient computation.
-*@li reserve_space_2: A 1D Tensor of type float32, for the variance of "x" for gradient computation . \n
+*@li reserve_space_1: A 1D Tensor of type float32, for the mean of "x" for
+gradient computation.
+*@li reserve_space_2: A 1D Tensor of type float32, for the variance of "x"
+for gradient computation . \n
 
 *@attention Constraints:
 *@li If the operation is used for inference, then output "reserve_space_1"
-has the same value as "mean" and output "reserve_space_2" has the same value as "variance".
-*@li For Ascend 310, the result accuracy fails to reach 1‰ due to the square root instruction . \n
+has the same value as "mean" and output "reserve_space_2" has the same value as
+"variance".
+*@li For Ascend 310, the result accuracy fails to reach 1‰ due to the square
+root instruction . \n
 
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator fused_batch_norm_v2.
@@ -198,23 +221,34 @@ REG_OP(BatchNormExt2)
 
 *@par Inputs:
 * Five inputs, including:
-*@li y_backprop: A 4D or 5D Tensor of type float16 or float32, with format NHWC, NCHW, or NC1HWC0, for the gradient.
-*@li x: A 4D or 5D Tensor of type float16 or float32, with format NHWC, NCHW, or NC1HWC0.
-*@li scale: A 4D or 5D Tensor of type float32, with format NHWC, NCHW, or NC1HWC0.
-*@li reserve_space_1: A 4D or 5D Tensor of type float32, with format NHWC, NCHW, or NC1HWC0. It is an output of BatchNorm.
+*@li y_backprop: A 4D or 5D Tensor of type float16 or float32, with format
+NHWC, NCHW, or NC1HWC0, for the gradient.
+*@li x: A 4D or 5D Tensor of type float16 or float32, with format NHWC, NCHW,
+or NC1HWC0.
+*@li scale: A 4D or 5D Tensor of type float32, with format NHWC, NCHW, or
+NC1HWC0.
+*@li reserve_space_1: A 4D or 5D Tensor of type float32, with format NHWC,
+NCHW, or NC1HWC0. It is an output of BatchNorm.
-*@li reserve_space_2: A 4D or 5D Tensor of type float32, with format NHWC, NCHW, or NC1HWC0. It is an output of BatchNorm . \n
+*@li reserve_space_2: A 4D or 5D Tensor of type float32, with format NHWC,
+NCHW, or NC1HWC0. It is an output of BatchNorm . \n
 
 *@par Attributes:
-*@li epsilon: An optional float32. Defaults to "0.0001". A small float number added to the variance of "x".
+*@li epsilon: An optional float32. Defaults to "0.0001". A small float number
+added to the variance of "x".
 *@li data_format: An optional string. Defaults to "NHWC".
 *@li is_training: An optional bool. Defaults to "true". Specifies the operation is for training (default) or inference . \n
 
 *@par Outputs:
-*@li x_backprop: A Tensor of type float16 or float32, with format NHWC, NCHW, or NC1HWC0, for the offset of "x".
-*@li scale_backprop: A Tensor of type float32, with format NHWC, NCHW, or NC1HWC0, for the offset of "scale".
-*@li *offset_backprop: A Tensor of type float32, with format NHWC, NCHW, or NC1HWC0, for the offset of "offset".
-*@li *reserve_space_4: A Tensor of type float32, with shape NHWC, NCHW, or NC1HWC0. Pass "None" to skip this output.
-*@li *reserve_space_5: A Tensor of type float32, with shape NHWC, NCHW, or NC1HWC0. Pass "None" to skip this output . \n
+*@li x_backprop: A Tensor of type float16 or float32, with format NHWC, NCHW,
+or NC1HWC0, for the offset of "x".
+*@li scale_backprop: A Tensor of type float32, with format NHWC, NCHW, or
+NC1HWC0, for the offset of "scale".
+*@li *offset_backprop: A Tensor of type float32, with format NHWC, NCHW, or
+NC1HWC0, for the offset of "offset".
+*@li *reserve_space_4: A Tensor of type float32, with shape NHWC, NCHW, or
+NC1HWC0. Pass "None" to skip this output.
+*@li *reserve_space_5: A Tensor of type float32, with shape NHWC, NCHW, or
+NC1HWC0. Pass "None" to skip this output . \n
 
 *@attention Constraints:
 * The preceding layer of this operator must be operator BatchNorm . \n
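The outputs documented above correspond to the textbook batch-norm gradients: offset_backprop = sum over the reduction axes of y_backprop, and scale_backprop = sum over the same axes of y_backprop * (x - mean) / sqrt(variance + epsilon), with x_backprop obtained by back-propagating through the normalization itself; "reserve_space_1" and "reserve_space_2" carry the saved mean and variance so these sums need not recompute them.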
@@ -244,21 +278,28 @@ REG_OP(BatchNormGrad)
 
 *@par Inputs:
 * Five inputs, including:
-*@li y_backprop: A 4D Tensor of type float16 or float32, with format NHWC or NCHW, for the gradient.
+*@li y_backprop: A 4D Tensor of type float16 or float32, with format NHWC or
+NCHW, for the gradient.
 *@li x: A 4D Tensor of type float16 or float32, with format NHWC or NCHW.
 *@li scale: A 4D Tensor of type float32, with format NHWC or NCHW.
-*@li reserve_space_1: A 4D Tensor of type float32, with format NHWC or NCHW. It is an output of BatchNormExt2.
-*@li reserve_space_2: A 4D Tensor of type float32, with format NHWC or NCHW. It is an output of BatchNormExt2 . \n
+*@li reserve_space_1: A 4D Tensor of type float32, with format NHWC or NCHW. It
+is an output of BatchNormExt2.
+*@li reserve_space_2: A 4D Tensor of type float32, with format NHWC or NCHW. It
+is an output of BatchNormExt2 . \n
 
 *@par Attributes:
 *@li epsilon: A required float32. A small float number added to the variance of "x".
 *@li data_format: A required string for the format.
-*@li is_training: A required bool for specifying the operation is for training (true) or inference (false) . \n
+*@li is_training: A required bool for specifying the operation is for training
+(true) or inference (false) . \n
 
 *@par Outputs:
-*@li x_backprop: A Tensor of type float16 or float32, with format NHWC or NCHW, for the offset of "x".
-*@li scale_backprop: A Tensor of type float32, with format NHWC or NCHW, for the offset of "scale".
-*@li offset_backprop: A Tensor of type float32, with format NHWC or NCHW, for the offset of "offset".
+*@li x_backprop: A Tensor of type float16 or float32, with format NHWC or NCHW,
+for the offset of "x".
+*@li scale_backprop: A Tensor of type float32, with format NHWC or NCHW, for
+the offset of "scale".
+*@li offset_backprop: A Tensor of type float32, with format NHWC or NCHW, for
+the offset of "offset".
 *@li reserve_space_3: A Tensor of type float32, with format NHWC or NCHW.
 *@li reserve_space_4: A Tensor of type float32, with format NHWC or NCHW . \n
@@ -290,14 +331,18 @@ REG_OP(BatchNormGradExt2)
 
 *@brief Performs batch normalization . \n
 
 *@par Inputs:
-*@li x: A 4D or 5D Tensor of type float16 or float32, with format NHWC or NCHW for 4D or NC1HWC0 for 5D.
-*@li mean: A Tensor of type float32 or float16. Must be 1D if input "x" Specifies the mean used for inference.
-*@li variance: A Tensor of type float32 or float16 . Must be 1D if input "x" Specifies the variance used for inference.
+*@li x: A 4D or 5D Tensor of type float16 or float32, with format NHWC or NCHW
+for 4D or NC1HWC0 for 5D.
+*@li mean: A Tensor of type float32 or float16. Must be 1D if input "x"
+Specifies the mean used for inference.
+*@li variance: A Tensor of type float32 or float16 . Must be 1D if input "x"
+Specifies the variance used for inference.
 *@li momentum: A Tensor, represents the mean and the variance's scale factor
 *@li scale: An optional tensor of type float16 or float32, no use
 *@li offset: An optional tensor of type float16 or float32, no use
 *@par Attributes:
-*@li epsilon: An optional float32, specifying the small value added to variance to avoid dividing by zero. Defaults to "0.00001".
+*@li epsilon: An optional float32, specifying the small value added to variance
+to avoid dividing by zero. Defaults to "0.00001".
 *@li use_global_stats: mean inference mode , only can be "True".
 *@li mode: An optional input, not use
 *@par Outputs:
 REG_OP(BNInference)
     .ATTR(use_global_stats, Bool,true)
     .ATTR(mode, Int,1)
     .OP_END_FACTORY_REG(BNInference)
+
 /**
 *@brief aicpu batch normalization host . \n
 
 *@par Inputs:
-*@li mean: A Tensor of type float32 or float16. Must be 1D if input "x" Specifies the mean used for inference.
-*@li variance: A Tensor of type float32 or float16 . Must be 1D if input "x" Specifies the variance used for inference.
+*@li mean: A Tensor of type float32 or float16. Must be 1D if input "x"
+Specifies the mean used for inference.
+*@li variance: A Tensor of type float32 or float16 . Must be 1D if input "x"
+Specifies the variance used for inference.
 *@li momentum: An optional float, mean and variance's Scale factor
 *@par Attributes:
-*@li epsilon: An optional float32, specifying the small value added to variance to avoid dividing by zero. Defaults to "0.00001".
+*@li epsilon: An optional float32, specifying the small value added to variance
+to avoid dividing by zero. Defaults to "0.00001".
 *@li use_global_stats: mean inference mode , only can be "True".
 *@li mode: An optional attr, not use
 *@par Outputs:
@@ -348,14 +397,19 @@ REG_OP(BnHost)
 
 *@brief Performs batch normalization . \n
 
 *@par Inputs:
-*@li x: A 4D or 5D Tensor of type float16 or float32, with format NHWC or NCHW for 4D or NC1HWC0 for 5D.
-*@li mean: A Tensor of type float32 or float16. Must be 1D if input "x" Specifies the mean used for inference.
+*@li x: A 4D or 5D Tensor of type float16 or float32, with format NHWC or NCHW
+for 4D or NC1HWC0 for 5D.
+*@li mean: A Tensor of type float32 or float16. Must be 1D if input "x"
+Specifies the mean used for inference.
-*@li variance: A Tensor of type float32 or float16 . Must be 1D if input "x" Specifies the variance used for inference.
+*@li variance: A Tensor of type float32 or float16 . Must be 1D if input "x"
+Specifies the variance used for inference.
 *@li scale: An optional tensor of type float16 or float32, no use
 *@li offset: An optional tensor of type float16 or float32, no use
 *@par Attributes:
-*@li momentum: An optional float32 num, represents the mean and the variance's scale factor
-*@li epsilon: An optional float32, specifying the small value added to variance to avoid dividing by zero. Defaults to "0.00001".
+*@li momentum: An optional float32 num, represents the mean and the variance's
+scale factor
+*@li epsilon: An optional float32, specifying the small value added to variance
+to avoid dividing by zero. Defaults to "0.00001".
 *@li use_global_stats: mean inference mode , only can be "True".
 *@li mode: An optional attr, not use
 *@par Outputs:
diff --git a/third_party/fwkacllib/inc/ops/nn_calculation_ops.h b/third_party/fwkacllib/inc/ops/nn_calculation_ops.h
index 7c06637f..08253522 100644
--- a/third_party/fwkacllib/inc/ops/nn_calculation_ops.h
+++ b/third_party/fwkacllib/inc/ops/nn_calculation_ops.h
@@ -310,9 +310,6 @@ REG_OP(DepthwiseConv2DBackpropInputD)
 * @par Third-party framework compatibility
 * @li Compatible with the TensorFlow operator DepthwiseConv2D.
 * @li Compatible with the Caffe operator DepthwiseConv2D.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
 */
 REG_OP(DepthwiseConv2D)
     .INPUT(x, TensorType({DT_FLOAT16, DT_INT8}))
diff --git a/third_party/fwkacllib/inc/ops/nn_detect_ops.h b/third_party/fwkacllib/inc/ops/nn_detect_ops.h
index 476704e5..f5a6201e 100644
--- a/third_party/fwkacllib/inc/ops/nn_detect_ops.h
+++ b/third_party/fwkacllib/inc/ops/nn_detect_ops.h
@@ -158,18 +158,25 @@ REG_OP(Iou)
 
 *@par Inputs:
 * Three inputs, including:
 *@li ydiff: A 5HD gradient input of type float32.
-*@li rois: ROI position. A 2D Tensor of float32 with shape (N, 5). "N" indicates the number of ROIs,
-the value "5" indicates the indexes of images where the ROIs are located, "x0", "x1", "y0", and "y1".
-*@li rois_n: An optional input, specifying the number of valid ROIs. This parameter is reserved . \n
+*@li rois: ROI position. A 2D Tensor of float32 with shape (N, 5). "N"
+indicates the number of ROIs,
+the value "5" indicates the indexes of images where the ROIs are located, "x0",
+"x1", "y0", and "y1".
+*@li rois_n: An optional input, specifying the number of valid ROIs. This
+parameter is reserved . \n
 
 *@par Attributes:
 *@li xdiff_shape: A required list of 4 ints, obtained based on the shape of "features" of ROIAlign.
 *@li pooled_width: A required attribute of type int, specifying the W dimension.
 *@li pooled_height: A required attribute of type int, specifying the H dimension.
-*@li spatial_scale: A required attribute of type float, specifying the scaling ratio of "features" to the original image.
-*@li sample_num: An optional attribute of type int, specifying the horizontal and vertical
-sampling frequency of each output. If this attribute is set to "0", the sampling frequency is
-equal to the rounded up value of "rois", which is a floating point number. Defaults to "2" . \n
+*@li spatial_scale: A required attribute of type float, specifying the scaling
+ratio of "features" to the original image.
+*@li sample_num: An optional attribute of type int, specifying the horizontal
+and vertical
+sampling frequency of each output. If this attribute is set to "0", the
+sampling frequency is
+equal to the rounded up value of "rois", which is a floating point number.
+Defaults to "2" . \n *@par Outputs: *xdiff: Gradient added to input "features". Has the same 5HD shape as input "features". diff --git a/third_party/fwkacllib/inc/ops/nn_norm_ops.h b/third_party/fwkacllib/inc/ops/nn_norm_ops.h index 0fdf27e3..0c607162 100644 --- a/third_party/fwkacllib/inc/ops/nn_norm_ops.h +++ b/third_party/fwkacllib/inc/ops/nn_norm_ops.h @@ -924,29 +924,7 @@ REG_OP(InstanceNormV2) .ATTR(epsilon, Float, 0.00001) .OP_END_FACTORY_REG(InstanceNormV2) -/** -*@brief Performs instance normalization for inference. - -*@par Inputs:\n -* Five inputs, including: (NC1HWC0 supported) -*@li x: A Tensor of type float16 or float32. -*@li gamma: A [N, C1, 1, 1, C0] Tensor of type float32, for the scaling gamma. -*@li beta: A [N, C1, 1, 1, C0] Tensor of type float32, for the scaling beta. -*@li mean: A [N, C1, 1, 1, C0] ensor of type float32, for the mean. -*@li variance: A [N, C1, 1, 1, C0] Tensor of type float32, for the variance. -*@li variance_sqrt: A [N, C1, 1, 1, C0] Tensor of type float32, for the variance_sqrt. - -*@par Outputs:\n -*y: A Tensor of type float16 or float32 for the normalized "x". -*batch_mean: A Tensor of type float32 for the result mean. -*batch_ variance: A Tensor of type float32 for the result variance. - -*@attention Constraints: -*For Ascend 310, the result accuracy fails to reach 1<89> due to the square root instruction. -* @par Restrictions: -* Warning: THIS FUNCTION IS DEPRECATED. Please use INInferV2 instead. -*/ REG_OP(INInferV2D) .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) .OPTIONAL_INPUT(gamma, TensorType({DT_FLOAT})) diff --git a/third_party/fwkacllib/inc/ops/nn_pooling_ops.h b/third_party/fwkacllib/inc/ops/nn_pooling_ops.h index 473e94b7..471c0062 100644 --- a/third_party/fwkacllib/inc/ops/nn_pooling_ops.h +++ b/third_party/fwkacllib/inc/ops/nn_pooling_ops.h @@ -168,9 +168,6 @@ REG_OP(AvgPoolV2) *@par Third-party framework compatibility * Compatible with the TensorFlow operator AvgPool3D. -* -* @par Restrictions: -*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. */ REG_OP(AvgPool3D) .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE})) diff --git a/third_party/fwkacllib/inc/ops/nn_training_ops.h b/third_party/fwkacllib/inc/ops/nn_training_ops.h index 92074872..d50b3d2b 100644 --- a/third_party/fwkacllib/inc/ops/nn_training_ops.h +++ b/third_party/fwkacllib/inc/ops/nn_training_ops.h @@ -111,9 +111,6 @@ REG_OP(ApplyAdaMax) * *@par Third-party framework compatibility *Compatible with the TensorFlow operator ApplyAdaMax. -* -* @par Restrictions: -* Warning: THIS FUNCTION IS DEPRECATED. Please use ApplyAdaMax instead. */ REG_OP(ApplyAdaMaxD) .INPUT(var, TensorType::NumberType()) @@ -352,9 +349,6 @@ REG_OP(ApplyMomentum) * accum: A mutable tensor. Has the same type as input "accum". *@par Third-party framework compatibility *Compatible with the TensorFlow operator ApplyMomentum. -* -* @par Restrictions: -* Warning: THIS FUNCTION IS DEPRECATED. Please use ApplyMomentum instead. */ REG_OP(ApplyMomentumD) @@ -681,9 +675,6 @@ REG_OP(ApplyPowerSign) * *@par Third-party framework compatibility *Compatible with the TensorFlow operator ApplyPowerSign. -* -* @par Restrictions: -* Warning: THIS FUNCTION IS DEPRECATED. Please use ApplyPowerSign instead. */ REG_OP(ApplyPowerSignD) .INPUT(var, TensorType::NumberType()) @@ -804,9 +795,6 @@ REG_OP(ApplyAddSign) *@par Third-party framework compatibility * Compatible with the TensorFlow operator ApplyAddSign. -* -* @par Restrictions: -* Warning: THIS FUNCTION IS DEPRECATED. Please use ApplyAddSign instead. 
 */
 REG_OP(ApplyAddSignD)
     .INPUT(var, TensorType::NumberType())
@@ -928,9 +916,6 @@ REG_OP(ApplyCenteredRMSProp)
 
 *@par Third-party framework compatibility
 *Compatible with the TensorFlow operator ApplyCenteredRMSPropD.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use ApplyCenteredRMSProp instead.
 */
 REG_OP(ApplyCenteredRMSPropD)
     .INPUT(var, TensorType::NumberType())
@@ -1049,9 +1034,6 @@ REG_OP(ApplyAdagrad)
 *
 *@par Third-party framework compatibility
 *Compatible with the TensorFlow operator ApplyAdagrad.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use ApplyAdagrad instead.
 */
 REG_OP(ApplyAdagradD)
     .INPUT(var, TensorType::NumberType())
@@ -1236,9 +1218,6 @@ REG_OP(ApplyAdagradDA)
 
 *@par Third-party framework compatibility
 *Compatible with the TensorFlow operator ApplyAdagradDA.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use ApplyAdagradDA instead.
 */
 REG_OP(ApplyAdagradDAD)
     .INPUT(var, TensorType::NumberType())
@@ -1496,9 +1475,6 @@ REG_OP(ApplyProximalAdagrad)
 
 *@par Third-party framework compatibility
 *Compatible with the TensorFlow operator ApplyProximalAdagradD.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use ApplyProximalAdagrad instead.
 */
 REG_OP(ApplyProximalAdagradD)
     .INPUT(var, TensorType::NumberType())
@@ -1592,9 +1568,6 @@ REG_OP(SparseApplyProximalAdagrad)
 
 *@par Third-party framework compatibility
 *Compatible with the TensorFlow operator SparseApplyProximalAdagrad.
-
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use SparseApplyProximalAdagrad instead.
 */
 REG_OP(SparseApplyProximalAdagradD)
     .INPUT(var, TensorType::NumberType())
@@ -1681,9 +1654,6 @@ REG_OP(ApplyFtrl)
 
 *@par Third-party framework compatibility
 *Compatible with the TensorFlow operator ApplyFtrl.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use ApplyFtrl instead.
 */
 REG_OP(ApplyFtrlD)
     .INPUT(var, TensorType::NumberType())
@@ -1775,9 +1745,6 @@ REG_OP(ApplyFtrlV2)
 
 *@par Third-party framework compatibility
 *Compatible with the TensorFlow operator ApplyFtrlV2.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use ApplyFtrlV2 instead.
 */
 REG_OP(ApplyFtrlV2D)
     .INPUT(var, TensorType::NumberType())
@@ -1890,9 +1857,6 @@ REG_OP(ApplyAdam)
 
 *@par Third-party framework compatibility
 *Compatible with the TensorFlow operator ApplyAdam.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use ApplyAdam instead.
 */
 REG_OP(ApplyAdamD)
     .INPUT(var, TensorType::NumberType())
@@ -1981,9 +1945,6 @@ REG_OP(ApplyAdadelta)
 
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator ApplyAdadelta.
-
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use ApplyAdadelta instead.
 */
 REG_OP(ApplyAdadeltaD)
     .INPUT(var, TensorType::NumberType())
diff --git a/third_party/fwkacllib/inc/ops/pad_ops.h b/third_party/fwkacllib/inc/ops/pad_ops.h
index 92dca17c..ed10648e 100644
--- a/third_party/fwkacllib/inc/ops/pad_ops.h
+++ b/third_party/fwkacllib/inc/ops/pad_ops.h
@@ -65,9 +65,6 @@ REG_OP(Fill)
 *
 *@par Outputs:
 * y: A tensor. Has the same type as "value".
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use Fill instead.
 */
 REG_OP(FillD)
     .INPUT(value, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16,
@@ -125,9 +122,6 @@ REG_OP(BroadcastTo)
 *
 *@par Third-party framework compatibility
 *Compatible with the TensorFlow operator BroadcastTo.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use BroadcastTo instead.
 */
 REG_OP(BroadcastToD)
     .INPUT(x, TensorType::BasicType())
@@ -175,9 +169,6 @@ REG_OP(Pad)
 
 *@par Third-party framework compatibility:
 * Compatible with TensorFlow operator Pad.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use Pad instead.
 */
 REG_OP(PadD)
     .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8, DT_FLOAT}))
@@ -272,9 +263,6 @@ REG_OP(PadV3D)
 *@see Diag()
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator Diag.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use Diag instead.
 */
 REG_OP(DiagD)
     .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
diff --git a/third_party/fwkacllib/inc/ops/ragged_conversion_ops.h b/third_party/fwkacllib/inc/ops/ragged_conversion_ops.h
index 020e3da4..099d2a85 100644
--- a/third_party/fwkacllib/inc/ops/ragged_conversion_ops.h
+++ b/third_party/fwkacllib/inc/ops/ragged_conversion_ops.h
@@ -30,7 +30,7 @@ namespace ge {
 *@par Inputs:
 *Two inputs, including:
 *@li rt_nested_splits: A list of at least 1 Tensor objects with the same type
-in: int32, int64. The row_splits for the RaggedTensor. It's a dynamic input.
+in: int32, int64. The row_splits for the RaggedTensor.
 *@li rt_dense_values: A Tensor. The flat_values for the RaggedTensor
 Must be one of the following types: bool, int8, int16, uint16, int32,
 int64, double, float, float16 . \n
@@ -66,7 +66,7 @@ REG_OP(RaggedTensorToSparse)
 *@li values:A 1D tensor representing the values of the ragged tensor.
 *@li default_value:A `Tensor`. Must have the same type as `values`.
 *@li row_partition_tensors:A list of at least 1 `Tensor` objects with the same
-type in: `int64`, `int32` . It's a dynamic input.\n
+type in: `int64`, `int32` .\n
 
 *@par Attributes:
 *@li num_row_partition_tensors:Numbers of row partition tensors.
diff --git a/third_party/fwkacllib/inc/ops/random_ops.h b/third_party/fwkacllib/inc/ops/random_ops.h
index 847b0768..df6cf33a 100644
--- a/third_party/fwkacllib/inc/ops/random_ops.h
+++ b/third_party/fwkacllib/inc/ops/random_ops.h
@@ -374,9 +374,6 @@ REG_OP(DropOutGenMask)
 
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator lin_space.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use LinSpace instead.
 */
 REG_OP(LinSpaceD)
     .INPUT(assist, TensorType({DT_FLOAT}))
diff --git a/third_party/fwkacllib/inc/ops/reduce_ops.h b/third_party/fwkacllib/inc/ops/reduce_ops.h
index cd448c8d..386c88c3 100644
--- a/third_party/fwkacllib/inc/ops/reduce_ops.h
+++ b/third_party/fwkacllib/inc/ops/reduce_ops.h
@@ -353,9 +353,6 @@ REG_OP(ReduceSum)
 
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator Sum.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceSum instead.
 */
 REG_OP(ReduceSumD)
     .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
@@ -381,9 +378,6 @@ REG_OP(ReduceSumD)
 
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator ReduceAll.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceAll instead.
 */
 REG_OP(ReduceAllD)
     .INPUT(x, TensorType({DT_BOOL}))
@@ -459,9 +453,6 @@ REG_OP(ReduceProd)
 
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator ReduceProd.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceProd instead.
diff --git a/third_party/fwkacllib/inc/ops/random_ops.h b/third_party/fwkacllib/inc/ops/random_ops.h
index 847b0768..df6cf33a 100644
--- a/third_party/fwkacllib/inc/ops/random_ops.h
+++ b/third_party/fwkacllib/inc/ops/random_ops.h
@@ -374,9 +374,6 @@ REG_OP(DropOutGenMask)
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator lin_space.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use LinSpace instead.
 */
 REG_OP(LinSpaceD)
     .INPUT(assist, TensorType({DT_FLOAT}))
diff --git a/third_party/fwkacllib/inc/ops/reduce_ops.h b/third_party/fwkacllib/inc/ops/reduce_ops.h
index cd448c8d..386c88c3 100644
--- a/third_party/fwkacllib/inc/ops/reduce_ops.h
+++ b/third_party/fwkacllib/inc/ops/reduce_ops.h
@@ -353,9 +353,6 @@ REG_OP(ReduceSum)
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator Sum.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceSum instead.
 */
 REG_OP(ReduceSumD)
     .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
@@ -381,9 +378,6 @@ REG_OP(ReduceSumD)
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator ReduceAll.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceAll instead.
 */
 REG_OP(ReduceAllD)
     .INPUT(x, TensorType({DT_BOOL}))
@@ -459,9 +453,6 @@ REG_OP(ReduceProd)
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator ReduceProd.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceProd instead.
 */
 REG_OP(ReduceProdD)
     .INPUT(x,TensorType({DT_FLOAT, DT_UINT8, DT_INT8, DT_INT32, DT_FLOAT16}))
@@ -516,9 +507,6 @@ REG_OP(ReduceMean)
 *@par Third-party framework compatibility:
 * Compatible with the TensorFlow operator ReduceMean.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceMean instead.
 */
 REG_OP(ReduceMeanD)
     .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
@@ -573,9 +561,6 @@ REG_OP(ReduceMax)
 *@par Third-party framework compatibility
 * Compatible with TensorFlow operator Max.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceMax instead.
 */
 REG_OP(ReduceMaxD)
     .INPUT(x, TensorType({DT_FLOAT, DT_UINT8, DT_INT8,
@@ -630,9 +615,6 @@ REG_OP(ReduceMin)
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator reduce_min.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceMin instead.
 */
 REG_OP(ReduceMinD)
     .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT8,DT_UINT8}))
@@ -699,9 +681,6 @@ REG_OP(ReduceAny)
 *
 *@par Third-party framework compatibility
 *Compatible with the TensorFlow operator reduce_any.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceAny instead.
 */
 REG_OP(ReduceAnyD)
     .INPUT(x, TensorType({DT_BOOL}))
@@ -787,9 +766,6 @@ REG_OP(EuclideanNorm)
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator EuclideanNorm.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use EuclideanNorm instead.
 */
 REG_OP(EuclideanNormD)
     .INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_FLOAT16}))
diff --git a/third_party/fwkacllib/inc/ops/rnn.h b/third_party/fwkacllib/inc/ops/rnn.h
index e33f3677..07b8c0c7 100644
--- a/third_party/fwkacllib/inc/ops/rnn.h
+++ b/third_party/fwkacllib/inc/ops/rnn.h
@@ -92,6 +92,7 @@ REG_OP(DynamicLSTM)
     .OUTPUT(output_h, TensorType({DT_FLOAT32}))
     .OP_END_FACTORY_REG(DynamicLSTM)
+
 /**
 *@brief: DynamicRNNGrad calculation.
 *@par Inputs:
@@ -126,7 +127,7 @@ REG_OP(DynamicLSTM)
 *@li keep_prob:An float identifying the keep prob in the op. Default to 1.
 *@li cell_clip:An float identifying the cell clip in the op. Default to -1.
 *@li num_proj:An integer identifying the num projection in the op. Default to 0.
-*@li time_major:An bool identifying the time major in the op. Default to false.
+*@li time_major:An bool identifying the time major in the op. Default to true.
 *@li activation:An string identifying the type of activation function in the op. Default to "tanh". Only tanh is currently supported.
 *@li forget_bias:An float identifying the forget bias in the op. Default to 0.
 *@li is_training:An bool identifying is training in the op. Default to true.
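The rnn.h hunk above corrects the documented default of time_major to true. What the flag means for the input layout, as a hedged index-math sketch (assuming the [num_step, batch_size, input_size] versus [batch_size, num_step, input_size] layouts named in the surrounding docs; the function is illustrative, not part of the API):

    #include <cstddef>

    // With time_major=true the RNN input is laid out
    // [num_step, batch_size, input_size]; with time_major=false it would be
    // [batch_size, num_step, input_size]. Same buffer, different stride order.
    size_t FlatIndex(bool time_major, size_t t, size_t b, size_t f,
                     size_t num_step, size_t batch, size_t input_size) {
      return time_major ? (t * batch + b) * input_size + f
                        : (b * num_step + t) * input_size + f;
    }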
diff --git a/third_party/fwkacllib/inc/ops/save_ops.h b/third_party/fwkacllib/inc/ops/save_ops.h
index 5ce6c2e0..a232e7ba 100644
--- a/third_party/fwkacllib/inc/ops/save_ops.h
+++ b/third_party/fwkacllib/inc/ops/save_ops.h
@@ -28,7 +28,7 @@ namespace ge {
 /**
 *@brief Mark which tensors need to be saved to the ckpt file.
 *@par Inputs:
-*tensors: A list of input tensor.It's a dynamic input.
+*tensors: A list of input tensor.
 *@par Restrictions:
 *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
 */
diff --git a/third_party/fwkacllib/inc/ops/sdca_ops.h b/third_party/fwkacllib/inc/ops/sdca_ops.h
index 34c6a268..318e05a2 100644
--- a/third_party/fwkacllib/inc/ops/sdca_ops.h
+++ b/third_party/fwkacllib/inc/ops/sdca_ops.h
@@ -35,16 +35,16 @@ namespace ge {
 *rate . \n
 *@par Inputs:
-*@li sparse_example_indices: a list of vectors which contain example indices.It's a dynamic input.
-*@li sparse_feature_indices: a list of vectors which contain feature indices.It's a dynamic input.
-*@li sparse_feature_values: a list of vectors which contains feature value associated with each feature group.It's a dynamic input.
-*@li dense_features: a list of matrices which contains the dense feature values.It's a dynamic input.
+*@li sparse_example_indices: a list of vectors which contain example indices.
+*@li sparse_feature_indices: a list of vectors which contain feature indices.
+*@li sparse_feature_values: a list of vectors which contains feature value associated with each feature group.
+*@li dense_features: a list of matrices which contains the dense feature values.
 *@li example_weights: a vector which contains the weight associated with each example.
 *@li example_labels: a vector which contains the label/target associated with each example.
 *@li sparse_indices: a list of vectors where each value is the indices which has
-*corresponding weights in sparse_weights. This field maybe omitted for the dense approach.It's a dynamic input.
+*corresponding weights in sparse_weights. This field maybe omitted for the dense approach.
 *@li sparse_weights: a list of vectors where each value is the weight associated with a sparse feature group.
-*@li dense_weights: a list of vectors where the values are the weights associated with a dense feature group.It's a dynamic input.
+*@li dense_weights: a list of vectors where the values are the weights associated with a dense feature group.
 *@li example_state_data: a list of vectors containing the example state data.
 *@li loss_type: Type of the primal loss. Currently SdcaSolver supports logistic, squared and hinge losses.
 *@li l1: Symmetric l1 regularization strength.
@@ -61,7 +61,6 @@ namespace ge {
 *@par Third-party framework compatibility
 * Compatible with tensorflow SdcaOptimizerV2 operator.
 */
-
 REG_OP(SdcaOptimizerV2)
     .DYNAMIC_INPUT(sparse_example_indices, TensorType({DT_INT64}))
     .DYNAMIC_INPUT(sparse_feature_indices, TensorType({DT_INT64}))
diff --git a/third_party/fwkacllib/inc/ops/selection_ops.h b/third_party/fwkacllib/inc/ops/selection_ops.h
index e7f35e02..b3ff7297 100644
--- a/third_party/fwkacllib/inc/ops/selection_ops.h
+++ b/third_party/fwkacllib/inc/ops/selection_ops.h
@@ -79,9 +79,6 @@ REG_OP(Range)
 *@see Range()
 *@since V100R001C33
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use Range instead.
 */
 REG_OP(RangeD)
     .INPUT(x, TensorType({DT_FLOAT,DT_INT32}))
@@ -226,9 +223,6 @@ REG_OP(GatherV2)
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator GatherV2.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use GatherV2 instead.
 */
 REG_OP(GatherV2D)
     .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_UINT32, DT_INT8, DT_UINT8,
@@ -331,9 +325,6 @@ REG_OP(StridedSlice)
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator StridedSlice.
-
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use StridedSlice instead.
 */
 REG_OP(StridedSliceD)
     .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_UINT8, DT_INT8,
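Between the StridedSlice hunks, a one-dimensional reading of the begin/end/strides contract may help. This is an illustrative sketch under the assumption of a positive stride and in-range bounds, not the operator's implementation:

    #include <vector>

    // 1-D reading of the StridedSlice contract:
    // out[i] = x[begin + i*stride] for begin <= begin + i*stride < end.
    // Assumes stride > 0 and 0 <= begin <= end <= x.size().
    std::vector<float> StridedSlice1D(const std::vector<float> &x,
                                      int begin, int end, int stride) {
      std::vector<float> out;
      for (int i = begin; i < end; i += stride) {
        out.push_back(x[i]);
      }
      return out;
    }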
@@ -389,9 +380,6 @@ REG_OP(StridedSliceD)
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator StridedSliceGradD.
-
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use StridedSliceGrad instead.
 */
 REG_OP(StridedSliceGradD)
     .INPUT(dy, TensorType::BasicType())
@@ -503,9 +491,6 @@ REG_OP(UnsortedSegmentSum)
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator UnsortedSegmentSum.
-
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use UnsortedSegmentSum instead.
 */
 REG_OP(UnsortedSegmentSumD)
     .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8, DT_UINT8}))
@@ -730,9 +715,6 @@ REG_OP(OneHot)
 *@par Third-party framework compatibility:
 * Compatible with the TensorFlow operator OneHot.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use OneHot instead.
 */
 REG_OP(OneHotD)
     .INPUT(x, TensorType({DT_UINT8, DT_INT32}))
@@ -808,7 +790,7 @@ REG_OP(SliceD)
 * @li assist_seq: A 1D tensor of type float16.
 * with size of 2N, which "N" is the last dimension.
 * The first N numbers is indices, and the next N numbers is deviation of casting
-* int32 to float16. \n
+* float16 to int32 . \n
 * @par Attributes:
 * @li k: A required int that is at least 0, specifying the number of top elements
@@ -817,7 +799,7 @@ REG_OP(SliceD)
 * If true, the resulting "k" elements will be sorted by the values in descending
 * order.
 * @li dim: An optional int. Defaults to -1. For reserved use.
-* @li largest: An optional bool. Defaults to true. For reserved use. \n
+* @li largest: An optional bool. Defaults to true. For reserved use.
 * @par Outputs:
 * @li values: A Tensor, specifying the sorted data. Has the same type as "input".
@@ -1280,9 +1262,6 @@ REG_OP(InplaceUpdate)
 *@par Third-party framework compatibility
 *Compatible with the TensorFlow operator InplaceUpdate.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use InplaceUpdate instead.
 */
 REG_OP(InplaceUpdateD)
     .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
@@ -1335,9 +1314,6 @@ REG_OP(InplaceAdd)
 *@par Third-party framework compatibility
 *Compatible with the TensorFlow operator InplaceAdd.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use InplaceAdd instead.
 */
 REG_OP(InplaceAddD)
     .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
@@ -1389,9 +1365,6 @@ REG_OP(InplaceSub)
 *@par Third-party framework compatibility
 *Compatible with the TensorFlow operator InplaceSub.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use InplaceSub instead.
 */
 REG_OP(InplaceSubD)
     .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
@@ -1443,9 +1416,6 @@ REG_OP(ScatterNonAliasingAdd)
 * @par Outputs:
 * y: A Tensor of type RealNumberType . \n
-* @attention Constraints:
-* @li segment_ids must be non-negative tensor.
-
 * @see UnsortedSegmentSum(), UnsortedSegmentProd(),
 * @par Third-party framework compatibility
@@ -1473,9 +1443,6 @@ REG_OP(UnsortedSegmentMin)
 * @par Outputs:
 * y: A Tensor.Must have the same type as input "x" . \n
-* @attention Constraints:
-* @li segment_ids must be non-negative tensor.
-
 * @see UnsortedSegmentProdD(), UnsortedSegmentSumD(),
 *
 * @par Restrictions:
@@ -1501,9 +1468,6 @@ REG_OP(UnsortedSegmentMinD)
 * @par Outputs:
 * y: A Tensor of type RealNumberType . \n
-* @attention Constraints:
-* @li segment_ids must be non-negative tensor.
-
 * @see UnsortedSegmentSum(), UnsortedSegmentProd(),
 * @par Third-party framework compatibility
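The hunks around here remove the non-negative segment_ids note from several UnsortedSegment* docs. As a reference for the family's semantics (TF-style: out-of-range segment ids are dropped, empty segments keep the combiner's identity, which is 0 for sum), a minimal sum-combiner sketch, illustrative rather than the kernel:

    #include <cstdint>
    #include <vector>

    // Every x[i] is folded into y[segment_ids[i]] with the op's combiner
    // (sum here). Ids outside [0, num_segments) are skipped; segments that
    // receive no element stay at the identity value 0.
    std::vector<float> UnsortedSegmentSum(const std::vector<float> &x,
                                          const std::vector<int32_t> &segment_ids,
                                          int32_t num_segments) {
      std::vector<float> y(num_segments, 0.0f);
      for (size_t i = 0; i < x.size(); ++i) {
        if (segment_ids[i] >= 0 && segment_ids[i] < num_segments) {
          y[segment_ids[i]] += x[i];
        }
      }
      return y;
    }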
@@ -1531,9 +1495,6 @@ REG_OP(UnsortedSegmentMax)
 * @par Outputs:
 * y: A Tensor.Must have the same type as input "x" . \n
-* @attention Constraints:
-* @li segment_ids must be non-negative tensor.
-
 * @see UnsortedSegmentProdD(),
 *
 * @par Restrictions:
@@ -1558,9 +1519,6 @@ REG_OP(UnsortedSegmentMaxD)
 * @par Outputs:
 * y: A Tensor of type NumberType . \n
-* @attention Constraints:
-* @li segment_ids must be non-negative tensor.
-
 * @see UnsortedSegmentSum(), UnsortedSegmentMin(),
 * @par Third-party framework compatibility
@@ -1592,9 +1550,6 @@ REG_OP(UnsortedSegmentProd)
 * @li segment_ids must be non-negative tensor.
 * @see UnsortedSegmentMinD()
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use UnsortedSegmentProd instead.
 */
 REG_OP(UnsortedSegmentProdD)
     .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT16}))
@@ -1910,9 +1865,6 @@ REG_OP(CumulativeLogsumexp)
 *y: A Tensor. Has the same type as "x".
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator Cumsum.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use CumulativeLogsumexp instead.
 */
 REG_OP(CumulativeLogsumexpD)
     .INPUT(x, TensorType({DT_DOUBLE, DT_FLOAT, DT_FLOAT16}))
diff --git a/third_party/fwkacllib/inc/ops/split_combination_ops.h b/third_party/fwkacllib/inc/ops/split_combination_ops.h
index f1a93fa6..6d511728 100644
--- a/third_party/fwkacllib/inc/ops/split_combination_ops.h
+++ b/third_party/fwkacllib/inc/ops/split_combination_ops.h
@@ -75,9 +75,6 @@ REG_OP(Split)
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator Split.
-
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use Split instead.
 */
 REG_OP(SplitD)
     .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
@@ -144,9 +141,6 @@ Under the caffe framework, the conversion of slice_point through the cut point t
 Under the caffe framework,size_splits or axis transformat to split_dim.Only one can effect.
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator SplitV.
-
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use SplitV instead.
 */
 REG_OP(SplitVD)
     .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
@@ -164,8 +158,7 @@ REG_OP(SplitVD)
 * Two inputs, including:
 * @li values: A list of Tensors. Must be one of the following types: int8, int16, int32,
 * int64, uint8, uint16, uint32, uint64, float16, float32.
-* Tensors to be concatenated. All must have size 1 in the first dimension and same shape.
-* It's a dynamic input.
+* Tensors to be concatenated. All must have size 1 in the first dimension and same shape.
 * @li shape: A Tensor of the same type as "x".
 * The final shape of the result. Should be equal to the shapes of any input
 * but with the number of input values in the first dimension . \n
@@ -314,7 +307,7 @@ REG_OP(Concat)
 *@par Inputs:
 * x: A list of N Tensors. Must be one of the following types: int8, int16, int32,
-* int64, uint8, uint16, uint32, uint64, float16, float32, bool . It's a dynamic input. \n
+* int64, uint8, uint16, uint32, uint64, float16, float32, bool . \n
 *@par Attributes:
 *@li axis: A optional int, defaultvalue is 0.
@@ -340,7 +333,7 @@ REG_OP(Pack)
 *@par Inputs:
 *Two inputs, including:
 * @li concat_dim: A Tensor of type int32.
-* @li x: A list of 1D Tensor objects of type int32 . It's a dynamic input. \n
+* @li x: A list of 1D Tensor objects of type int32 . \n
 *@par Attributes:
 *N: A required int . \n
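The ConcatOffset contract touched in the hunk below can be read as follows. A simplified sketch that returns only the concat-axis offset of each input; the real op, per its TF-compatible docs, emits a full per-input offset vector that is zero on every other axis, and the helper name here is illustrative:

    #include <cstdint>
    #include <vector>

    // Given the shapes of the N inputs of a Concat and the concat axis,
    // each input's data starts at the running sum of the preceding inputs'
    // sizes along that axis.
    std::vector<int64_t> ConcatOffsets(
        const std::vector<std::vector<int64_t>> &shapes, size_t concat_dim) {
      std::vector<int64_t> offsets(shapes.size(), 0);
      for (size_t i = 1; i < shapes.size(); ++i) {
        offsets[i] = offsets[i - 1] + shapes[i - 1][concat_dim];
      }
      return offsets;
    }
    // Example: shapes {{2,3},{2,4},{2,5}}, concat_dim=1 -> offsets {0, 3, 7}.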
@@ -364,7 +357,7 @@ REG_OP(ConcatOffset)
 *@par Inputs:
 *Two inputs, including:
 * @li concat_dim: A Tensor of type int32.
-* @li x: A list of 1D Tensor objects of type int32 . It's a dynamic input. \n
+* @li x: A list of 1D Tensor objects of type int32 . \n
 *@par Attributes:
 *@li Concat_dim: A required int. Must be within the rank of input "x".
diff --git a/third_party/fwkacllib/inc/ops/transformation_ops.h b/third_party/fwkacllib/inc/ops/transformation_ops.h
index 9338a636..eb5ae258 100644
--- a/third_party/fwkacllib/inc/ops/transformation_ops.h
+++ b/third_party/fwkacllib/inc/ops/transformation_ops.h
@@ -235,12 +235,8 @@ REG_OP(BatchToSpaceND)
 *@par Outputs:
 *y: A Tensor with format NC1HWC0. Has the same type as input "x".
-
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator BatchToSpaceND.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use BatchToSpaceND instead.
 */
 REG_OP(BatchToSpaceNDD)
     .INPUT(x, TensorType::BasicType())
@@ -287,9 +283,6 @@ REG_OP(SpaceToBatchND)
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator SpaceToBatchND.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use SpaceToBatchND instead.
 */
 REG_OP(SpaceToBatchNDD)
     .INPUT(x, TensorType::BasicType())
@@ -411,9 +404,6 @@ REG_OP(BatchToSpace)
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator BatchToSpace.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use BatchToSpace instead.
 */
 REG_OP(BatchToSpaceD)
     .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8,
@@ -467,9 +457,6 @@ REG_OP(SpaceToBatch)
 *y: A Tensor. Has the same type as input "x".
 *@par Third-party framework compatibility
 *@ Compatible with the TensorFlow operator SpaceToBatch.
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use SpaceToBatch instead.
 */
 REG_OP(SpaceToBatchD)
     .INPUT(x, TensorType::BasicType())
@@ -598,9 +585,6 @@ REG_OP(ExtractVolumePatches)
 *@par Outputs:
 *y: A Tensor. Has the same type as "x".
-*
-* @par Restrictions:
-* Warning: THIS FUNCTION IS DEPRECATED. Please use ConfusionTranspose instead.
 */
 REG_OP(ConfusionTransposeD)
     .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
diff --git a/third_party/fwkacllib/inc/toolchain/adx_datadump_server.h b/third_party/fwkacllib/inc/toolchain/adx_datadump_server.h
index a1c39a51..67adecd9 100644
--- a/third_party/fwkacllib/inc/toolchain/adx_datadump_server.h
+++ b/third_party/fwkacllib/inc/toolchain/adx_datadump_server.h
@@ -1,12 +1,18 @@
 /**
-* @file adx_datadump_server.h
-*
-* Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-*/
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 #ifndef ADX_DATADUMP_SERVER_H
 #define ADX_DATADUMP_SERVER_H
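As a closing reference for the transformation_ops.h hunks above, the SpaceToBatchND shape contract can be sketched as follows, under the assumption of {before, after} padding pairs and exact divisibility of the padded spatial dims (illustrative only, not the operator's shape-inference code):

    #include <cstdint>
    #include <vector>

    // Batch is multiplied by the block volume; each padded spatial dim is
    // divided by its block size. in_shape is [batch, spatial..., remaining...].
    std::vector<int64_t> SpaceToBatchNDShape(
        const std::vector<int64_t> &in_shape,
        const std::vector<int64_t> &block_shape,
        const std::vector<std::vector<int64_t>> &paddings) {
      std::vector<int64_t> out = in_shape;
      for (size_t i = 0; i < block_shape.size(); ++i) {
        out[0] *= block_shape[i];  // batch grows by the block volume
        out[i + 1] =
            (in_shape[i + 1] + paddings[i][0] + paddings[i][1]) / block_shape[i];
      }
      return out;
    }
    // Example: in {1,4,4,1}, block {2,2}, zero padding -> out {4,2,2,1}.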