/**
* Copyright (c) Huawei Technologies Co., Ltd. 2020-2021. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/*!
* \file experiment_ops.h
* \brief Prototype definitions of experimental operators.
*/
#ifndef OPS_BUILT_IN_OP_PROTO_INC_EXPERIMENT_OPS_H_
#define OPS_BUILT_IN_OP_PROTO_INC_EXPERIMENT_OPS_H_

#include "graph/operator_reg.h"
namespace ge {
/**
* @brief Updates "var" according to the AdamW algorithm.
*
* @attention Constraints:
* The input tensors must have the same shape.
*
* @par Inputs:
* @li var: A mutable Tensor of the type TensorType::NumberType().
* Should be from a Variable().
* @li m: A mutable Tensor of the same type as "var". First moment estimate.
* Should be from a Variable().
* @li v: A mutable Tensor of the same type as "var". Second moment estimate.
* Should be from a Variable().
* @li beta1_power: A scalar of the same type as "var".
* @li beta2_power: A scalar of the same type as "var".
* @li lr: learning_rate. A scalar of the same type as "var".
* @li weight_decay: Weight decay coefficient. A scalar of the same type as "var".
* @li beta1: A scalar of the same type as "var".
* @li beta2: A scalar of the same type as "var".
* @li epsilon: A scalar of the same type as "var".
* @li grad: A Tensor of the same type as "var", for the gradient.
* @li max_grad_norm: A mutable Tensor of the same type as "var", an optional input.
* Should be from a Variable(). Required when "amsgrad" is "True".
*
* @par Attributes:
* @li amsgrad: An optional bool. Defaults to "False".
* If "True", max_grad_norm input and output must be entered.
* @li maximize: An optional bool. Defaults to "False".
*
* @par Outputs:
* @li var: A mutable tensor. Has the same type as input "var".
* @li m: A mutable tensor. Has the same type as input "m".
* @li v: A mutable tensor. Has the same type as input "v". \n
*/
REG_OP(ApplyAdamW)
    .INPUT(var, TensorType::NumberType())
    .INPUT(m, TensorType::NumberType())
    .INPUT(v, TensorType::NumberType())
    .INPUT(beta1_power, TensorType::NumberType())
    .INPUT(beta2_power, TensorType::NumberType())
    .INPUT(lr, TensorType::NumberType())
    .INPUT(weight_decay, TensorType::NumberType())
    .INPUT(beta1, TensorType::NumberType())
    .INPUT(beta2, TensorType::NumberType())
    .INPUT(epsilon, TensorType::NumberType())
    .INPUT(grad, TensorType::NumberType())
    .OPTIONAL_INPUT(max_grad_norm, TensorType::NumberType())
    .OUTPUT(var, TensorType::NumberType())
    .OUTPUT(m, TensorType::NumberType())
    .OUTPUT(v, TensorType::NumberType())
    .ATTR(amsgrad, Bool, false)
    .ATTR(maximize, Bool, false)
    .OP_END_FACTORY_REG(ApplyAdamW)
-
/**
* @brief Multiplies matrix "a" by matrix "b", producing "a * b". \n
* @par Inputs:
* Four inputs, including:
* @li x1: A matrix Tensor. Must be one of the following types: float32,
* float16, int32, int8, int4, bf16. 3D. Has format ND.
* @li x2: A matrix Tensor. Must be one of the following types: float32,
* float16, int32, int8, int4, bf16. 3D. Has format ND.
* @li bias: An optional Tensor. Must be one of the following types:
* float32, float16, int32, bf16. 1D. Has format ND.
* @li offset_w: An optional Tensor. Must be one of the following types:
* int8, int4. Has format ND. \n

* @par Attributes:
* Four attributes, including:
* @li perm_x1: A list int. "x1" is permuted to shape [B, M, K] before multiplication.
* @li perm_x2: A list int. "x2" is permuted to shape [B, K, N] before multiplication.
* @li perm_y: A list int. "y" is permuted after multiplication.
* @li offset_x: An optional integer for quantized TransposeBatchMatMul.
* The negative offset added to the input "x1" for int8, int4 type. Ensure offset_x
* within the effective range of input data type. Defaults to "0". \n

* @par Outputs:
* y: The result matrix Tensor. 3D. Must be one of the following
* types: float32, float16, int32, bf16. 3D. Has format ND. \n
*/
REG_OP(TransposeBatchMatMul)
    .INPUT(x1, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8, DT_INT4, DT_BF16}))
    .INPUT(x2, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8, DT_INT4, DT_BF16}))
    .OPTIONAL_INPUT(bias, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_BF16}))
    .OPTIONAL_INPUT(offset_w, TensorType({DT_INT8, DT_INT4}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_BF16}))
    .ATTR(perm_x1, ListInt, {})
    .ATTR(perm_x2, ListInt, {})
    .ATTR(perm_y, ListInt, {})
    .ATTR(offset_x, Int, 0)
    .OP_END_FACTORY_REG(TransposeBatchMatMul)
-
/**
* @brief Performs non-maximum suppression (NMS) on the rotated boxes according
* to their intersection-over-union (IoU). Rotated NMS iteratively removes lower
* scoring rotated boxes which have an IoU greater than iou_threshold with
* another (higher scoring) rotated box.

* @par Inputs:
* Three inputs, including:
* @li boxes: A 2D Tensor of float16 or float32 with shape (N, 5). Rotated boxes to
* perform NMS on. They are expected to be in (x1, y1, x2, y2, angle_degrees) format.
* @li scores: A 1D Tensor of float16 or float32 with shape (N). Scores for each one of
* the rotated boxes.
* @li labels: A 1D Tensor of int32 or int64 with shape (N). Labels for each one of
* the rotated boxes.

* @par Attributes:
* iou_threshold: A required float attribute. Discards all overlapping rotated
* boxes with IoU greater than iou_threshold.

* @par Outputs:
* Two outputs, including:
* @li selected_detections: A 2D Tensor of float16 or float32 with shape (N, 5).
* The selected boxes that are kept by Rotated NMS, sorted in decreasing order of scores.
* @li keep_indices: A 1D Tensor of int32 or int64 with shape (N). The indices of
* selected_detections.

* @attention Constraints:
* Currently, the tensor type of input (boxes, scores) only supports float.
* The tensor type of keep_indices only supports int32.
*/
REG_OP(RotatedNMS)
    .INPUT(boxes, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(scores, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(labels, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(selected_detections, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(keep_indices, TensorType({DT_INT32, DT_INT64}))
    .REQUIRED_ATTR(iou_threshold, Float)
    .OP_END_FACTORY_REG(RotatedNMS)
} // namespace ge

#endif // OPS_BUILT_IN_OP_PROTO_INC_EXPERIMENT_OPS_H_