/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef GE_OP_NN_OTHER_OPS_H
#define GE_OP_NN_OTHER_OPS_H
#include "../graph/operator_reg.h"

namespace ge {
/**
 *@brief Computes the Gauss error function of "x" element-wise.

 *@par Inputs:
 *x: A Tensor of floating-point type.

 *@par Outputs:
 *y: A Tensor. Has the same type as "x".
 */
REG_OP(Erf)
    .INPUT(x, TensorType::FloatingDataType())
    .OUTPUT(y, TensorType::FloatingDataType())
    .OP_END_FACTORY_REG(Erf)

/**
 *@brief Computes the complementary error function of "x" element-wise.

 *@par Inputs:
 *x: A Tensor of floating-point type.

 *@par Outputs:
 *y: A Tensor. Has the same type as "x".
 */
REG_OP(Erfc)
    .INPUT(x, TensorType::FloatingDataType())
    .OUTPUT(y, TensorType::FloatingDataType())
    .OP_END_FACTORY_REG(Erfc)

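/*
 * A minimal graph-construction sketch for the two ops above. This is an
 * illustration only; it assumes the ge::op::Erf wrapper class that REG_OP
 * generates and the Graph API from "graph/graph.h" (not included here):
 *
 *   ge::op::Data x = ge::op::Data("x");      // graph input placeholder
 *   ge::op::Erf erf = ge::op::Erf("erf");
 *   erf.set_input_x(x);                      // wire "x" into Erf
 *   ge::Graph graph("erf_graph");
 *   graph.SetInputs({x}).SetOutputs({erf});
 */
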
/**
 *@brief This operation returns a rank-1 histogram counting the number of entries in "x" \n
 * that fall into each bin. The bins are of equal width, determined by the arguments \n
 * "range" and "nbins". \n

 *@par Inputs:
 *Three inputs, including: \n
 *@li x: A Tensor of type float32, float16 or int32.
 *@li range: A Tensor of type float32, float16 or int32. Defines the range of values to be counted.
 *@li nbins: A Tensor of type int32. The number of histogram bins.

 *@par Attributes:
 *dtype: An optional attribute. Defaults to "int32".

 *@par Outputs:
 *y: A Tensor of type int32.
 */
REG_OP(HistogramFixedWidth)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
    .INPUT(range, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
    .INPUT(nbins, TensorType({DT_INT32}))
    .OUTPUT(y, TensorType({DT_INT32}))
    .ATTR(dtype, String, "int32")
    .OP_END_FACTORY_REG(HistogramFixedWidth)

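/*
 * A reference-semantics sketch of the equal-width binning, for illustration
 * only (it assumes "range" carries the two boundary values {lo, hi}):
 *
 *   std::vector<int32_t> hist(nbins, 0);
 *   const float width = (hi - lo) / nbins;         // equal-width bins
 *   for (float v : x) {
 *     int32_t idx = static_cast<int32_t>((v - lo) / width);
 *     idx = std::max(0, std::min(nbins - 1, idx)); // out-of-range values
 *     ++hist[idx];                                 // clamp to the edge bins
 *   }
 */
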
/**
 *@brief This operation returns a rank-1 histogram counting the number of entries in "x" \n
 * that fall into each bin. The bins are of equal width, determined by the arguments \n
 * "range" and "nbins". \n

 *@par Inputs:
 *Two inputs, including: \n
 *@li x: A Tensor of type float32, float16 or int32.
 *@li range: A Tensor of type float32, float16 or int32. Defines the range of values to be counted.

 *@par Attributes:
 *@li dtype: An optional attribute. Defaults to "int32".
 *@li nbins: A required attribute of type int. The number of histogram bins.

 *@par Outputs:
 *y: A Tensor of type int32.
 */
REG_OP(HistogramFixedWidthD)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
    .INPUT(range, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
    .OUTPUT(y, TensorType({DT_INT32}))
    .REQUIRED_ATTR(nbins, Int)
    .ATTR(dtype, String, "int32")
    .OP_END_FACTORY_REG(HistogramFixedWidthD)

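/*
 * The "D" variant takes "nbins" as a compile-time attribute instead of a
 * runtime tensor. A hedged construction sketch, assuming the generated
 * ge::op::HistogramFixedWidthD wrapper and its set_input_* / set_attr_*
 * accessors:
 *
 *   ge::op::HistogramFixedWidthD hist("hist");
 *   hist.set_input_x(x);
 *   hist.set_input_range(range);
 *   hist.set_attr_nbins(256);   // required attribute; 256 is illustrative
 */
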
/**
 *@brief LayerNorm operator interface implementation.
 * Calculates on inputs x, gamma and beta:
 *   mean = np.mean(x, reduce_axis, keepdims=True)
 *   variance = np.mean(np.power((x - mean), 2), reduce_axis, keepdims=True)
 *   y = gamma * ((x - mean) / np.sqrt(variance + 0.001)) + beta

 *@par Inputs:
 *Three inputs, including:
 * @li x: A Tensor. Must be one of the following types: float16, float32.
 * @li gamma: A Tensor. Must be one of the following types: float16, float32.
 * @li beta: A Tensor. Must be one of the following types: float16, float32.

 *@par Attributes:
 * @li begin_norm_axis: An optional attribute of type int, the first axis to normalize over. Defaults to 0.
 * @li begin_params_axis: An optional attribute of type int, the first axis to which "gamma" and "beta" are applied. Defaults to 0.

 *@par Outputs:
 *Three outputs, including:
 * @li y: A Tensor. Must be one of the following types: float16, float32.
 * @li mean: A Tensor. Must be one of the following types: float16, float32.
 * @li variance: A Tensor. Must be one of the following types: float16, float32.
 */
REG_OP(LayerNorm)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(gamma, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(beta, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(mean, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(variance, TensorType({DT_FLOAT, DT_FLOAT16}))
    .ATTR(begin_norm_axis, Int, 0)
    .ATTR(begin_params_axis, Int, 0)
    .OP_END_FACTORY_REG(LayerNorm)

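/*
 * A shape-semantics sketch, assuming the conventional meaning of
 * begin_norm_axis / begin_params_axis: for x of shape [N, S, H] with
 * begin_norm_axis = 2 and begin_params_axis = 2, the statistics are reduced
 * over the trailing axes starting at axis 2, so:
 *
 *   x:        [N, S, H]
 *   mean:     [N, S, 1]   // keepdims=True over the normalized axes
 *   variance: [N, S, 1]
 *   gamma:    [H]         // broadcast over the leading axes
 *   beta:     [H]
 *   y:        [N, S, H]
 */
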
/**
 *@brief LayerNormGrad operator interface implementation.
 * Calculates on inputs dy, x, variance, mean and gamma:
 *   pd_xl = data_dy * data_gamma
 *   pd_var = np.sum(((-0.5) * pd_xl * (data_x - data_mean)
 *            * np.power((data_variance + EPSLON), (-1.5))),
 *            reduce_axis, keepdims=True)
 *   pd_mean = np.sum(((-1.0) * pd_xl
 *             * np.power((data_variance + EPSLON), (-0.5))),
 *             reduce_axis, keepdims=True)
 *             + pd_var * (1.0 / m)
 *             * np.sum(((-2.0) * (data_x - data_mean)), reduce_axis, keepdims=True)
 *   pd_x = pd_xl * np.power((data_variance + EPSLON), (-0.5))
 *          + pd_var * (2.0 / m) * (data_x - data_mean) + pd_mean * (1.0 / m)
 *   pd_gamma = np.sum((data_dy * (data_x - data_mean)
 *              * np.power((data_variance + EPSLON), (-0.5))), param_axis, keepdims=True)
 *   pd_beta = np.sum(data_dy, param_axis, keepdims=True)

 *@par Inputs:
 *Five inputs, including:
 * @li dy: A Tensor. Must be one of the following types: float16, float32.
 * @li x: A Tensor. Must be one of the following types: float16, float32.
 * @li variance: A Tensor. Must be one of the following types: float16, float32.
 * @li mean: A Tensor. Must be one of the following types: float16, float32.
 * @li gamma: A Tensor. Must be one of the following types: float16, float32.

 *@par Outputs:
 *Three outputs, including:
 * @li pd_x: A Tensor. Must be one of the following types: float16, float32.
 * @li pd_gamma: A Tensor. Must be one of the following types: float16, float32.
 * @li pd_beta: A Tensor. Must be one of the following types: float16, float32.
 */
REG_OP(LayerNormGrad)
    .INPUT(dy, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(variance, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(mean, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(gamma, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(pd_x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(pd_gamma, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(pd_beta, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OP_END_FACTORY_REG(LayerNormGrad)

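/*
 * A hedged wiring sketch for training graphs, assuming the generated
 * ge::op::LayerNormGrad wrapper; "dy", "x", "variance", "mean" and "gamma"
 * stand for operators defined elsewhere in the graph:
 *
 *   ge::op::LayerNormGrad ln_grad("ln_grad");
 *   ln_grad.set_input_dy(dy);              // upstream gradient
 *   ln_grad.set_input_x(x);                // forward-pass input
 *   ln_grad.set_input_variance(variance);  // forward-pass statistics
 *   ln_grad.set_input_mean(mean);
 *   ln_grad.set_input_gamma(gamma);
 */
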
/**
 *@brief LayerNormXBackprop operator interface implementation.
 * Calculates on inputs dy, x, variance, mean and gamma:
 *   pd_xl = data_dy * data_gamma
 *   pd_var = np.sum(((-0.5) * pd_xl * (data_x - data_mean)
 *            * np.power((data_variance + EPSLON), (-1.5))),
 *            reduce_axis, keepdims=True)
 *   pd_mean = np.sum(((-1.0) * pd_xl
 *             * np.power((data_variance + EPSLON), (-0.5))),
 *             reduce_axis, keepdims=True)
 *             + pd_var * (1.0 / m)
 *             * np.sum(((-2.0) * (data_x - data_mean)), reduce_axis, keepdims=True)
 *   pd_x = pd_xl * np.power((data_variance + EPSLON), (-0.5))
 *          + pd_var * (2.0 / m) * (data_x - data_mean) + pd_mean * (1.0 / m)

 *@par Inputs:
 *Five inputs, including:
 * @li dy: A Tensor. Must be one of the following types: float16, float32.
 * @li x: A Tensor. Must be one of the following types: float16, float32.
 * @li variance: A Tensor. Must be one of the following types: float16, float32.
 * @li mean: A Tensor. Must be one of the following types: float16, float32.
 * @li gamma: A Tensor. Must be one of the following types: float16, float32.

 *@par Outputs:
 *pd_x: A Tensor. Must be one of the following types: float16, float32.
 */
REG_OP(LayerNormXBackprop)
    .INPUT(dy, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(variance, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(mean, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(gamma, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(pd_x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OP_END_FACTORY_REG(LayerNormXBackprop)

/**
 *@brief LayerNormBetaGammaBackprop operator interface implementation.
 * Calculates on inputs dy, x, variance and mean:
 *   pd_gamma = np.sum((data_dy * (data_x - data_mean)
 *              * np.power((data_variance + EPSLON), (-0.5))), param_axis, keepdims=True)
 *   pd_beta = np.sum(data_dy, param_axis, keepdims=True)

 *@par Inputs:
 *Four inputs, including:
 * @li dy: A Tensor. Must be one of the following types: float16, float32.
 * @li x: A Tensor. Must be one of the following types: float16, float32.
 * @li variance: A Tensor. Must be one of the following types: float16, float32.
 * @li mean: A Tensor. Must be one of the following types: float16, float32.

 *@par Attributes:
 *shape_gamma: A required attribute, a list of ints specifying the shape of "gamma".

 *@par Outputs:
 *Two outputs, including:
 * @li pd_gamma: A Tensor. Must be one of the following types: float16, float32.
 * @li pd_beta: A Tensor. Must be one of the following types: float16, float32.
 */
REG_OP(LayerNormBetaGammaBackprop)
    .INPUT(dy, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(variance, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(mean, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(pd_gamma, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(pd_beta, TensorType({DT_FLOAT, DT_FLOAT16}))
    .REQUIRED_ATTR(shape_gamma, ListInt)
    .OP_END_FACTORY_REG(LayerNormBetaGammaBackprop)

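/*
 * Judging from the signatures above, LayerNormXBackprop and
 * LayerNormBetaGammaBackprop together cover the same gradients as
 * LayerNormGrad, split so the data gradient and the parameter gradients can
 * be produced by separate nodes (a reading of the op definitions, not a
 * documented guarantee). A hedged usage sketch:
 *
 *   ge::op::LayerNormXBackprop x_bp("ln_x_bp");
 *   x_bp.set_input_dy(dy);
 *   x_bp.set_input_x(x);
 *   x_bp.set_input_variance(variance);
 *   x_bp.set_input_mean(mean);
 *   x_bp.set_input_gamma(gamma);
 *
 *   ge::op::LayerNormBetaGammaBackprop bg_bp("ln_bg_bp");
 *   bg_bp.set_input_dy(dy);
 *   bg_bp.set_input_x(x);
 *   bg_bp.set_input_variance(variance);
 *   bg_bp.set_input_mean(mean);
 *   bg_bp.set_attr_shape_gamma({1024});   // hypothetical hidden size
 */
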
/**
 *@brief Returns "y" according to the dropout_do_mask algorithm: \n
 *   scale_x = x * (1 / keep_prob)
 *   y = select(mask == 1, scale_x, 0)

 *@par Inputs:
 *Three inputs, including: \n
 * @li x: A mutable Tensor. Must be one of the following types:
 * float16, float32.
 * @li mask: A mutable Tensor. Must meet all of the following rules:
 * the shape of "mask" should be 1D;
 * the dtype of "mask" should be uint8;
 * the value of the shape should meet the following formula:
 * value = (size(x) + 128 - 1) // 128 * 128 // 8.
 * @li keep_prob: A mutable Tensor. Must meet all of the following rules:
 * the shape of "keep_prob" should be (1,) or [1,];
 * it has the same type as "x".

 *@par Outputs:
 *y: A mutable Tensor. Has the same type as "x".
 */
REG_OP(DropOutDoMask)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(mask, TensorType({DT_UINT8}))
    .INPUT(keep_prob, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OP_END_FACTORY_REG(DropOutDoMask)

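/*
 * The mask-length formula above pads size(x) up to a multiple of 128 and
 * divides by 8, which is consistent with one mask bit per element of "x",
 * padded to 128-bit granularity. A host-side sketch of the byte count:
 *
 *   // number of uint8 mask bytes for a tensor of num_elems elements
 *   size_t mask_bytes = (num_elems + 128 - 1) / 128 * 128 / 8;
 */
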
}  // namespace ge

#endif  // GE_OP_NN_OTHER_OPS_H