sdca_ops.h
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/*!
* \file sdca_ops.h
* \brief
*/
#ifndef OPS_BUILT_IN_OP_PROTO_INC_SDCA_OPS_H_
#define OPS_BUILT_IN_OP_PROTO_INC_SDCA_OPS_H_

#include "graph/operator.h"
#include "graph/operator_reg.h"

namespace ge {
/**
*@brief Distributed version of the Stochastic Dual Coordinate Ascent (SDCA) optimizer for
*linear models with L1 + L2 regularization. Because the global optimization objective is
*strongly convex, the optimizer optimizes the dual objective at each step. The
*optimizer applies each update one example at a time. Examples are sampled
*uniformly, and the optimizer is learning-rate free and enjoys a linear convergence
*rate. \n
*@par Inputs:
*@li sparse_example_indices: A list of vectors which contain example indices. It's a dynamic input.
*@li sparse_feature_indices: A list of vectors which contain feature indices. It's a dynamic input.
*@li sparse_feature_values: A list of vectors which contain the feature values associated with each feature group. It's a dynamic input.
*@li dense_features: A list of matrices which contain the dense feature values. It's a dynamic input.
*@li example_weights: A vector which contains the weight associated with each example.
*@li example_labels: A vector which contains the label/target associated with each example.
*@li sparse_indices: A list of vectors where each value is an index that has a
*corresponding weight in sparse_weights. This field may be omitted for the dense approach. It's a dynamic input.
*@li sparse_weights: A list of vectors where each value is the weight associated with a sparse feature group. It's a dynamic input.
*@li dense_weights: A list of vectors where the values are the weights associated with a dense feature group. It's a dynamic input.
*@li example_state_data: A list of vectors containing the example state data. \n
*@par Attributes:
*@li loss_type: Type of the primal loss. Currently SdcaSolver supports logistic, squared and hinge losses.
*@li l1: Symmetric l1 regularization strength.
*@li l2: Symmetric l2 regularization strength.
*@li num_loss_partitions: Number of partitions of the global loss function.
*@li num_inner_iterations: Number of iterations per mini-batch. \n
*@par Outputs:
*@li out_example_state_data: A list of vectors containing the updated example state data.
*@li out_delta_sparse_weights: A list of vectors where each value is the delta weight associated with a sparse feature group.
*@li out_delta_dense_weights: A list of vectors where the values are the delta weights associated with a dense feature group. \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow SdcaOptimizerV2 operator.
*/
REG_OP(SdcaOptimizerV2)
    .DYNAMIC_INPUT(sparse_example_indices, TensorType({DT_INT64}))
    .DYNAMIC_INPUT(sparse_feature_indices, TensorType({DT_INT64}))
    .DYNAMIC_INPUT(sparse_feature_values, TensorType({DT_FLOAT}))
    .DYNAMIC_INPUT(dense_features, TensorType({DT_FLOAT}))
    .INPUT(example_weights, TensorType({DT_FLOAT}))
    .INPUT(example_labels, TensorType({DT_FLOAT}))
    .DYNAMIC_INPUT(sparse_indices, TensorType({DT_INT64}))
    .DYNAMIC_INPUT(sparse_weights, TensorType({DT_FLOAT}))
    .DYNAMIC_INPUT(dense_weights, TensorType({DT_FLOAT}))
    .INPUT(example_state_data, TensorType({DT_FLOAT}))
    .OUTPUT(out_example_state_data, TensorType({DT_FLOAT}))
    .DYNAMIC_OUTPUT(out_delta_sparse_weights, TensorType({DT_FLOAT}))
    .DYNAMIC_OUTPUT(out_delta_dense_weights, TensorType({DT_FLOAT}))
    .ATTR(adaptive, Bool, false)
    .ATTR(num_sparse_features, Int, 0)
    .ATTR(num_sparse_features_with_values, Int, 0)
    .ATTR(num_dense_features, Int, 0)
    .ATTR(num_loss_partitions, Int, 1)
    .ATTR(num_inner_iterations, Int, 1)
    .ATTR(loss_type, String, "logistic_loss")
    .ATTR(l1, Float, 0.5)
    .ATTR(l2, Float, 0.5)
    .OP_END_FACTORY_REG(SdcaOptimizerV2)
}  // namespace ge

#endif  // OPS_BUILT_IN_OP_PROTO_INC_SDCA_OPS_H_
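A minimal usage sketch follows. It is not part of the header above; it assumes that the class generated by REG_OP(SdcaOptimizerV2) follows the usual GE operator_reg.h conventions (per-input setters named set_input_*, create_dynamic_input_* / set_dynamic_input_* for dynamic inputs, create_dynamic_output_* for dynamic outputs, and set_attr_* for attributes), and for brevity it wires exactly one sparse and one dense feature group.

// Illustrative sketch only: method names are assumed from the usual REG_OP-generated
// conventions; verify against the generated operator class before use.
#include "graph/operator.h"
#include "sdca_ops.h"  // declares ge::op::SdcaOptimizerV2 via REG_OP above

// Builds an SdcaOptimizerV2 node from caller-provided producer operators.
ge::Operator BuildSdcaOptimizer(ge::Operator &example_idx, ge::Operator &feature_idx,
                                ge::Operator &feature_val, ge::Operator &dense_feat,
                                ge::Operator &example_weights, ge::Operator &example_labels,
                                ge::Operator &sparse_idx, ge::Operator &sparse_w,
                                ge::Operator &dense_w, ge::Operator &state) {
  ge::op::SdcaOptimizerV2 sdca("sdca_optimizer");

  // Dynamic inputs/outputs are sized first, then wired by index (one group here).
  sdca.create_dynamic_input_sparse_example_indices(1);
  sdca.set_dynamic_input_sparse_example_indices(0, example_idx);
  sdca.create_dynamic_input_sparse_feature_indices(1);
  sdca.set_dynamic_input_sparse_feature_indices(0, feature_idx);
  sdca.create_dynamic_input_sparse_feature_values(1);
  sdca.set_dynamic_input_sparse_feature_values(0, feature_val);
  sdca.create_dynamic_input_dense_features(1);
  sdca.set_dynamic_input_dense_features(0, dense_feat);
  sdca.create_dynamic_input_sparse_indices(1);
  sdca.set_dynamic_input_sparse_indices(0, sparse_idx);
  sdca.create_dynamic_input_sparse_weights(1);
  sdca.set_dynamic_input_sparse_weights(0, sparse_w);
  sdca.create_dynamic_input_dense_weights(1);
  sdca.set_dynamic_input_dense_weights(0, dense_w);
  sdca.create_dynamic_output_out_delta_sparse_weights(1);
  sdca.create_dynamic_output_out_delta_dense_weights(1);

  // Static inputs.
  sdca.set_input_example_weights(example_weights);
  sdca.set_input_example_labels(example_labels);
  sdca.set_input_example_state_data(state);

  // Attributes; values mirror the registered defaults and the single group wired above.
  sdca.set_attr_num_sparse_features(1);
  sdca.set_attr_num_sparse_features_with_values(1);
  sdca.set_attr_num_dense_features(1);
  sdca.set_attr_loss_type("logistic_loss");
  sdca.set_attr_l1(0.5f);
  sdca.set_attr_l2(0.5f);
  return sdca;
}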

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware and acts as the bridge between them. GE takes the graph delivered by ME as input, performs a series of deep graph optimizations, and finally outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training/inference, GE is invoked automatically and is transparent to the user. GE mainly consists of two parts, GE API and GE Core; the detailed architecture diagram is shown below.
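For readers who want to drive GE directly rather than through MindSpore, the GE API surface roughly follows an initialize, create session, add graph, run graph flow. The sketch below illustrates that flow under the assumption that the entry points declared in ge/ge_api.h (GEInitialize, GEFinalize, Session::AddGraph, Session::RunGraph) behave as described; option keys and error handling are simplified.

// Rough sketch of the GE API flow; consult ge/ge_api.h for the authoritative interface.
#include <map>
#include <string>
#include <vector>
#include "ge/ge_api.h"
#include "graph/graph.h"
#include "graph/tensor.h"

int RunGraphOnce(const ge::Graph &graph, const std::vector<ge::Tensor> &inputs) {
  std::map<std::string, std::string> options;  // global / session options (left empty here)
  if (ge::GEInitialize(options) != ge::SUCCESS) {
    return -1;
  }
  int ret = 0;
  {
    ge::Session session(options);
    const uint32_t graph_id = 0;
    std::vector<ge::Tensor> outputs;
    if (session.AddGraph(graph_id, graph) != ge::SUCCESS ||
        session.RunGraph(graph_id, inputs, outputs) != ge::SUCCESS) {
      ret = -1;
    }
  }  // session destroyed before GE is finalized
  ge::GEFinalize();
  return ret;
}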