From 3dfd2119c1317dcd08483d9b91310092c306b5fb Mon Sep 17 00:00:00 2001 From: yanghaoran Date: Wed, 28 Jul 2021 16:04:35 +0800 Subject: [PATCH] sync code 0728 --- CMakeLists.txt | 3 +- cmake/external_libs/json.cmake | 4 - ge/ge_runtime/CMakeLists.txt | 1 + ge/ge_runtime/task/hccl_task.cc | 16 +- ge/ge_runtime/task/label_goto_task.cc | 56 +- ge/ge_runtime/task/label_goto_task.h | 16 +- ge/ge_runtime/task/label_manager.cc | 119 + ge/ge_runtime/task/label_manager.h | 54 + ge/ge_runtime/task/label_switch_task.cc | 25 +- ge/ge_runtime/task/label_switch_task.h | 6 +- inc/external/acl/acl.h | 82 + inc/external/acl/acl_base.h | 638 +++++ inc/external/acl/acl_mdl.h | 1225 ++++++++++ inc/external/acl/acl_op.h | 504 ++++ inc/external/acl/acl_op_compiler.h | 121 + inc/external/acl/acl_prof.h | 329 +++ inc/external/acl/acl_rt.h | 958 ++++++++ inc/external/acl/acl_tdt.h | 276 +++ inc/external/acl/error_codes/ge_error_codes.h | 75 + inc/external/acl/error_codes/rt_error_codes.h | 109 + inc/external/acl/ops/acl_cblas.h | 334 +++ inc/external/acl/ops/acl_dvpp.h | 2568 ++++++++++++++++++++ inc/external/acl/ops/acl_fv.h | 348 +++ inc/external/hccl/hccl.h | 159 ++ inc/external/hccl/hccl_types.h | 101 + inc/external/runtime/rt_error_codes.h | 109 + inc/framework/ge_runtime/task_info.h | 5 +- metadef | 2 +- scripts/format_source_code.sh | 107 + third_party/fwkacllib/inc/cce/taskdown_common.hpp | 19 +- .../inc/external/runtime/rt_error_codes.h | 0 third_party/fwkacllib/inc/hccl/base.h | 36 +- third_party/fwkacllib/inc/hccl/hccl_types.h | 101 - third_party/fwkacllib/inc/hccl/hcom.h | 14 + third_party/fwkacllib/inc/mmpa/mmpa_api.h | 1 + .../fwkacllib/inc/mmpa/sub_inc/mmpa_linux.h | 4 + third_party/fwkacllib/inc/mmpa/sub_inc/mmpa_win.h | 4 + third_party/fwkacllib/inc/ops/aipp.h | 4 +- third_party/fwkacllib/inc/ops/all_ops.h | 3 +- third_party/fwkacllib/inc/ops/array_ops.h | 104 +- third_party/fwkacllib/inc/ops/audio_ops.h | 2 +- third_party/fwkacllib/inc/ops/avg_pool_1d_ops.h | 58 + 
third_party/fwkacllib/inc/ops/batch_ops.h | 21 +- third_party/fwkacllib/inc/ops/bitwise_ops.h | 31 +- third_party/fwkacllib/inc/ops/boosted_trees_ops.h | 2 +- .../fwkacllib/inc/ops/candidate_sampling_ops.h | 2 +- third_party/fwkacllib/inc/ops/condtake_ops.h | 2 +- third_party/fwkacllib/inc/ops/control_flow_ops.h | 12 +- third_party/fwkacllib/inc/ops/correlation.h | 52 + third_party/fwkacllib/inc/ops/ctc_ops.h | 83 +- third_party/fwkacllib/inc/ops/data_flow_ops.h | 89 +- .../fwkacllib/inc/ops/elewise_calculation_ops.h | 527 +++- third_party/fwkacllib/inc/ops/functional_ops.h | 2 +- third_party/fwkacllib/inc/ops/get_data_ops.h | 2 +- third_party/fwkacllib/inc/ops/globalavgpool.h | 49 + third_party/fwkacllib/inc/ops/hcom_ops.h | 135 +- third_party/fwkacllib/inc/ops/hvd_ops.h | 2 +- third_party/fwkacllib/inc/ops/image_ops.h | 653 ++++- third_party/fwkacllib/inc/ops/internal_ops.h | 2 +- third_party/fwkacllib/inc/ops/linalg_ops.h | 138 +- third_party/fwkacllib/inc/ops/list_ops.h | 504 ++++ third_party/fwkacllib/inc/ops/logging_ops.h | 2 +- third_party/fwkacllib/inc/ops/lookup_ops.h | 2 +- third_party/fwkacllib/inc/ops/math_ops.h | 283 ++- .../fwkacllib/inc/ops/matrix_calculation_ops.h | 336 ++- third_party/fwkacllib/inc/ops/nn_batch_norm_ops.h | 134 +- third_party/fwkacllib/inc/ops/nn_calculation_ops.h | 448 +++- third_party/fwkacllib/inc/ops/nn_detect_ops.h | 602 +++-- third_party/fwkacllib/inc/ops/nn_norm_ops.h | 777 +++++- third_party/fwkacllib/inc/ops/nn_ops.h | 141 +- third_party/fwkacllib/inc/ops/nn_pooling_ops.h | 488 +++- third_party/fwkacllib/inc/ops/nn_training_ops.h | 51 +- third_party/fwkacllib/inc/ops/no_op.h | 2 +- third_party/fwkacllib/inc/ops/nonlinear_fuc_ops.h | 408 +++- third_party/fwkacllib/inc/ops/npu_loss_scale_ops.h | 2 +- third_party/fwkacllib/inc/ops/outfeed_ops.h | 2 +- third_party/fwkacllib/inc/ops/pad_ops.h | 160 +- third_party/fwkacllib/inc/ops/parsing_ops.h | 242 +- third_party/fwkacllib/inc/ops/quantize_ops.h | 31 +- 
third_party/fwkacllib/inc/ops/ragged_array_ops.h | 2 +- .../fwkacllib/inc/ops/ragged_conversion_ops.h | 2 +- third_party/fwkacllib/inc/ops/ragged_math_ops.h | 2 +- third_party/fwkacllib/inc/ops/random_ops.h | 91 +- third_party/fwkacllib/inc/ops/reduce_ops.h | 279 ++- .../fwkacllib/inc/ops/resource_variable_ops.h | 2 +- third_party/fwkacllib/inc/ops/rnn.h | 595 ++++- third_party/fwkacllib/inc/ops/rpn_ops.h | 2 +- third_party/fwkacllib/inc/ops/save_ops.h | 2 +- third_party/fwkacllib/inc/ops/sdca_ops.h | 2 +- third_party/fwkacllib/inc/ops/selection_ops.h | 426 +++- third_party/fwkacllib/inc/ops/set_ops.h | 2 +- third_party/fwkacllib/inc/ops/sparse_ops.h | 8 +- third_party/fwkacllib/inc/ops/spectral_ops.h | 98 +- .../fwkacllib/inc/ops/split_combination_ops.h | 26 +- third_party/fwkacllib/inc/ops/state_ops.h | 2 +- .../fwkacllib/inc/ops/stateful_random_ops.h | 2 +- .../fwkacllib/inc/ops/stateless_random_ops.h | 2 +- third_party/fwkacllib/inc/ops/string_ops.h | 382 ++- third_party/fwkacllib/inc/ops/swap_co_ops.h | 2 +- .../fwkacllib/inc/ops/target_crop_and_resize.h | 2 +- third_party/fwkacllib/inc/ops/transformation_ops.h | 271 ++- .../fwkacllib/inc/ops/warp_perspective_ops.h | 2 +- third_party/fwkacllib/inc/runtime/event.h | 5 + third_party/fwkacllib/inc/runtime/rt.h | 1 + third_party/fwkacllib/inc/runtime/rt_stars.h | 85 + third_party/fwkacllib/inc/tdt/tsd_client.h | 82 - .../fwkacllib/inc/toolchain/adx_datadump_server.h | 22 +- third_party/fwkacllib/inc/toolchain/prof_acl_api.h | 208 +- .../fwkacllib/inc/toolchain/prof_mgr_core.h | 9 + .../fwkacllib/inc/toolchain/prof_reporter.h | 70 +- third_party/prebuild/aarch64/libalog.so | Bin 223920 -> 225280 bytes third_party/prebuild/aarch64/liberror_manager.so | Bin 888880 -> 1159216 bytes third_party/prebuild/aarch64/libmmpa.a | Bin 63182 -> 62550 bytes third_party/prebuild/x86_64/libalog.so | Bin 164208 -> 173984 bytes third_party/prebuild/x86_64/liberror_manager.so | Bin 852544 -> 1168920 bytes 
third_party/prebuild/x86_64/libmmpa.a | Bin 57270 -> 56998 bytes 116 files changed, 16672 insertions(+), 1133 deletions(-) create mode 100644 ge/ge_runtime/task/label_manager.cc create mode 100644 ge/ge_runtime/task/label_manager.h create mode 100644 inc/external/acl/acl.h create mode 100644 inc/external/acl/acl_base.h create mode 100644 inc/external/acl/acl_mdl.h create mode 100644 inc/external/acl/acl_op.h create mode 100644 inc/external/acl/acl_op_compiler.h create mode 100644 inc/external/acl/acl_prof.h create mode 100644 inc/external/acl/acl_rt.h create mode 100644 inc/external/acl/acl_tdt.h create mode 100644 inc/external/acl/error_codes/ge_error_codes.h create mode 100644 inc/external/acl/error_codes/rt_error_codes.h create mode 100644 inc/external/acl/ops/acl_cblas.h create mode 100644 inc/external/acl/ops/acl_dvpp.h create mode 100644 inc/external/acl/ops/acl_fv.h create mode 100644 inc/external/hccl/hccl.h create mode 100644 inc/external/hccl/hccl_types.h create mode 100644 inc/external/runtime/rt_error_codes.h create mode 100755 scripts/format_source_code.sh mode change 100755 => 100644 third_party/fwkacllib/inc/external/runtime/rt_error_codes.h delete mode 100644 third_party/fwkacllib/inc/hccl/hccl_types.h create mode 100644 third_party/fwkacllib/inc/ops/avg_pool_1d_ops.h create mode 100644 third_party/fwkacllib/inc/ops/correlation.h create mode 100644 third_party/fwkacllib/inc/ops/globalavgpool.h create mode 100644 third_party/fwkacllib/inc/ops/list_ops.h create mode 100644 third_party/fwkacllib/inc/runtime/rt_stars.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 60509838..5e58eeba 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -125,7 +125,6 @@ else () message(STATUS "PLATFORM param is invalid, should be train or inference, you choose nothing!") endif() endif() - set(METADEF_DIR ${CMAKE_CURRENT_LIST_DIR}/metadef) set(PARSER_DIR ${CMAKE_CURRENT_LIST_DIR}/parser) set(GE_DEPEND_DIR ${CMAKE_CURRENT_LIST_DIR}/..) 
@@ -158,6 +157,7 @@ else () elseif(ENABLE_MS_TESTCASES) include(cmake/external_libs/protobuf_static.cmake) include(cmake/external_libs/protoc.cmake) + include(cmake/external_libs/json.cmake) include(cmake/external_libs/securec.cmake) include(cmake/FindModule.cmake) include(cmake/intf_pub_linux.cmake) @@ -175,5 +175,4 @@ else () endif() add_subdirectory(ge) - endif () diff --git a/cmake/external_libs/json.cmake b/cmake/external_libs/json.cmake index 3c1cd012..04659ebc 100755 --- a/cmake/external_libs/json.cmake +++ b/cmake/external_libs/json.cmake @@ -9,10 +9,6 @@ if (GE_PB_PKG) set(REQ_URL "${GE_PB_PKG}/libs/ge_nlohmann_json/include.zip") set(MD5 "0dc903888211db3a0f170304cd9f3a89") set(JSON_INCLUDE_DIR ${JSON_SRC_DIR}) -#elseif (ENABLE_GITEE) -# set(REQ_URL "https://gitee.com/mirrors/JSON-for-Modern-CPP/repository/archive/v3.6.1.zip") -# set(MD5 "5bda78ce308e6cfcf614dcf1d5ff27a7") -#set(JSON_INCLUDE_DIR "${JSON_SRC_DIR}/include") else() set(REQ_URL "https://github.com/nlohmann/json/releases/download/v3.6.1/include.zip") set(MD5 "0dc903888211db3a0f170304cd9f3a89") diff --git a/ge/ge_runtime/CMakeLists.txt b/ge/ge_runtime/CMakeLists.txt index 3243766f..ffea784b 100644 --- a/ge/ge_runtime/CMakeLists.txt +++ b/ge/ge_runtime/CMakeLists.txt @@ -16,6 +16,7 @@ set(GE_SRC_LIST "task/label_goto_task.cc" "task/label_set_task.cc" "task/label_switch_task.cc" + "task/label_manager.cc" ) add_library(ge_runtime SHARED ${GE_SRC_LIST}) diff --git a/ge/ge_runtime/task/hccl_task.cc b/ge/ge_runtime/task/hccl_task.cc index b1c7158c..bfe0d0f3 100644 --- a/ge/ge_runtime/task/hccl_task.cc +++ b/ge/ge_runtime/task/hccl_task.cc @@ -53,15 +53,7 @@ HcclTask::HcclTask(const ModelContext &model_context, const std::shared_ptrworkspace_size() > 0) { - rtError_t rt_ret = rtMalloc(&workspace_mem_, task_info_->workspace_size(), RT_MEMORYINFO_HBM); - if (rt_ret != RT_ERROR_NONE) { - GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret); - return false; - } + workspace_mem_ = 
task_info_->workspace_addr(); } GELOGI("HcclTaskInfo Distribute Start. begin to call function LoadTask in hccl."); diff --git a/ge/ge_runtime/task/label_goto_task.cc b/ge/ge_runtime/task/label_goto_task.cc index 7cb6d556..a3b70971 100644 --- a/ge/ge_runtime/task/label_goto_task.cc +++ b/ge/ge_runtime/task/label_goto_task.cc @@ -16,33 +16,46 @@ #include "ge_runtime/task/label_goto_task.h" #include "ge_runtime/task/task_factory.h" -#include "framework/common/util.h" namespace ge { namespace model_runner { LabelGotoTask::LabelGotoTask(const ModelContext &model_context, const std::shared_ptr &task_info) - : TaskRepeater(model_context, task_info), task_info_(task_info) { + : TaskRepeater(model_context, task_info), + task_info_(task_info), + stream_(nullptr), + index_value_(nullptr) { if (task_info_ == nullptr) { GELOGW("task_info_ is null!"); return; } auto stream_list = model_context.stream_list(); auto label_list = model_context.label_list(); + rt_model_handle_ = model_context.rt_model_handle(); uint32_t stream_id = task_info->stream_id(); - uint32_t label_id = task_info->label_id(); + label_id_ = task_info->label_id(); GELOGI("Stream list size:%zu, stream id:%u.", stream_list.size(), stream_id); - GELOGI("Label list size:%zu, label id:%u.", label_list.size(), label_id); - if (stream_id >= stream_list.size() || label_id >= label_list.size()) { + GELOGI("Label list size:%zu, label id:%u.", label_list.size(), label_id_); + if (stream_id >= stream_list.size() || label_id_ >= label_list.size()) { GELOGW("Stream/Label id invalid."); return; } stream_ = stream_list[stream_id]; - label_ = label_list[label_id]; + label_manager_ = LabelManager::GetInstance(); + if (label_manager_ == nullptr) { + GELOGW("Get label manager instance failed."); + return; + } + label_info_ = label_manager_->GetLabelInfo(rt_model_handle_, {label_id_}, label_list); } LabelGotoTask::~LabelGotoTask() { - GE_FREE_RT_LOG(label_info_); - GE_FREE_RT_LOG(index_value_); + if (index_value_ != nullptr) { + 
rtError_t rt_ret = rtFree(index_value_); + if (rt_ret != RT_ERROR_NONE) { + GELOGE(RT_FAILED, "rtFree index_value_ failed! ret: 0x%X.", rt_ret); + } + index_value_ = nullptr; + } } bool LabelGotoTask::Distribute() { @@ -94,21 +107,34 @@ bool LabelGotoTask::CheckParamValid() { return false; } - if (label_ == nullptr) { - GELOGE(PARAM_INVALID, "label is null!"); + if (label_info_ == nullptr) { + GELOGE(PARAM_INVALID, "label info is null!"); return false; } - if (label_info_ != nullptr) { - GELOGE(PARAM_INVALID, "label_info_ has dirty data."); - return false; + if (index_value_ == nullptr) { + rtError_t rt_ret = rtMalloc(&index_value_, sizeof(uint64_t), RT_MEMORY_HBM); + if (rt_ret != RT_ERROR_NONE) { + GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret); + return false; + } + + uint64_t index = 0; + rt_ret = rtMemcpy(index_value_, sizeof(uint64_t), &index, sizeof(index), RT_MEMCPY_HOST_TO_DEVICE); + if (rt_ret != RT_ERROR_NONE) { + GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret); + return false; + } } - if (index_value_ != nullptr) { - GELOGE(PARAM_INVALID, "index_value_ has dirty data."); + void *label_info = label_info_->GetLabelInfo(); + rtError_t rt_ret = rtLabelSwitchByIndex(index_value_, 1, label_info, stream_); + if (rt_ret != RT_ERROR_NONE) { + GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret); return false; } + GELOGI("DistributeTask end."); return true; } diff --git a/ge/ge_runtime/task/label_goto_task.h b/ge/ge_runtime/task/label_goto_task.h index addbb700..e579c683 100644 --- a/ge/ge_runtime/task/label_goto_task.h +++ b/ge/ge_runtime/task/label_goto_task.h @@ -18,7 +18,11 @@ #define GE_GE_RUNTIME_TASK_LABEL_GOTO_TASK_H_ #include +#include +#include +#include #include "ge_runtime/task/task.h" +#include "ge_runtime/task/label_manager.h" namespace ge { namespace model_runner { @@ -31,13 +35,13 @@ class LabelGotoTask : public TaskRepeater { bool Distribute() override; private: - bool CheckParamValid(); - std::shared_ptr task_info_; 
- void *stream_{nullptr}; - void *label_{nullptr}; - void *label_info_{nullptr}; - void *index_value_{nullptr}; + void *stream_; + std::shared_ptr label_info_; + void *index_value_; + uint32_t label_id_; + rtModel_t rt_model_handle_; + std::shared_ptr label_manager_; }; } // namespace model_runner } // namespace ge diff --git a/ge/ge_runtime/task/label_manager.cc b/ge/ge_runtime/task/label_manager.cc new file mode 100644 index 00000000..a2b0c3aa --- /dev/null +++ b/ge/ge_runtime/task/label_manager.cc @@ -0,0 +1,119 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "ge_runtime/task/label_manager.h" +#include +#include +#include "runtime/mem.h" +#include "runtime/rt_model.h" +#include "common/ge_inner_error_codes.h" +#include "framework/common/debug/ge_log.h" + +namespace ge { +namespace model_runner { +std::weak_ptr LabelManager::instance_; +std::mutex LabelManager::instance_mutex_; + +template +static std::string GetVectorString(const std::vector &vec) { + std::string ret; + for (size_t i = 0; i < vec.size(); ++i) { + if (i != 0) { + ret.push_back(','); + } + ret += std::to_string(vec[i]); + } + return ret; +} + +LabelGuard::~LabelGuard() { + void *label_info = GetLabelInfo(); + if (label_info != nullptr) { + rtError_t rt_ret = rtFree(label_info); + if (rt_ret != RT_ERROR_NONE) { + GELOGE(RT_FAILED, "rtFree label_info failed! 
ret: 0x%X.", rt_ret); + } + } +} + +std::shared_ptr LabelManager::GetInstance() { + std::lock_guard lock(instance_mutex_); + auto instance = instance_.lock(); + if (instance != nullptr) { + return instance; + } + + instance = std::make_shared(); + instance_ = instance; + return instance; +} + +std::shared_ptr LabelManager::GetLabelInfo(rtModel_t model, const std::vector &label_ids, + const std::vector &all_label) { + std::lock_guard lock(model_info_mapping_mutex_); + rtError_t rt_ret; + auto model_iter = model_info_mapping_.find(model); + if (model_iter == model_info_mapping_.end()) { + model_info_mapping_.emplace(model, std::map>()); + model_iter = model_info_mapping_.find(model); + } + + std::string label_id_str = GetVectorString(label_ids); + auto &label_map = model_iter->second; + auto label_iter = label_map.find(label_id_str); + if (label_iter != label_map.end()) { + auto label_guard = label_iter->second.lock(); + if (label_guard != nullptr) { + GELOGI("model %p find same label id %s.", model, label_id_str.c_str()); + return label_guard; + } + } + + GELOGI("Alloc label id %s for model %p.", label_id_str.c_str(), model); + void *label_info; + std::vector label_list; + bool status = true; + std::transform(label_ids.begin(), label_ids.end(), std::back_inserter(label_list), + [&all_label, &status](uint32_t idx) -> void * { + if (idx >= all_label.size()) { + GELOGE(PARAM_INVALID, "Invalid label id %u, all label list size %zu.", idx, all_label.size()); + status = false; + return nullptr; + } + return all_label[idx]; + }); + if (!status) { + GELOGE(PARAM_INVALID, "Get label info failed."); + return nullptr; + } + uint32_t label_info_size = sizeof(rtLabelDevInfo) * label_list.size(); + rt_ret = rtMalloc(&label_info, label_info_size, RT_MEMORY_HBM); + if (rt_ret != RT_ERROR_NONE) { + GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret); + return nullptr; + } + + rt_ret = rtLabelListCpy(label_list.data(), label_list.size(), label_info, label_info_size); + if 
(rt_ret != RT_ERROR_NONE) { + GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret); + return nullptr; + } + + auto label_guard = std::make_shared(label_info); + label_map.emplace(label_id_str, label_guard); + return label_guard; +} +} // namespace model_runner +} // namespace ge diff --git a/ge/ge_runtime/task/label_manager.h b/ge/ge_runtime/task/label_manager.h new file mode 100644 index 00000000..f2c42c29 --- /dev/null +++ b/ge/ge_runtime/task/label_manager.h @@ -0,0 +1,54 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef GE_GE_RUNTIME_TASK_LABEL_MANAGER_H_ +#define GE_GE_RUNTIME_TASK_LABEL_MANAGER_H_ + +#include +#include +#include +#include +#include + +namespace ge { +namespace model_runner { +class LabelGuard { + public: + explicit LabelGuard(void *label_info) : label_info_(reinterpret_cast(label_info)) {} + ~LabelGuard(); + void *GetLabelInfo() { return reinterpret_cast(label_info_); } + + private: + uintptr_t label_info_; +}; + +class LabelManager { + public: + static std::shared_ptr GetInstance(); + std::shared_ptr GetLabelInfo(rtModel_t model, const std::vector &label_ids, + const std::vector &all_label); + + private: + std::mutex model_info_mapping_mutex_; + std::map>> model_info_mapping_; + + static std::weak_ptr instance_; + static std::mutex instance_mutex_; +}; + + +} // namespace model_runner +} // namespace ge +#endif // GE_GE_RUNTIME_TASK_LABEL_MANAGER_H_ \ No newline at end of file diff --git a/ge/ge_runtime/task/label_switch_task.cc b/ge/ge_runtime/task/label_switch_task.cc index 8c795da9..cde278d9 100644 --- a/ge/ge_runtime/task/label_switch_task.cc +++ b/ge/ge_runtime/task/label_switch_task.cc @@ -24,14 +24,14 @@ LabelSwitchTask::LabelSwitchTask(const ModelContext &model_context, : TaskRepeater(model_context, task_info), task_info_(task_info), stream_(nullptr), - all_label_resource_(), label_info_(nullptr) { if (task_info_ == nullptr) { GELOGW("task_info_ is null!"); return; } - all_label_resource_ = model_context.label_list(); + rt_model_handle_ = model_context.rt_model_handle(); + auto all_label_resource = model_context.label_list(); auto stream_list = model_context.stream_list(); uint32_t stream_id = task_info->stream_id(); GELOGI("Stream list size:%zu, stream id:%u.", stream_list.size(), stream_id); @@ -40,18 +40,16 @@ LabelSwitchTask::LabelSwitchTask(const ModelContext &model_context, return; } stream_ = stream_list[stream_id]; -} - -LabelSwitchTask::~LabelSwitchTask() { - if (label_info_ != nullptr) { - rtError_t rt_ret = rtFree(label_info_); 
- if (rt_ret != RT_ERROR_NONE) { - GELOGE(RT_FAILED, "rtFree fwkOpBuf failed! ret: 0x%X.", rt_ret); - } - label_info_ = nullptr; + label_manager_ = LabelManager::GetInstance(); + if (label_manager_ == nullptr) { + GELOGW("Get label manager instance failed."); + return; } + label_info_ = label_manager_->GetLabelInfo(rt_model_handle_, task_info_->label_list(), all_label_resource); } +LabelSwitchTask::~LabelSwitchTask() {} + bool LabelSwitchTask::Distribute() { GELOGI("LabelSwitchTask Distribute start."); if (!CheckParamValid()) { @@ -117,8 +115,8 @@ bool LabelSwitchTask::CheckParamValid() { return false; } - if (label_info_ != nullptr) { - GELOGE(PARAM_INVALID, "label_info_ has dirty data."); + if (label_info_ == nullptr) { + GELOGE(PARAM_INVALID, "CopyLabelList failed, label info is null."); return false; } @@ -126,6 +124,5 @@ bool LabelSwitchTask::CheckParamValid() { } REGISTER_TASK(TaskInfoType::LABEL_SWITCH, LabelSwitchTask, LabelSwitchTaskInfo); - } // namespace model_runner } // namespace ge diff --git a/ge/ge_runtime/task/label_switch_task.h b/ge/ge_runtime/task/label_switch_task.h index 463faa31..cfa6877c 100644 --- a/ge/ge_runtime/task/label_switch_task.h +++ b/ge/ge_runtime/task/label_switch_task.h @@ -19,6 +19,7 @@ #include #include "ge_runtime/task/task.h" +#include "ge_runtime/task/label_manager.h" namespace ge { namespace model_runner { @@ -35,8 +36,9 @@ class LabelSwitchTask : public TaskRepeater { std::shared_ptr task_info_; void *stream_; - std::vector all_label_resource_; - void *label_info_; + rtModel_t rt_model_handle_; + std::shared_ptr label_info_; + std::shared_ptr label_manager_; }; } // namespace model_runner } // namespace ge diff --git a/inc/external/acl/acl.h b/inc/external/acl/acl.h new file mode 100644 index 00000000..8d261201 --- /dev/null +++ b/inc/external/acl/acl.h @@ -0,0 +1,82 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this 
file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef INC_EXTERNAL_ACL_ACL_H_ +#define INC_EXTERNAL_ACL_ACL_H_ + +#include "acl_rt.h" +#include "acl_op.h" +#include "acl_mdl.h" + +#ifdef __cplusplus +extern "C" { +#endif + +// Current version is 1.0.0 +#define ACL_MAJOR_VERSION 1 +#define ACL_MINOR_VERSION 0 +#define ACL_PATCH_VERSION 0 + +/** + * @ingroup AscendCL + * @brief acl initialize + * + * @par Restriction + * The aclInit interface can be called only once in a process + * @param configPath [IN] the config path,it can be NULL + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclInit(const char *configPath); + +/** + * @ingroup AscendCL + * @brief acl finalize + * + * @par Restriction + * Need to call aclFinalize before the process exits. + * After calling aclFinalize,the services cannot continue to be used normally. + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclFinalize(); + +/** + * @ingroup AscendCL + * @brief query ACL interface version + * + * @param majorVersion[OUT] ACL interface major version + * @param minorVersion[OUT] ACL interface minor version + * @param patchVersion[OUT] ACL interface patch version + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtGetVersion(int32_t *majorVersion, int32_t *minorVersion, int32_t *patchVersion); + +/** + * @ingroup AscendCL + * @brief get recent error message + * + * @retval null for failed + * @retval OtherValues success + */ +ACL_FUNC_VISIBILITY const char *aclGetRecentErrMsg(); + +#ifdef __cplusplus +} +#endif + +#endif // INC_EXTERNAL_ACL_ACL_H_ diff --git a/inc/external/acl/acl_base.h b/inc/external/acl/acl_base.h new file mode 100644 index 00000000..64d4bd81 --- /dev/null +++ b/inc/external/acl/acl_base.h @@ -0,0 +1,638 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef INC_EXTERNAL_ACL_ACL_BASE_H_ +#define INC_EXTERNAL_ACL_ACL_BASE_H_ + +#include +#include +#include "error_codes/rt_error_codes.h" +#include "error_codes/ge_error_codes.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(_MSC_VER) +#ifdef FUNC_VISIBILITY +#define ACL_FUNC_VISIBILITY _declspec(dllexport) +#else +#define ACL_FUNC_VISIBILITY +#endif +#else +#ifdef FUNC_VISIBILITY +#define ACL_FUNC_VISIBILITY __attribute__((visibility("default"))) +#else +#define ACL_FUNC_VISIBILITY +#endif +#endif + +#ifdef __GNUC__ +#define ACL_DEPRECATED __attribute__((deprecated)) +#define ACL_DEPRECATED_MESSAGE(message) __attribute__((deprecated(message))) +#elif defined(_MSC_VER) +#define ACL_DEPRECATED __declspec(deprecated) +#define ACL_DEPRECATED_MESSAGE(message) __declspec(deprecated(message)) +#else +#define ACL_DEPRECATED +#define ACL_DEPRECATED_MESSAGE(message) +#endif + +typedef void *aclrtStream; +typedef void *aclrtEvent; +typedef void *aclrtContext; +typedef int aclError; +typedef uint16_t aclFloat16; +typedef struct aclDataBuffer aclDataBuffer; +typedef struct aclTensorDesc aclTensorDesc; + +static const int ACL_ERROR_NONE = 0; +static const int ACL_SUCCESS = 0; + +static const int ACL_ERROR_INVALID_PARAM = 100000; +static const int ACL_ERROR_UNINITIALIZE = 100001; +static const int ACL_ERROR_REPEAT_INITIALIZE = 100002; +static const int ACL_ERROR_INVALID_FILE = 100003; +static const int ACL_ERROR_WRITE_FILE = 100004; +static const int ACL_ERROR_INVALID_FILE_SIZE = 100005; +static const int ACL_ERROR_PARSE_FILE = 100006; +static const int ACL_ERROR_FILE_MISSING_ATTR = 100007; +static const int ACL_ERROR_FILE_ATTR_INVALID = 100008; +static const int ACL_ERROR_INVALID_DUMP_CONFIG = 100009; +static const int ACL_ERROR_INVALID_PROFILING_CONFIG = 100010; +static const int ACL_ERROR_INVALID_MODEL_ID = 100011; +static const int ACL_ERROR_DESERIALIZE_MODEL = 100012; +static const int ACL_ERROR_PARSE_MODEL = 100013; +static const int 
ACL_ERROR_READ_MODEL_FAILURE = 100014; +static const int ACL_ERROR_MODEL_SIZE_INVALID = 100015; +static const int ACL_ERROR_MODEL_MISSING_ATTR = 100016; +static const int ACL_ERROR_MODEL_INPUT_NOT_MATCH = 100017; +static const int ACL_ERROR_MODEL_OUTPUT_NOT_MATCH = 100018; +static const int ACL_ERROR_MODEL_NOT_DYNAMIC = 100019; +static const int ACL_ERROR_OP_TYPE_NOT_MATCH = 100020; +static const int ACL_ERROR_OP_INPUT_NOT_MATCH = 100021; +static const int ACL_ERROR_OP_OUTPUT_NOT_MATCH = 100022; +static const int ACL_ERROR_OP_ATTR_NOT_MATCH = 100023; +static const int ACL_ERROR_OP_NOT_FOUND = 100024; +static const int ACL_ERROR_OP_LOAD_FAILED = 100025; +static const int ACL_ERROR_UNSUPPORTED_DATA_TYPE = 100026; +static const int ACL_ERROR_FORMAT_NOT_MATCH = 100027; +static const int ACL_ERROR_BIN_SELECTOR_NOT_REGISTERED = 100028; +static const int ACL_ERROR_KERNEL_NOT_FOUND = 100029; +static const int ACL_ERROR_BIN_SELECTOR_ALREADY_REGISTERED = 100030; +static const int ACL_ERROR_KERNEL_ALREADY_REGISTERED = 100031; +static const int ACL_ERROR_INVALID_QUEUE_ID = 100032; +static const int ACL_ERROR_REPEAT_SUBSCRIBE = 100033; +static const int ACL_ERROR_STREAM_NOT_SUBSCRIBE = 100034; +static const int ACL_ERROR_THREAD_NOT_SUBSCRIBE = 100035; +static const int ACL_ERROR_WAIT_CALLBACK_TIMEOUT = 100036; +static const int ACL_ERROR_REPEAT_FINALIZE = 100037; +static const int ACL_ERROR_NOT_STATIC_AIPP = 100038; +static const int ACL_ERROR_COMPILING_STUB_MODE = 100039; +static const int ACL_ERROR_GROUP_NOT_SET = 100040; +static const int ACL_ERROR_GROUP_NOT_CREATE = 100041; +static const int ACL_ERROR_PROF_ALREADY_RUN = 100042; +static const int ACL_ERROR_PROF_NOT_RUN = 100043; +static const int ACL_ERROR_DUMP_ALREADY_RUN = 100044; +static const int ACL_ERROR_DUMP_NOT_RUN = 100045; +static const int ACL_ERROR_PROF_REPEAT_SUBSCRIBE = 148046; +static const int ACL_ERROR_PROF_API_CONFLICT = 148047; +static const int ACL_ERROR_INVALID_MAX_OPQUEUE_NUM_CONFIG = 148048; +static 
const int ACL_ERROR_INVALID_OPP_PATH = 148049; +static const int ACL_ERROR_OP_UNSUPPORTED_DYNAMIC = 148050; + +static const int ACL_ERROR_BAD_ALLOC = 200000; +static const int ACL_ERROR_API_NOT_SUPPORT = 200001; +static const int ACL_ERROR_INVALID_DEVICE = 200002; +static const int ACL_ERROR_MEMORY_ADDRESS_UNALIGNED = 200003; +static const int ACL_ERROR_RESOURCE_NOT_MATCH = 200004; +static const int ACL_ERROR_INVALID_RESOURCE_HANDLE = 200005; +static const int ACL_ERROR_FEATURE_UNSUPPORTED = 200006; +static const int ACL_ERROR_PROF_MODULES_UNSUPPORTED = 200007; + +static const int ACL_ERROR_STORAGE_OVER_LIMIT = 300000; + +static const int ACL_ERROR_INTERNAL_ERROR = 500000; +static const int ACL_ERROR_FAILURE = 500001; +static const int ACL_ERROR_GE_FAILURE = 500002; +static const int ACL_ERROR_RT_FAILURE = 500003; +static const int ACL_ERROR_DRV_FAILURE = 500004; +static const int ACL_ERROR_PROFILING_FAILURE = 500005; + +#define ACL_TENSOR_SHAPE_RANGE_NUM 2 +#define ACL_UNKNOWN_RANK 0xFFFFFFFFFFFFFFFE + +typedef enum { + ACL_DT_UNDEFINED = -1, + ACL_FLOAT = 0, + ACL_FLOAT16 = 1, + ACL_INT8 = 2, + ACL_INT32 = 3, + ACL_UINT8 = 4, + ACL_INT16 = 6, + ACL_UINT16 = 7, + ACL_UINT32 = 8, + ACL_INT64 = 9, + ACL_UINT64 = 10, + ACL_DOUBLE = 11, + ACL_BOOL = 12, + ACL_STRING = 13, +} aclDataType; + +typedef enum { + ACL_FORMAT_UNDEFINED = -1, + ACL_FORMAT_NCHW = 0, + ACL_FORMAT_NHWC = 1, + ACL_FORMAT_ND = 2, + ACL_FORMAT_NC1HWC0 = 3, + ACL_FORMAT_FRACTAL_Z = 4, + ACL_FORMAT_NC1HWC0_C04 = 12, + ACL_FORMAT_NDHWC = 27, + ACL_FORMAT_FRACTAL_NZ = 29, + ACL_FORMAT_NCDHW = 30, + ACL_FORMAT_NDC1HWC0 = 32, + ACL_FRACTAL_Z_3D = 33 +} aclFormat; + +typedef enum { + ACL_DEBUG = 0, + ACL_INFO = 1, + ACL_WARNING = 2, + ACL_ERROR = 3, +} aclLogLevel; + +typedef enum { + ACL_MEMTYPE_DEVICE = 0, + ACL_MEMTYPE_HOST = 1, +} aclMemType; + +/** + * @ingroup AscendCL + * @brief Converts data of type aclFloat16 to data of type float + * + * @param value [IN] Data to be converted + * + * @retval 
Transformed data + */ +ACL_FUNC_VISIBILITY float aclFloat16ToFloat(aclFloat16 value); + +/** + * @ingroup AscendCL + * @brief Converts data of type float to data of type aclFloat16 + * + * @param value [IN] Data to be converted + * + * @retval Transformed data + */ +ACL_FUNC_VISIBILITY aclFloat16 aclFloatToFloat16(float value); + +/** + * @ingroup AscendCL + * @brief create data of aclDataBuffer + * + * @param data [IN] pointer to data + * @li Need to be managed by the user, + * call aclrtMalloc interface to apply for memory, + * call aclrtFree interface to release memory + * + * @param size [IN] size of data in bytes + * + * @retval pointer to created instance. nullptr if run out of memory + * + * @see aclrtMalloc | aclrtFree + */ +ACL_FUNC_VISIBILITY aclDataBuffer *aclCreateDataBuffer(void *data, size_t size); + +/** + * @ingroup AscendCL + * @brief destroy data of aclDataBuffer + * + * @par Function + * Only the aclDataBuffer type data is destroyed here. + * The memory of the data passed in when the aclCreateDataBuffer interface + * is called to create aclDataBuffer type data must be released by the user + * + * @param dataBuffer [IN] pointer to the aclDataBuffer + * + * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure + * + * @see aclCreateDataBuffer + */ +ACL_FUNC_VISIBILITY aclError aclDestroyDataBuffer(const aclDataBuffer *dataBuffer); + +/** + * @ingroup AscendCL + * @brief update new data of aclDataBuffer + * + * @param dataBuffer [OUT] pointer to aclDataBuffer + * @li The old data need to be released by the user, otherwise a memory leak may occur + * call aclGetDataBufferAddr interface to get old data address + * call aclrtFree interface to release memory + * + * @param data [IN] pointer to new data + * @li Need to be managed by the user, + * call aclrtMalloc interface to apply for memory, + * call aclrtFree interface to release memory + * + * @param size [IN] size of data in bytes + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtMalloc | aclrtFree | aclGetDataBufferAddr + */ +ACL_FUNC_VISIBILITY aclError aclUpdateDataBuffer(aclDataBuffer *dataBuffer, void *data, size_t size); + +/** + * @ingroup AscendCL + * @brief get data address from aclDataBuffer + * + * @param dataBuffer [IN] pointer to the data of aclDataBuffer + * + * @retval data address + */ +ACL_FUNC_VISIBILITY void *aclGetDataBufferAddr(const aclDataBuffer *dataBuffer); + +/** + * @ingroup AscendCL + * @brief get data size of aclDataBuffer + * + * @param dataBuffer [IN] pointer to the data of aclDataBuffer + * + * @retval data size + */ +ACL_DEPRECATED_MESSAGE("aclGetDataBufferSize is deprecated, use aclGetDataBufferSizeV2 instead") +ACL_FUNC_VISIBILITY uint32_t aclGetDataBufferSize(const aclDataBuffer *dataBuffer); + +/** + * @ingroup AscendCL + * @brief get data size of aclDataBuffer to replace aclGetDataBufferSize + * + * @param dataBuffer [IN] pointer to the data of aclDataBuffer + * + * @retval data size + */ +ACL_FUNC_VISIBILITY size_t aclGetDataBufferSizeV2(const aclDataBuffer *dataBuffer); + +/** + * @ingroup AscendCL + * @brief get size of aclDataType + * + * @param dataType [IN] aclDataType
data the size to get + * + * @retval size of the aclDataType + */ +ACL_FUNC_VISIBILITY size_t aclDataTypeSize(aclDataType dataType); + +// interfaces of tensor desc +/** + * @ingroup AscendCL + * @brief create data aclTensorDesc + * + * @param dataType [IN] Data types described by tensor + * @param numDims [IN] the number of dimensions of the shape + * @param dims [IN] the size of the specified dimension + * @param format [IN] tensor format + * + * @retval aclTensorDesc pointer. + * @retval nullptr if param is invalid or run out of memory + */ +ACL_FUNC_VISIBILITY aclTensorDesc *aclCreateTensorDesc(aclDataType dataType, int numDims, const int64_t *dims, + aclFormat format); + +/** + * @ingroup AscendCL + * @brief destroy data aclTensorDesc + * + * @param desc [IN] pointer to the data of aclTensorDesc to destroy + */ +ACL_FUNC_VISIBILITY void aclDestroyTensorDesc(const aclTensorDesc *desc); + +/** + * @ingroup AscendCL + * @brief set tensor shape range for aclTensorDesc + * + * @param desc [OUT] pointer to the data of aclTensorDesc + * @param dimsCount [IN] the number of dimensions of the shape + * @param dimsRange [IN] the range of dimensions of the shape + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclSetTensorShapeRange(aclTensorDesc *desc, size_t dimsCount, + int64_t dimsRange[][ACL_TENSOR_SHAPE_RANGE_NUM]); + +/** + * @ingroup AscendCL + * @brief get data type specified by the tensor description + * + * @param desc [IN] pointer to the instance of aclTensorDesc + * + * @retval data type specified by the tensor description. 
+ * @retval ACL_DT_UNDEFINED if description is null + */ +ACL_FUNC_VISIBILITY aclDataType aclGetTensorDescType(const aclTensorDesc *desc); + +/** + * @ingroup AscendCL + * @brief get data format specified by the tensor description + * + * @param desc [IN] pointer to the instance of aclTensorDesc + * + * @retval data format specified by the tensor description. + * @retval ACL_FORMAT_UNDEFINED if description is null + */ +ACL_FUNC_VISIBILITY aclFormat aclGetTensorDescFormat(const aclTensorDesc *desc); + +/** + * @ingroup AscendCL + * @brief get tensor size specified by the tensor description + * + * @param desc [IN] pointer to the instance of aclTensorDesc + * + * @retval data size specified by the tensor description. + * @retval 0 if description is null + */ +ACL_FUNC_VISIBILITY size_t aclGetTensorDescSize(const aclTensorDesc *desc); + +/** + * @ingroup AscendCL + * @brief get element count specified by the tensor description + * + * @param desc [IN] pointer to the instance of aclTensorDesc + * + * @retval element count specified by the tensor description. + * @retval 0 if description is null + */ +ACL_FUNC_VISIBILITY size_t aclGetTensorDescElementCount(const aclTensorDesc *desc); + +/** + * @ingroup AscendCL + * @brief get number of dims specified by the tensor description + * + * @param desc [IN] pointer to the instance of aclTensorDesc + * + * @retval number of dims specified by the tensor description. + * @retval 0 if description is null + * @retval ACL_UNKNOWN_RANK if the tensor dim is -2 + */ +ACL_FUNC_VISIBILITY size_t aclGetTensorDescNumDims(const aclTensorDesc *desc); + +/** + * @ingroup AscendCL + * @brief Get the size of the specified dim in the tensor description + * + * @param desc [IN] pointer to the instance of aclTensorDesc + * @param index [IN] index of dims, start from 0. + * + * @retval dim specified by the tensor description and index. 
+ * @retval -1 if description or index is invalid + */ +ACL_DEPRECATED_MESSAGE("aclGetTensorDescDim is deprecated, use aclGetTensorDescDimV2 instead") +ACL_FUNC_VISIBILITY int64_t aclGetTensorDescDim(const aclTensorDesc *desc, size_t index); + +/** + * @ingroup AscendCL + * @brief Get the size of the specified dim in the tensor description + * + * @param desc [IN] pointer to the instance of aclTensorDesc + * @param index [IN] index of dims, start from 0. + * @param dimSize [OUT] size of the specified dim. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclGetTensorDescDimV2(const aclTensorDesc *desc, size_t index, int64_t *dimSize); + +/** + * @ingroup AscendCL + * @brief Get the range of the specified dim in the tensor description + * + * @param desc [IN] pointer to the instance of aclTensorDesc + * @param index [IN] index of dims, start from 0. + * @param dimRangeNum [IN] number of dimRange. + * @param dimRange [OUT] range of the specified dim. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclGetTensorDescDimRange(const aclTensorDesc *desc, size_t index, size_t dimRangeNum, + int64_t *dimRange); + +/** + * @ingroup AscendCL + * @brief set tensor description name + * + * @param desc [OUT] pointer to the instance of aclTensorDesc + * @param name [IN] tensor description name + */ +ACL_FUNC_VISIBILITY void aclSetTensorDescName(aclTensorDesc *desc, const char *name); + +/** + * @ingroup AscendCL + * @brief get tensor description name + * + * @param desc [IN] pointer to the instance of aclTensorDesc + * + * @retval tensor description name. 
+ * @retval empty string if description is null + */ +ACL_FUNC_VISIBILITY const char *aclGetTensorDescName(aclTensorDesc *desc); + +/** + * @ingroup AscendCL + * @brief Convert the format in the source aclTensorDesc according to + * the specified dstFormat to generate a new target aclTensorDesc. + * The format in the source aclTensorDesc remains unchanged. + * + * @param srcDesc [IN] pointer to the source tensor desc + * @param dstFormat [IN] destination format + * @param dstDesc [OUT] pointer to the pointer to the destination tensor desc + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclTransTensorDescFormat(const aclTensorDesc *srcDesc, aclFormat dstFormat, + aclTensorDesc **dstDesc); + +/** + * @ingroup AscendCL + * @brief Set the storage format specified by the tensor description + * + * @param desc [OUT] pointer to the instance of aclTensorDesc + * @param format [IN] the storage format + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_DEPRECATED_MESSAGE("aclSetTensorStorageFormat is deprecated, use aclSetTensorFormat instead") +ACL_FUNC_VISIBILITY aclError aclSetTensorStorageFormat(aclTensorDesc *desc, aclFormat format); + +/** + * @ingroup AscendCL + * @brief Set the storage shape specified by the tensor description + * + * @param desc [OUT] pointer to the instance of aclTensorDesc + * @param numDims [IN] the number of dimensions of the shape + * @param dims [IN] the size of the specified dimension + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_DEPRECATED_MESSAGE("aclSetTensorStorageShape is deprecated, use aclSetTensorShape instead") +ACL_FUNC_VISIBILITY aclError aclSetTensorStorageShape(aclTensorDesc *desc, int numDims, const int64_t *dims); + +/** + * @ingroup AscendCL + * @brief Set the format specified by the tensor description + * + * @param desc [OUT] pointer to the instance of aclTensorDesc + * @param format [IN] the storage format + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclSetTensorFormat(aclTensorDesc *desc, aclFormat format); + +/** + * @ingroup AscendCL + * @brief Set the shape specified by the tensor description + * + * @param desc [OUT] pointer to the instance of aclTensorDesc + * @param numDims [IN] the number of dimensions of the shape + * @param dims [IN] the size of the specified dimension + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclSetTensorShape(aclTensorDesc *desc, int numDims, const int64_t *dims); + +/** + * @ingroup AscendCL + * @brief Set the original format specified by the tensor description + * + * @param desc [OUT] pointer to the instance of aclTensorDesc + * @param format [IN] the storage format + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclSetTensorOriginFormat(aclTensorDesc *desc, aclFormat format); + +/** + * @ingroup AscendCL + * @brief Set the original shape specified by the tensor description + * + * @param desc [OUT] pointer to the instance of aclTensorDesc + * @param numDims [IN] the number of dimensions of the shape + * @param dims [IN] the size of the specified dimension + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclSetTensorOriginShape(aclTensorDesc *desc, int numDims, const int64_t *dims); + +/** + * @ingroup AscendCL + * @brief get op description info + * + * @param desc [IN] pointer to tensor description + * @param index [IN] index of tensor + * + * @retval null for failed. + * @retval OtherValues success. + */ +ACL_FUNC_VISIBILITY aclTensorDesc *aclGetTensorDescByIndex(aclTensorDesc *desc, size_t index); + +/** + * @ingroup AscendCL + * @brief get address of tensor + * + * @param desc [IN] pointer to tensor description + * + * @retval null for failed + * @retval OtherValues success + */ +ACL_FUNC_VISIBILITY void *aclGetTensorDescAddress(const aclTensorDesc *desc); + +/** + * @ingroup AscendCL + * @brief Set the dynamic input name specified by the tensor description + * + * @param desc [OUT] pointer to the instance of aclTensorDesc + * @param dynamicInputName [IN] pointer to the dynamic input name + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclSetTensorDynamicInput(aclTensorDesc *desc, const char *dynamicInputName); + +/** + * @ingroup AscendCL + * @brief Set const data specified by the tensor description + * + * @param desc [OUT] pointer to the instance of aclTensorDesc + * @param dataBuffer [IN] pointer to the const databuffer + * @param length [IN] the length of const databuffer + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclSetTensorConst(aclTensorDesc *desc, void *dataBuffer, size_t length); + +/** + * @ingroup AscendCL + * @brief Set tensor memory type specified by the tensor description + * + * @param desc [OUT] pointer to the instance of aclTensorDesc + * @param memType [IN] ACL_MEMTYPE_DEVICE means device, ACL_MEMTYPE_HOST means host + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclSetTensorPlaceMent(aclTensorDesc *desc, aclMemType memType); + +/** + * @ingroup AscendCL + * @brief an interface for users to output APP logs + * + * @param logLevel [IN] the level of current log + * @param func [IN] the function where the log is located + * @param file [IN] the file where the log is located + * @param line [IN] Number of source lines where the log is located + * @param fmt [IN] the format of current log + * @param ... [IN] the value of current log + */ +ACL_FUNC_VISIBILITY void aclAppLog(aclLogLevel logLevel, const char *func, const char *file, uint32_t line, + const char *fmt, ...); + +/** + * @ingroup AscendCL + * @brief get soc name + * + * @retval null for failed + * @retval OtherValues success + */ +ACL_FUNC_VISIBILITY const char *aclrtGetSocName(); + +#define ACL_APP_LOG(level, fmt, ...) aclAppLog(level, __FUNCTION__, __FILE__, __LINE__, fmt, ##__VA_ARGS__) + +#ifdef __cplusplus +} +#endif + +#endif // INC_EXTERNAL_ACL_ACL_BASE_H_ diff --git a/inc/external/acl/acl_mdl.h b/inc/external/acl/acl_mdl.h new file mode 100644 index 00000000..2bf85e29 --- /dev/null +++ b/inc/external/acl/acl_mdl.h @@ -0,0 +1,1225 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef INC_EXTERNAL_ACL_ACL_MODEL_H_ +#define INC_EXTERNAL_ACL_ACL_MODEL_H_ + +#include <stddef.h> +#include <stdint.h> + +#include "acl_base.h" +#include "acl_rt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define ACL_MAX_DIM_CNT 128 +#define ACL_MAX_TENSOR_NAME_LEN 128 +#define ACL_MAX_BATCH_NUM 128 +#define ACL_MAX_HW_NUM 128 +#define ACL_MAX_SHAPE_COUNT 128 +#define ACL_INVALID_NODE_INDEX 0xFFFFFFFF + +#define ACL_MDL_LOAD_FROM_FILE 1 +#define ACL_MDL_LOAD_FROM_FILE_WITH_MEM 2 +#define ACL_MDL_LOAD_FROM_MEM 3 +#define ACL_MDL_LOAD_FROM_MEM_WITH_MEM 4 +#define ACL_MDL_LOAD_FROM_FILE_WITH_Q 5 +#define ACL_MDL_LOAD_FROM_MEM_WITH_Q 6 + +#define ACL_DYNAMIC_TENSOR_NAME "ascend_mbatch_shape_data" +#define ACL_DYNAMIC_AIPP_NAME "ascend_dynamic_aipp_data" +#define ACL_ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES "_datadump_original_op_names" + +typedef struct aclmdlDataset aclmdlDataset; +typedef struct aclmdlDesc aclmdlDesc; +typedef struct aclmdlAIPP aclmdlAIPP; +typedef struct aclAippExtendInfo aclAippExtendInfo; +typedef struct aclmdlConfigHandle aclmdlConfigHandle; + +typedef enum { + ACL_YUV420SP_U8 = 1, + ACL_XRGB8888_U8, + ACL_RGB888_U8, + ACL_YUV400_U8, + ACL_NC1HWC0DI_FP16, + ACL_NC1HWC0DI_S8, + ACL_ARGB8888_U8, + ACL_YUYV_U8, + ACL_YUV422SP_U8, + ACL_AYUV444_U8, + ACL_RAW10, + ACL_RAW12, + ACL_RAW16, + ACL_RAW24, + ACL_AIPP_RESERVED = 0xffff, +} aclAippInputFormat; + +typedef enum { + ACL_MDL_PRIORITY_INT32 = 0, + ACL_MDL_LOAD_TYPE_SIZET, + ACL_MDL_PATH_PTR, /**< pointer to model load path with deep copy */ + ACL_MDL_MEM_ADDR_PTR, /**< pointer to model memory with shallow copy */ + ACL_MDL_MEM_SIZET, + ACL_MDL_WEIGHT_ADDR_PTR, /**< pointer to weight memory of model with shallow copy */ + ACL_MDL_WEIGHT_SIZET, + ACL_MDL_WORKSPACE_ADDR_PTR, /**< pointer to workspace memory of model with shallow copy */ + ACL_MDL_WORKSPACE_SIZET, + ACL_MDL_INPUTQ_NUM_SIZET, + ACL_MDL_INPUTQ_ADDR_PTR, /**< pointer to inputQ with shallow copy */ + ACL_MDL_OUTPUTQ_NUM_SIZET, +
ACL_MDL_OUTPUTQ_ADDR_PTR /**< pointer to outputQ with shallow copy */ +} aclmdlConfigAttr; + +typedef enum { + ACL_DATA_WITHOUT_AIPP = 0, + ACL_DATA_WITH_STATIC_AIPP, + ACL_DATA_WITH_DYNAMIC_AIPP, + ACL_DYNAMIC_AIPP_NODE +} aclmdlInputAippType; + +typedef struct aclmdlIODims { + char name[ACL_MAX_TENSOR_NAME_LEN]; /**< tensor name */ + size_t dimCount; /**< dim array count */ + int64_t dims[ACL_MAX_DIM_CNT]; /**< dim data array */ +} aclmdlIODims; + +typedef struct aclAippDims { + aclmdlIODims srcDims; /**< input dims before model transform */ + size_t srcSize; /**< input size before model transform */ + aclmdlIODims aippOutdims; /**< aipp output dims */ + size_t aippOutSize; /**< aipp output size */ +} aclAippDims; + +typedef struct aclmdlBatch { + size_t batchCount; /**< batch array count */ + uint64_t batch[ACL_MAX_BATCH_NUM]; /**< batch data array */ +} aclmdlBatch; + +typedef struct aclmdlHW { + size_t hwCount; /**< height&width array count */ + uint64_t hw[ACL_MAX_HW_NUM][2]; /**< height&width data array */ +} aclmdlHW; + +typedef struct aclAippInfo { + aclAippInputFormat inputFormat; + int32_t srcImageSizeW; + int32_t srcImageSizeH; + int8_t cropSwitch; + int32_t loadStartPosW; + int32_t loadStartPosH; + int32_t cropSizeW; + int32_t cropSizeH; + int8_t resizeSwitch; + int32_t resizeOutputW; + int32_t resizeOutputH; + int8_t paddingSwitch; + int32_t leftPaddingSize; + int32_t rightPaddingSize; + int32_t topPaddingSize; + int32_t bottomPaddingSize; + int8_t cscSwitch; + int8_t rbuvSwapSwitch; + int8_t axSwapSwitch; + int8_t singleLineMode; + int32_t matrixR0C0; + int32_t matrixR0C1; + int32_t matrixR0C2; + int32_t matrixR1C0; + int32_t matrixR1C1; + int32_t matrixR1C2; + int32_t matrixR2C0; + int32_t matrixR2C1; + int32_t matrixR2C2; + int32_t outputBias0; + int32_t outputBias1; + int32_t outputBias2; + int32_t inputBias0; + int32_t inputBias1; + int32_t inputBias2; + int32_t meanChn0; + int32_t meanChn1; + int32_t meanChn2; + int32_t meanChn3; + float 
minChn0; + float minChn1; + float minChn2; + float minChn3; + float varReciChn0; + float varReciChn1; + float varReciChn2; + float varReciChn3; + aclFormat srcFormat; + aclDataType srcDatatype; + size_t srcDimNum; + size_t shapeCount; + aclAippDims outDims[ACL_MAX_SHAPE_COUNT]; + aclAippExtendInfo *aippExtend; /**< reserved parameters, current version needs to be null */ +} aclAippInfo; + +/** + * @ingroup AscendCL + * @brief Create data of type aclmdlDesc + * + * @retval the aclmdlDesc pointer + */ +ACL_FUNC_VISIBILITY aclmdlDesc *aclmdlCreateDesc(); + +/** + * @ingroup AscendCL + * @brief destroy data of type aclmdlDesc + * + * @param modelDesc [IN] Pointer to aclmdlDesc to be destroyed + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlDestroyDesc(aclmdlDesc *modelDesc); + +/** + * @ingroup AscendCL + * @brief Get aclmdlDesc data of the model according to the model ID + * + * @param modelDesc [OUT] aclmdlDesc pointer + * @param modelId [IN] model id + * + * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlGetDesc(aclmdlDesc *modelDesc, uint32_t modelId); + +/** + * @ingroup AscendCL + * @brief Get the number of the inputs of + * the model according to data of aclmdlDesc + * + * @param modelDesc [IN] aclmdlDesc pointer + * + * @retval input size with aclmdlDesc + */ +ACL_FUNC_VISIBILITY size_t aclmdlGetNumInputs(aclmdlDesc *modelDesc); + +/** + * @ingroup AscendCL + * @brief Get the number of the output of + * the model according to data of aclmdlDesc + * + * @param modelDesc [IN] aclmdlDesc pointer + * + * @retval output size with aclmdlDesc + */ +ACL_FUNC_VISIBILITY size_t aclmdlGetNumOutputs(aclmdlDesc *modelDesc); + +/** + * @ingroup AscendCL + * @brief Get the size of the specified input according to + * the data of type aclmdlDesc + * + * @param modelDesc [IN] aclmdlDesc pointer + * @param index [IN] the size of the number of inputs to be obtained, + * the index value starts from 0 + * + * @retval Specify the size of the input + */ +ACL_FUNC_VISIBILITY size_t aclmdlGetInputSizeByIndex(aclmdlDesc *modelDesc, size_t index); + +/** + * @ingroup AscendCL + * @brief Get the size of the specified output according to + * the data of type aclmdlDesc + * + * @param modelDesc [IN] aclmdlDesc pointer + * @param index [IN] the size of the number of outputs to be obtained, + * the index value starts from 0 + * + * @retval Specify the size of the output + */ +ACL_FUNC_VISIBILITY size_t aclmdlGetOutputSizeByIndex(aclmdlDesc *modelDesc, size_t index); + +/** + * @ingroup AscendCL + * @brief Create data of type aclmdlDataset + * + * @retval the aclmdlDataset pointer + */ +ACL_FUNC_VISIBILITY aclmdlDataset *aclmdlCreateDataset(); + +/** + * @ingroup AscendCL + * @brief destroy data of type aclmdlDataset + * + * @param dataset [IN] Pointer to aclmdlDataset to be destroyed + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlDestroyDataset(const aclmdlDataset *dataset); + +/** + * @ingroup AscendCL + * @brief Add aclDataBuffer to aclmdlDataset + * + * @param dataset [OUT] aclmdlDataset address of aclDataBuffer to be added + * @param dataBuffer [IN] aclDataBuffer address to be added + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlAddDatasetBuffer(aclmdlDataset *dataset, aclDataBuffer *dataBuffer); + +/** + * @ingroup AscendCL + * @brief Set aclTensorDesc to aclmdlDataset + * + * @param dataset [OUT] aclmdlDataset address of aclDataBuffer to be added + * @param tensorDesc [IN] aclTensorDesc address to be added + * @param index [IN] index of tensorDesc which to be added + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetDatasetTensorDesc(aclmdlDataset *dataset, aclTensorDesc *tensorDesc, + size_t index); + +/** + * @ingroup AscendCL + * @brief Get the number of aclDataBuffer in aclmdlDataset + * + * @param dataset [IN] aclmdlDataset pointer + * + * @retval the number of aclDataBuffer + */ +ACL_FUNC_VISIBILITY size_t aclmdlGetDatasetNumBuffers(const aclmdlDataset *dataset); + +/** + * @ingroup AscendCL + * @brief Get the aclDataBuffer in aclmdlDataset by index + * + * @param dataset [IN] aclmdlDataset pointer + * @param index [IN] the index of aclDataBuffer + * + * @retval Get successfully, return the address of aclDataBuffer + * @retval Failure return NULL + */ +ACL_FUNC_VISIBILITY aclDataBuffer *aclmdlGetDatasetBuffer(const aclmdlDataset *dataset, size_t index); + +/** + * @ingroup AscendCL + * @brief Load offline model data from files + * and manage memory internally by the system + * + * @par Function + * After the system finishes loading the model, + * the model ID returned is used as a mark to identify the model + * during subsequent
operations + * + * @param modelPath [IN] Storage path for offline model files + * @param modelId [OUT] Model ID generated after + * the system finishes loading the model + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlLoadFromFile(const char *modelPath, uint32_t *modelId); + +/** + * @ingroup AscendCL + * @brief Load offline model data from memory and manage the memory of + * model running internally by the system + * + * @par Function + * After the system finishes loading the model, + * the model ID returned is used as a mark to identify the model + * during subsequent operations + * + * @param model [IN] Model data stored in memory + * @param modelSize [IN] model data size + * @param modelId [OUT] Model ID generated after + * the system finishes loading the model + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlLoadFromMem(const void *model, size_t modelSize, uint32_t *modelId); + +/** + * @ingroup AscendCL + * @brief Load offline model data from a file, + * and the user manages the memory of the model run by itself + * + * @par Function + * After the system finishes loading the model, + * the model ID returned is used as a mark to identify the model + * during subsequent operations. + * @param modelPath [IN] Storage path for offline model files + * @param modelId [OUT] Model ID generated after finishes loading the model + * @param workPtr [IN] A pointer to the working memory + * required by the model on the Device,can be null + * @param workSize [IN] The amount of working memory required by the model + * @param weightPtr [IN] Pointer to model weight memory on Device + * @param weightSize [IN] The amount of weight memory required by the model + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlLoadFromFileWithMem(const char *modelPath, uint32_t *modelId, void *workPtr, + size_t workSize, void *weightPtr, size_t weightSize); + +/** + * @ingroup AscendCL + * @brief Load offline model data from memory, + * and the user can manage the memory of model running + * + * @par Function + * After the system finishes loading the model, + * the model ID returned is used as a mark to identify the model + * during subsequent operations + * @param model [IN] Model data stored in memory + * @param modelSize [IN] model data size + * @param modelId [OUT] Model ID generated after finishes loading the model + * @param workPtr [IN] A pointer to the working memory + * required by the model on the Device,can be null + * @param workSize [IN] work memory size + * @param weightPtr [IN] Pointer to model weight memory on Device,can be null + * @param weightSize [IN] The amount of weight memory required by the model + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlLoadFromMemWithMem(const void *model, size_t modelSize, uint32_t *modelId, + void *workPtr, size_t workSize, void *weightPtr, + size_t weightSize); + +/** + * @ingroup AscendCL + * @brief load model from file with async queue + * + * @param modelPath [IN] model path + * @param modelId [OUT] return model id if load success + * @param inputQ [IN] input queue pointer + * @param inputQNum [IN] input queue num + * @param outputQ [IN] output queue pointer + * @param outputQNum [IN] output queue num + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlLoadFromFileWithQ(const char *modelPath, uint32_t *modelId, const uint32_t *inputQ, + size_t inputQNum, const uint32_t *outputQ, size_t outputQNum); + +/** + * @ingroup AscendCL + * @brief load model from memory with async queue + * + * @param model [IN] model memory which user manages + * @param modelSize [IN] model size + * @param modelId [OUT] return model id if load success + * @param inputQ [IN] input queue pointer + * @param inputQNum [IN] input queue num + * @param outputQ [IN] output queue pointer + * @param outputQNum [IN] output queue num + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlLoadFromMemWithQ(const void *model, size_t modelSize, uint32_t *modelId, + const uint32_t *inputQ, size_t inputQNum, const uint32_t *outputQ, + size_t outputQNum); + +/** + * @ingroup AscendCL + * @brief Execute model synchronous inference until the inference result is returned + * + * @param modelId [IN] ID of the model to perform inference + * @param input [IN] Input data for model inference + * @param output [OUT] Output data for model inference + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlExecute(uint32_t modelId, const aclmdlDataset *input, aclmdlDataset *output); + +/** + * @ingroup AscendCL + * @brief Execute model asynchronous inference until the inference result is returned + * + * @param modelId [IN] ID of the model to perform inference + * @param input [IN] Input data for model inference + * @param output [OUT] Output data for model inference + * @param stream [IN] stream + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see aclmdlLoadFromFile | aclmdlLoadFromMem | aclmdlLoadFromFileWithMem | + * aclmdlLoadFromMemWithMem + */ +ACL_FUNC_VISIBILITY aclError aclmdlExecuteAsync(uint32_t modelId, const aclmdlDataset *input, aclmdlDataset *output, + aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief unload model with model id + * + * @param modelId [IN] model id to be unloaded + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlUnload(uint32_t modelId); + +/** + * @ingroup AscendCL + * @brief Get the weight memory size and working memory size + * required for model execution according to the model file + * + * @param fileName [IN] Model path to get memory information + * @param workSize [OUT] The amount of working memory for model executed + * @param weightSize [OUT] The amount of weight memory for model executed + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlQuerySize(const char *fileName, size_t *workSize, size_t *weightSize); + +/** + * @ingroup AscendCL + * @brief Obtain the weights required for + * model execution according to the model data in memory + * + * @par Restriction + * The execution and weight memory is Device memory, + * and requires user application and release. + * @param model [IN] model memory which user manages + * @param modelSize [IN] model data size + * @param workSize [OUT] The amount of working memory for model executed + * @param weightSize [OUT] The amount of weight memory for model executed + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlQuerySizeFromMem(const void *model, size_t modelSize, size_t *workSize, + size_t *weightSize); + +/** + * @ingroup AscendCL + * @brief In dynamic batch scenarios, + * it is used to set the number of images processed + * at one time during model inference + * + * @param modelId [IN] model id + * @param dataset [IN|OUT] data for model inference + * @param index [IN] index of dynamic tensor + * @param batchSize [IN] Number of images processed at a time during model + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclmdlLoadFromFile | aclmdlLoadFromMem | aclmdlLoadFromFileWithMem | + * aclmdlLoadFromMemWithMem | aclmdlGetInputIndexByName + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetDynamicBatchSize(uint32_t modelId, aclmdlDataset *dataset, size_t index, + uint64_t batchSize); + +/** + * @ingroup AscendCL + * @brief Sets the H and W of the specified input of the model + * + * @param modelId [IN] model id + * @param dataset [IN|OUT] data for model inference + * @param index [IN] index of dynamic tensor + * @param height [IN] model height + * @param width [IN] model width + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclmdlLoadFromFile | aclmdlLoadFromMem | aclmdlLoadFromFileWithMem | + * aclmdlLoadFromMemWithMem | aclmdlGetInputIndexByName + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetDynamicHWSize(uint32_t modelId, aclmdlDataset *dataset, size_t index, + uint64_t height, uint64_t width); + +/** + * @ingroup AscendCL + * @brief Sets the dynamic dims of the specified input of the model + * + * @param modelId [IN] model id + * @param dataset [IN|OUT] data for model inference + * @param index [IN] index of dynamic dims + * @param dims [IN] value of dynamic dims + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see aclmdlLoadFromFile | aclmdlLoadFromMem | aclmdlLoadFromFileWithMem | + * aclmdlLoadFromMemWithMem | aclmdlGetInputIndexByName + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetInputDynamicDims(uint32_t modelId, aclmdlDataset *dataset, size_t index, + const aclmdlIODims *dims); + +/** + * @ingroup AscendCL + * @brief get input dims info + * + * @param modelDesc [IN] model description + * @param index [IN] input tensor index + * @param dims [OUT] dims info + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclmdlGetInputDimsV2 + */ +ACL_FUNC_VISIBILITY aclError aclmdlGetInputDims(const aclmdlDesc *modelDesc, size_t index, aclmdlIODims *dims); + +/** + * @ingroup AscendCL + * @brief get input dims info(version 2), especially for static aipp + * it is the same with aclmdlGetInputDims while model without static aipp + * + * @param modelDesc [IN] model description + * @param index [IN] input tensor index + * @param dims [OUT] dims info + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclmdlGetInputDims + */ +ACL_FUNC_VISIBILITY aclError aclmdlGetInputDimsV2(const aclmdlDesc *modelDesc, size_t index, aclmdlIODims *dims); + +/** + * @ingroup AscendCL + * @brief get output dims info + * + * @param modelDesc [IN] model description + * @param index [IN] output tensor index + * @param dims [OUT] dims info + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlGetOutputDims(const aclmdlDesc *modelDesc, size_t index, aclmdlIODims *dims); + +/** + * @ingroup AscendCL + * @brief get current output dims info + * + * @par Function + * The following use cases are supported: + * @li Get current output shape when model is dynamic and + * dynamic shape info is set + * @li Get max output shape when model is dynamic and + * dynamic shape info is not set + * @li Get actual output shape when model is static + * + * @param modelDesc [IN] model description + * @param index [IN] output tensor index + * @param dims [OUT] dims info + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlGetCurOutputDims(const aclmdlDesc *modelDesc, size_t index, aclmdlIODims *dims); + +/** + * @ingroup AscendCL + * @brief get attr value by op name + * + * @param modelDesc [IN] model description + * @param opName [IN] op name + * @param attr [IN] attr name + * + * @retval the attr value + */ +ACL_FUNC_VISIBILITY const char *aclmdlGetOpAttr(aclmdlDesc *modelDesc, const char *opName, const char *attr); + +/** + * @ingroup AscendCL + * @brief get input name by index + * + * @param modelDesc [IN] model description + * @param index [IN] input tensor index + * + * @retval input tensor name, the same life cycle with modelDesc + */ +ACL_FUNC_VISIBILITY const char *aclmdlGetInputNameByIndex(const aclmdlDesc *modelDesc, size_t index); + +/** + * @ingroup AscendCL + * @brief get output name by index + * + * @param modelDesc [IN] model description + * @param index [IN] output tensor index + * + * @retval output tensor name, the same life cycle with modelDesc + */ +ACL_FUNC_VISIBILITY const char *aclmdlGetOutputNameByIndex(const aclmdlDesc *modelDesc, size_t index); + +/** + * @ingroup AscendCL + * @brief get input format by index + * + * @param modelDesc [IN] model description + * @param index [IN] input tensor 
index + * + * @retval input tensor format + */ +ACL_FUNC_VISIBILITY aclFormat aclmdlGetInputFormat(const aclmdlDesc *modelDesc, size_t index); + +/** + * @ingroup AscendCL + * @brief get output format by index + * + * @param modelDesc [IN] model description + * @param index [IN] output tensor index + * + * @retval output tensor format + */ +ACL_FUNC_VISIBILITY aclFormat aclmdlGetOutputFormat(const aclmdlDesc *modelDesc, size_t index); + +/** + * @ingroup AscendCL + * @brief get input data type by index + * + * @param modelDesc [IN] model description + * @param index [IN] input tensor index + * + * @retval input tensor data type + */ +ACL_FUNC_VISIBILITY aclDataType aclmdlGetInputDataType(const aclmdlDesc *modelDesc, size_t index); + +/** + * @ingroup AscendCL + * @brief get output data type by index + * + * @param modelDesc [IN] model description + * @param index [IN] output tensor index + * + * @retval output tensor data type + */ +ACL_FUNC_VISIBILITY aclDataType aclmdlGetOutputDataType(const aclmdlDesc *modelDesc, size_t index); + +/** + * @ingroup AscendCL + * @brief get input tensor index by name + * + * @param modelDesc [IN] model description + * @param name [IN] input tensor name + * @param index [OUT] input tensor index + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlGetOutputIndexByName(const aclmdlDesc *modelDesc, const char *name, size_t *index); + +/** + * @ingroup AscendCL + * @brief get dynamic batch info + * + * @param modelDesc [IN] model description + * @param batch [OUT] dynamic batch info + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlGetDynamicBatch(const aclmdlDesc *modelDesc, aclmdlBatch *batch); + +/** + * @ingroup AscendCL + * @brief get dynamic height&width info + * + * @param modelDesc [IN] model description + * @param index [IN] input tensor index + * @param hw [OUT] dynamic height&width info + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlGetDynamicHW(const aclmdlDesc *modelDesc, size_t index, aclmdlHW *hw); + +/** + * @ingroup AscendCL + * @brief get dynamic gear count + * + * @param modelDesc [IN] model description + * @param index [IN] unused, must be -1 + * @param gearCount [OUT] dynamic gear count + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlGetInputDynamicGearCount(const aclmdlDesc *modelDesc, size_t index, + size_t *gearCount); + +/** + * @ingroup AscendCL + * @brief get dynamic dims info + * + * @param modelDesc [IN] model description + * @param index [IN] unused, must be -1 + * @param dims [OUT] value of dynamic dims + * @param gearCount [IN] dynamic gear count + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlGetInputDynamicDims(const aclmdlDesc *modelDesc, size_t index, aclmdlIODims *dims, + size_t gearCount); + +/** + * @ingroup AscendCL + * @brief Create data of type aclmdlAIPP + * + * @param batchSize [IN] batchsizes of model + * + * @retval the aclmdlAIPP pointer + */ +ACL_FUNC_VISIBILITY aclmdlAIPP *aclmdlCreateAIPP(uint64_t batchSize); + +/** + * @ingroup AscendCL + * @brief destroy data of type aclmdlAIPP + * + * @param aippParmsSet [IN] Pointer for aclmdlAIPP to be destroyed + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlDestroyAIPP(const aclmdlAIPP *aippParmsSet); + +/** + * @ingroup AscendCL + * @brief set InputFormat of type aclmdlAIPP + * + * @param aippParmsSet [OUT] Pointer for aclmdlAIPP + * @param inputFormat [IN] The inputFormat of aipp + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPInputFormat(aclmdlAIPP *aippParmsSet, aclAippInputFormat inputFormat); + +/** + * @ingroup AscendCL + * @brief set cscParms of type aclmdlAIPP + * + * @param aippParmsSet [OUT] Pointer for aclmdlAIPP + * @param csc_switch [IN] Csc switch + * @param cscMatrixR0C0 [IN] Csc_matrix_r0_c0 + * @param cscMatrixR0C1 [IN] Csc_matrix_r0_c1 + * @param cscMatrixR0C2 [IN] Csc_matrix_r0_c2 + * @param cscMatrixR1C0 [IN] Csc_matrix_r1_c0 + * @param cscMatrixR1C1 [IN] Csc_matrix_r1_c1 + * @param cscMatrixR1C2 [IN] Csc_matrix_r1_c2 + * @param cscMatrixR2C0 [IN] Csc_matrix_r2_c0 + * @param cscMatrixR2C1 [IN] Csc_matrix_r2_c1 + * @param cscMatrixR2C2 [IN] Csc_matrix_r2_c2 + * @param cscOutputBiasR0 [IN] Output Bias for RGB to YUV, element of row 0, unsigned number + * @param cscOutputBiasR1 [IN] Output Bias for RGB to YUV, element of row 1, unsigned number + * @param cscOutputBiasR2 [IN] Output Bias for 
RGB to YUV, element of row 2, unsigned number + * @param cscInputBiasR0 [IN] Input Bias for YUV to RGB, element of row 0, unsigned number + * @param cscInputBiasR1 [IN] Input Bias for YUV to RGB, element of row 1, unsigned number + * @param cscInputBiasR2 [IN] Input Bias for YUV to RGB, element of row 2, unsigned number + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPCscParams(aclmdlAIPP *aippParmsSet, int8_t csc_switch, int16_t cscMatrixR0C0, + int16_t cscMatrixR0C1, int16_t cscMatrixR0C2, int16_t cscMatrixR1C0, + int16_t cscMatrixR1C1, int16_t cscMatrixR1C2, int16_t cscMatrixR2C0, + int16_t cscMatrixR2C1, int16_t cscMatrixR2C2, + uint8_t cscOutputBiasR0, uint8_t cscOutputBiasR1, + uint8_t cscOutputBiasR2, uint8_t cscInputBiasR0, + uint8_t cscInputBiasR1, uint8_t cscInputBiasR2); + +/** + * @ingroup AscendCL + * @brief set rb/ub swap switch of type aclmdlAIPP + * + * @param aippParmsSet [OUT] Pointer for aclmdlAIPP + * @param rbuvSwapSwitch [IN] rb/ub swap switch + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPRbuvSwapSwitch(aclmdlAIPP *aippParmsSet, int8_t rbuvSwapSwitch); + +/** + * @ingroup AscendCL + * @brief set RGBA->ARGB, YUVA->AYUV swap switch of type aclmdlAIPP + * + * @param aippParmsSet [OUT] Pointer for aclmdlAIPP + * @param axSwapSwitch [IN] RGBA->ARGB, YUVA->AYUV swap switch + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPAxSwapSwitch(aclmdlAIPP *aippParmsSet, int8_t axSwapSwitch); + +/** + * @ingroup AscendCL + * @brief set source image of type aclmdlAIPP + * + * @param aippParmsSet [OUT] Pointer for aclmdlAIPP + * @param srcImageSizeW [IN] Source image width + * @param srcImageSizeH [IN] Source image height + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPSrcImageSize(aclmdlAIPP *aippParmsSet, int32_t srcImageSizeW, + int32_t srcImageSizeH); + +/** + * @ingroup AscendCL + * @brief set resize switch of type aclmdlAIPP + * + * @param aippParmsSet [OUT] Pointer for aclmdlAIPP + * @param scfSwitch [IN] Resize switch + * @param scfInputSizeW [IN] Input width of scf + * @param scfInputSizeH [IN] Input height of scf + * @param scfOutputSizeW [IN] Output width of scf + * @param scfOutputSizeH [IN] Output height of scf + * @param batchIndex [IN] Batch parameter index + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPScfParams(aclmdlAIPP *aippParmsSet, int8_t scfSwitch, int32_t scfInputSizeW, + int32_t scfInputSizeH, int32_t scfOutputSizeW, + int32_t scfOutputSizeH, uint64_t batchIndex); + +/** + * @ingroup AscendCL + * @brief set cropParams of type aclmdlAIPP + * + * @param aippParmsSet [OUT] Pointer for aclmdlAIPP + * @param cropSwitch [IN] Crop switch + * @param cropStartPosW [IN] The start horizontal position of cropping + * @param cropStartPosH [IN] The start vertical position of cropping + * @param cropSizeW [IN] Crop width + * @param cropSizeH [IN] Crop height + * @param batchIndex [IN] Batch parameter index + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPCropParams(aclmdlAIPP *aippParmsSet, int8_t cropSwitch, int32_t cropStartPosW, + int32_t cropStartPosH, int32_t cropSizeW, int32_t cropSizeH, + uint64_t batchIndex); + +/** + * @ingroup AscendCL + * @brief set paddingParams of type aclmdlAIPP + * + * @param aippParmsSet [OUT] Pointer for aclmdlAIPP + * @param paddingSwitch [IN] Padding switch + * @param paddingSizeTop [IN] Top padding size + * @param paddingSizeBottom [IN] Bottom padding size + * @param paddingSizeLeft [IN] Left padding size + * @param paddingSizeRight [IN] Right padding size + * @param batchIndex [IN] Batch parameter index + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPPaddingParams(aclmdlAIPP *aippParmsSet, int8_t paddingSwitch, + int32_t paddingSizeTop, int32_t paddingSizeBottom, + int32_t paddingSizeLeft, int32_t paddingSizeRight, + uint64_t batchIndex); + +/** + * @ingroup AscendCL + * @brief set DtcPixelMean of type aclmdlAIPP + * + * @param aippParmsSet [OUT] Pointer for aclmdlAIPP + * @param dtcPixelMeanChn0 [IN] Mean value of channel 0 + * @param dtcPixelMeanChn1 [IN] Mean value of channel 1 + * @param dtcPixelMeanChn2 [IN] Mean value of channel 2 + * @param dtcPixelMeanChn3 [IN] Mean value of channel 3 + * @param batchIndex [IN] Batch parameter index + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPDtcPixelMean(aclmdlAIPP *aippParmsSet, int16_t dtcPixelMeanChn0, + int16_t dtcPixelMeanChn1, int16_t dtcPixelMeanChn2, + int16_t dtcPixelMeanChn3, uint64_t batchIndex); + +/** + * @ingroup AscendCL + * @brief set DtcPixelMin of type aclmdlAIPP + * + * @param aippParmsSet [OUT] Pointer for aclmdlAIPP + * @param dtcPixelMinChn0 [IN] Min value of channel 0 + * @param dtcPixelMinChn1 [IN] Min value of channel 1 + * @param dtcPixelMinChn2 [IN] Min value of channel 2 + * @param dtcPixelMinChn3 [IN] Min value of channel 3 + * @param batchIndex [IN] Batch parameter index + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPDtcPixelMin(aclmdlAIPP *aippParmsSet, float dtcPixelMinChn0, + float dtcPixelMinChn1, float dtcPixelMinChn2, + float dtcPixelMinChn3, uint64_t batchIndex); + +/** + * @ingroup AscendCL + * @brief set PixelVarReci of type aclmdlAIPP + * + * @param aippParmsSet [OUT] Pointer for aclmdlAIPP + * @param dtcPixelVarReciChn0 [IN] sfr_dtc_pixel_variance_reci_ch0 + * @param dtcPixelVarReciChn1 [IN] sfr_dtc_pixel_variance_reci_ch1 + * @param dtcPixelVarReciChn2 [IN] sfr_dtc_pixel_variance_reci_ch2 + * @param dtcPixelVarReciChn3 [IN] sfr_dtc_pixel_variance_reci_ch3 + * @param batchIndex [IN] Batch parameter index + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPPixelVarReci(aclmdlAIPP *aippParmsSet, float dtcPixelVarReciChn0, + float dtcPixelVarReciChn1, float dtcPixelVarReciChn2, + float dtcPixelVarReciChn3, uint64_t batchIndex); + +/** + * @ingroup AscendCL + * @brief set aipp parameters to model + * + * @param modelId [IN] model id + * @param dataset [IN] Pointer of dataset + * @param index [IN] index of input for aipp data(ACL_DYNAMIC_AIPP_NODE) + * @param aippParmsSet [IN] Pointer for aclmdlAIPP + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclmdlLoadFromFile | aclmdlLoadFromMem | aclmdlLoadFromFileWithMem | + * aclmdlLoadFromMemWithMem | aclmdlGetInputIndexByName | aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetInputAIPP(uint32_t modelId, aclmdlDataset *dataset, size_t index, + const aclmdlAIPP *aippParmsSet); + +/** + * @ingroup AscendCL + * @brief set aipp parameters to model + * + * @param modelId [IN] model id + * @param dataset [IN] Pointer of dataset + * @param index [IN] index of input for data which linked dynamic aipp(ACL_DATA_WITH_DYNAMIC_AIPP) + * @param aippParmsSet [IN] Pointer for aclmdlAIPP + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see aclmdlLoadFromFile | aclmdlLoadFromMem | aclmdlLoadFromFileWithMem | + * aclmdlLoadFromMemWithMem | aclmdlGetInputIndexByName | aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPByInputIndex(uint32_t modelId, aclmdlDataset *dataset, size_t index, + const aclmdlAIPP *aippParmsSet); + +/** + * @ingroup AscendCL + * @brief get input aipp type + * + * @param modelId [IN] model id + * @param index [IN] index of input + * @param type [OUT] aipp type for input. Refer to aclmdlInputAippType(enum) + * @param dynamicAttachedDataIndex [OUT] index for dynamic attached data(ACL_DYNAMIC_AIPP_NODE) + * valid when type is ACL_DATA_WITH_DYNAMIC_AIPP, invalid value is ACL_INVALID_NODE_INDEX + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclmdlLoadFromFile | aclmdlLoadFromMem | aclmdlLoadFromFileWithMem | + * aclmdlLoadFromMemWithMem | aclmdlGetInputIndexByName | aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlGetAippType(uint32_t modelId, size_t index, aclmdlInputAippType *type, + size_t *dynamicAttachedDataIndex); + +/** + * @ingroup AscendCL + * @brief get static aipp parameters from model + * + * @param modelId [IN] model id + * @param index [IN] index of tensor + * @param aippinfo [OUT] Pointer for static aipp info + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval ACL_ERROR_MODEL_AIPP_NOT_EXIST The tensor of index is not configured with aipp + * @retval OtherValues Failure + * + * @see aclmdlLoadFromFile | aclmdlLoadFromMem | aclmdlLoadFromFileWithMem | + * aclmdlLoadFromMemWithMem | aclmdlGetInputIndexByName + */ +ACL_FUNC_VISIBILITY aclError aclmdlGetFirstAippInfo(uint32_t modelId, size_t index, aclAippInfo *aippinfo); + +/** + * @ingroup AscendCL + * @brief get op description info + * + * @param deviceId [IN] device id + * @param streamId [IN] stream id + * @param taskId [IN] task id + * @param opName [OUT] pointer to op name + * @param opNameLen [IN] the length of op name + * @param inputDesc [OUT] pointer to input description + * @param numInputs [OUT] the number of input tensor + * @param outputDesc [OUT] pointer to output description + * @param numOutputs [OUT] the number of output tensor + * + * @retval ACL_SUCCESS The function is successfully executed + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlCreateAndGetOpDesc(uint32_t deviceId, uint32_t streamId, uint32_t taskId, + char *opName, size_t opNameLen, aclTensorDesc **inputDesc, + size_t *numInputs, aclTensorDesc **outputDesc, + size_t *numOutputs); + +/** + * @ingroup AscendCL + * @brief init dump + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlInitDump(); + +/** + * @ingroup AscendCL + * @brief set param of dump + * + * @param dumpCfgPath [IN] the path of dump config + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetDump(const char *dumpCfgPath); + +/** + * @ingroup AscendCL + * @brief finalize dump. + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlFinalizeDump(); + +/** + * @ingroup AscendCL + * @brief load model with config + * + * @param handle [IN] pointer to model config handle + * @param modelId [OUT] pointer to model id + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlLoadWithConfig(const aclmdlConfigHandle *handle, uint32_t *modelId); + +/** + * @ingroup AscendCL + * @brief create model config handle of type aclmdlConfigHandle + * + * @retval the aclmdlConfigHandle pointer + * + * @see aclmdlDestroyConfigHandle + */ +ACL_FUNC_VISIBILITY aclmdlConfigHandle *aclmdlCreateConfigHandle(); + +/** + * @ingroup AscendCL + * @brief destroy data of type aclmdlConfigHandle + * + * @param handle [IN] pointer to model config handle + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclmdlCreateConfigHandle + */ +ACL_FUNC_VISIBILITY aclError aclmdlDestroyConfigHandle(aclmdlConfigHandle *handle); + +/** + * @ingroup AscendCL + * @brief set config for model load + * + * @param handle [OUT] pointer to model config handle + * @param attr [IN] config attr in model config handle to be set + * @param attrValue [IN] pointer to model config value + * @param valueSize [IN] memory size of attrValue + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetConfigOpt(aclmdlConfigHandle *handle, aclmdlConfigAttr attr, + const void *attrValue, size_t valueSize); + +/** + * @ingroup AscendCL + * @brief get real tensor name from modelDesc + * + * @param modelDesc [IN] pointer to modelDesc + * @param name [IN] tensor name + * + * @retval the pointer of real tensor name + * @retval Failure return NULL + */ +ACL_FUNC_VISIBILITY const char *aclmdlGetTensorRealName(const aclmdlDesc *modelDesc, const char *name); + +#ifdef __cplusplus +} +#endif + +#endif // INC_EXTERNAL_ACL_ACL_MODEL_H_ diff --git a/inc/external/acl/acl_op.h b/inc/external/acl/acl_op.h new file mode 100644 index 00000000..d2e59bfb --- /dev/null +++ b/inc/external/acl/acl_op.h @@ -0,0 +1,504 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef INC_EXTERNAL_ACL_ACL_OP_H_ +#define INC_EXTERNAL_ACL_ACL_OP_H_ + +#include "acl_base.h" +#include "acl_rt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct aclopHandle aclopHandle; +typedef struct aclopAttr aclopAttr; +typedef struct aclopKernelDesc aclopKernelDesc; + +typedef void (*aclDataDeallocator)(void *data, size_t length); + +static const int ACL_COMPILE_FLAG_BIN_SELECTOR = 1; + +typedef enum aclEngineType { + ACL_ENGINE_SYS, + ACL_ENGINE_AICORE, + ACL_ENGINE_VECTOR, +} aclopEngineType; + +/** + * @ingroup AscendCL + * @brief Set base directory that contains single op models + * + * @par Restriction + * The aclopSetModelDir interface can be called only once in a process. + * @param modelDir [IN] path of the directory + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopSetModelDir(const char *modelDir); + +/** + * @ingroup AscendCL + * @brief load single op models from memory + * + * @par Restriction + * The aclopLoad interface can be called more than once in a process. + * @param model [IN] address of single op models + * @param modelSize [IN] size of single op models + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopLoad(const void *model, size_t modelSize); + +/** + * @ingroup AscendCL + * @brief create data of type aclopAttr + * + * @retval pointer to created instance. + * @retval nullptr if run out of memory + */ +ACL_FUNC_VISIBILITY aclopAttr *aclopCreateAttr(); + +/** + * @ingroup AscendCL + * @brief destroy data of type aclopAttr + * + * @param attr [IN] pointer to the instance of aclopAttr + */ +ACL_FUNC_VISIBILITY void aclopDestroyAttr(const aclopAttr *attr); + +/** + * @ingroup AscendCL + * @brief set an attribute. 
the type of the attribute is bool + * + * @param attr [OUT] pointer to the instance of aclopAttr + * @param attrName [IN] attribute name + * @param attrValue [IN] attribute value + * false if attrValue is 0, true otherwise. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopSetAttrBool(aclopAttr *attr, const char *attrName, uint8_t attrValue); + +/** + * @ingroup AscendCL + * @brief set an attribute. the type of the attribute is int64_t + * + * @param attr [OUT] pointer to the instance of aclopAttr + * @param attrName [IN] attribute name + * @param attrValue [IN] attribute value + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopSetAttrInt(aclopAttr *attr, const char *attrName, int64_t attrValue); + +/** + * @ingroup AscendCL + * @brief set an attribute. the type of the attribute is float + * + * @param attr [OUT] pointer to the instance of aclopAttr + * @param attrName [IN] attribute name + * @param attrValue [IN] attribute value + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopSetAttrFloat(aclopAttr *attr, const char *attrName, float attrValue); + +/** + * @ingroup AscendCL + * @brief set an attribute. the type of the attribute is string + * + * @param attr [OUT] pointer to the instance of aclopAttr + * @param attrName [IN] attribute name + * @param attrValue [IN] attribute value + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopSetAttrString(aclopAttr *attr, const char *attrName, const char *attrValue); + +/** + * @ingroup AscendCL + * @brief set an attribute. 
the type of the attribute is list of bools + * + * @param attr [OUT] pointer to the instance of aclopAttr + * @param attrName [IN] attribute name + * @param numValues [IN] number of values. false if attrValue is 0, true otherwise. + * @param values [IN] pointer to values + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopSetAttrListBool(aclopAttr *attr, const char *attrName, int numValues, + const uint8_t *values); + +/** + * @ingroup AscendCL + * @brief set an attribute. the type of the attribute is list of ints + * + * @param attr [OUT] pointer to the instance of aclopAttr + * @param attrName [IN] attribute name + * @param numValues [IN] number of values + * @param values [IN] pointer to values + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopSetAttrListInt(aclopAttr *attr, const char *attrName, int numValues, + const int64_t *values); + +/** + * @ingroup AscendCL + * @brief set an attribute. the type of the attribute is list of floats + * + * @param attr [OUT] pointer to the instance of aclopAttr + * @param attrName [IN] attribute name + * @param numValues [IN] number of values + * @param values [IN] pointer to values + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopSetAttrListFloat(aclopAttr *attr, const char *attrName, int numValues, + const float *values); + +/** + * @ingroup AscendCL + * @brief set an attribute. the type of the attribute is list of strings + * + * @param attr [OUT] pointer to the instance of aclopAttr + * @param attrName [IN] attribute name + * @param numValues [IN] number of values + * @param values [IN] pointer to values + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopSetAttrListString(aclopAttr *attr, const char *attrName, int numValues, + const char **values); + +/** + * @ingroup AscendCL + * @brief set an attribute. the type of the attribute is list of list of ints + * + * @param attr [OUT] pointer to the instance of aclopAttr + * @param attrName [IN] attribute name + * @param numLists [IN] number of lists + * @param numValues [IN] pointer to number of values of each list + * @param values [IN] pointer to values + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopSetAttrListListInt(aclopAttr *attr, const char *attrName, int numLists, + const int *numValues, const int64_t *const values[]); + +/** + * @ingroup AscendCL + * @brief Load and execute the specified operator asynchronously + * + * @par Restriction + * @li The input and output organization of each operator is different, + * and the application needs to organize the operator strictly + * according to the operator input and output parameters when calling. + * @li When the user calls aclopExecute, + * the ACL finds the corresponding task according to the optype, + * the description of the input tesnsor, + * the description of the output tesnsor, and attr, and issues the execution. + * + * @param opType [IN] type of op + * @param numInputs [IN] number of inputs + * @param inputDesc [IN] pointer to array of input tensor descriptions + * @param inputs [IN] pointer to array of input buffers + * @param numOutputs [IN] number of outputs + * @param outputDesc [IN] pointer to array of output tensor descriptions + * @param outputs [OUT] pointer to array of output buffers + * @param attr [IN] pointer to instance of aclopAttr. + * may pass nullptr if the op has no attribute + * @param stream [IN] stream + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_DEPRECATED_MESSAGE("aclopExecute is deprecated, use aclopExecuteV2 instead") +ACL_FUNC_VISIBILITY aclError aclopExecute(const char *opType, int numInputs, const aclTensorDesc *const inputDesc[], + const aclDataBuffer *const inputs[], int numOutputs, + const aclTensorDesc *const outputDesc[], aclDataBuffer *const outputs[], + const aclopAttr *attr, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief Load and execute the specified operator + * The difference with aclopExecute is that aclopExecuteV2 will refresh outputDesc + * + * @par Restriction + * @li The input and output organization of each operator is different, + * and the application needs to organize the operator strictly + * according to the operator input and output parameters when calling. + * @li When the user calls aclopExecuteV2, + * the ACL finds the corresponding task according to the optype, + * the description of the input tensor, + * the description of the output tensor, and attr, and issues the execution. + * + * @param opType [IN] type of op + * @param numInputs [IN] number of inputs + * @param inputDesc [IN] pointer to array of input tensor descriptions + * @param inputs [IN] pointer to array of input buffers + * @param numOutputs [IN] number of outputs + * @param outputDesc [IN|OUT] pointer to array of output tensor descriptions + * @param outputs [OUT] pointer to array of output buffers + * @param attr [IN] pointer to instance of aclopAttr. + * may pass nullptr if the op has no attribute + * @param stream [IN] stream + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopExecuteV2(const char *opType, int numInputs, aclTensorDesc *inputDesc[], + aclDataBuffer *inputs[], int numOutputs, aclTensorDesc *outputDesc[], + aclDataBuffer *outputs[], aclopAttr *attr, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief create an instance of aclopHandle. 
+ * + * @param opType [IN] type of op + * @param numInputs [IN] number of inputs + * @param inputDesc [IN] pointer to array of input tensor descriptions + * @param numOutputs [IN] number of outputs + * @param outputDesc [IN] pointer to array of output tensor descriptions + * @param opAttr [IN] pointer to instance of aclopAttr. + * may pass nullptr if the op has no attribute + * @param handle [OUT] pointer to the pointer to the handle + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopCreateHandle(const char *opType, int numInputs, + const aclTensorDesc *const inputDesc[], int numOutputs, + const aclTensorDesc *const outputDesc[], const aclopAttr *opAttr, + aclopHandle **handle); + +/** + * @ingroup AscendCL + * @brief destroy aclopHandle instance + * + * @param handle [IN] pointer to the instance of aclopHandle + */ +ACL_FUNC_VISIBILITY void aclopDestroyHandle(aclopHandle *handle); + +/** + * @ingroup AscendCL + * @brief execute an op with the handle. + * can save op model matching cost compared with aclopExecute + * + * @param handle [IN] pointer to the instance of aclopHandle. + * The aclopCreateHandle interface has been called + * in advance to create aclopHandle type data. + * @param numInputs [IN] number of inputs + * @param inputs [IN] pointer to array of input buffers. + * The aclCreateDataBuffer interface has been called + * in advance to create aclDataBuffer type data. + * @param numOutputs [IN] number of outputs + * @param outputs [OUT] pointer to array of output buffers + * @param stream [IN] stream + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see aclopCreateHandle | aclCreateDataBuffer + */ +ACL_FUNC_VISIBILITY aclError aclopExecWithHandle(aclopHandle *handle, int numInputs, + const aclDataBuffer *const inputs[], int numOutputs, + aclDataBuffer *const outputs[], aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief cast data type + * + * @param srcDesc [IN] source tensor desc + * @param srcBuffer [IN] source tensor buffer + * @param dstDesc [IN] destination tensor desc + * @param dstBuffer [OUT] destination tensor buffer + * @param truncate [IN] do not truncate if value is 0, truncate otherwise + * @param stream [IN] stream + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopCast(const aclTensorDesc *srcDesc, const aclDataBuffer *srcBuffer, + const aclTensorDesc *dstDesc, aclDataBuffer *dstBuffer, uint8_t truncate, + aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief create a handle for casting datatype + * + * @param srcDesc [IN] source tensor desc + * @param dstDesc [IN] destination tensor desc + * @param truncate [IN] do not truncate if value is 0, truncate otherwise + * @param handle [OUT] pointer to the pointer to the handle + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopCreateHandleForCast(aclTensorDesc *srcDesc, aclTensorDesc *dstDesc, uint8_t truncate, + aclopHandle **handle); + +/** + * @ingroup AscendCL + * @brief create kernel + * + * @param opType [IN] op type + * @param kernelId [IN] kernel id + * @param kernelName [IN] kernel name + * @param binData [IN] kernel bin data + * @param binSize [IN] kernel bin size + * @param enginetype [IN] engine type + * @param deallocator [IN] callback function for deallocating bin data, + * null if bin data to be deallocated by caller + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see aclopCompile + */ +ACL_FUNC_VISIBILITY aclError aclopCreateKernel(const char *opType, const char *kernelId, const char *kernelName, + void *binData, int binSize, aclopEngineType enginetype, + aclDataDeallocator deallocator); + +/** + * @ingroup AscendCL + * @brief create kernel + * + * @param numInputs [IN] number of inputs + * @param inputDesc [IN] pointer to array of input tensor descriptions + * @param numOutputs [IN] number of outputs + * @param outputDesc [IN] pointer to array of output tensor descriptions + * @param opAttr [IN] pointer to instance of aclopAttr + * @param aclopKernelDesc [IN] pointer to instance of aclopKernelDesc + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +typedef aclError (*aclopCompileFunc)(int numInputs, const aclTensorDesc *const inputDesc[], int numOutputs, + const aclTensorDesc *const outputDesc[], const aclopAttr *opAttr, + aclopKernelDesc *aclopKernelDesc); + +/** + * @ingroup AscendCL + * @brief register compile function + * + * @param opType [IN] op type + * @param func [IN] compile function + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclopUnregisterCompileFunc + */ +ACL_FUNC_VISIBILITY aclError aclopRegisterCompileFunc(const char *opType, aclopCompileFunc func); + +/** + * @ingroup AscendCL + * @brief unregister compile function + * + * @param opType [IN] op type + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopUnregisterCompileFunc(const char *opType); + +/** + * @ingroup AscendCL + * @brief set kernel args + * + * @param kernelDesc [IN] pointer to instance of aclopKernelDesc + * @param kernelId [IN] kernel id + * @param blockDim [IN] block dim + * @param args [IN] args + * @param argSize [IN] size in bytes of args + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopSetKernelArgs(aclopKernelDesc *kernelDesc, const char *kernelId, uint32_t blockDim, + const void *args, uint32_t argSize); + +/** + * @ingroup AscendCL + * @brief set workspace sizes + * + * @param kernelDesc [IN] pointer to instance of aclopKernelDesc + * @param numWorkspaces [IN] number of workspaces + * @param workspaceSizes [IN] pointer to array of sizes of workspaces + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopSetKernelWorkspaceSizes(aclopKernelDesc *kernelDesc, int numWorkspaces, + size_t *workspaceSizes); + +/** + * @ingroup AscendCL + * @brief compile op with dynamic shape + * + * @param opType [IN] op type + * @param numInputs [IN] number of inputs + * @param inputDesc [IN] pointer to array of input tensor descriptions + * @param numOutputs [IN] number of outputs + * @param outputDesc [IN] pointer to array of output tensor descriptions + * @param attr [IN] pointer to instance of aclopAttr. + * may pass nullptr if the op has no attribute + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopUpdateParams(const char *opType, int numInputs, + const aclTensorDesc *const inputDesc[], int numOutputs, + const aclTensorDesc *const outputDesc[], const aclopAttr *attr); + +/** + * @ingroup AscendCL + * @brief inferShape the specified operator synchronously + * + * @param opType [IN] type of op + * @param numInputs [IN] number of inputs + * @param inputDesc [IN] pointer to array of input tensor descriptions + * @param inputs [IN] pointer to array of input buffers + * @param numOutputs [IN] number of outputs + * @param outputDesc [OUT] pointer to array of output tensor descriptions + * @param attr [IN] pointer to instance of aclopAttr. + * may pass nullptr if the op has no attribute + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopInferShape(const char *opType, int numInputs, aclTensorDesc *inputDesc[], + aclDataBuffer *inputs[], int numOutputs, aclTensorDesc *outputDesc[], + aclopAttr *attr); + +#ifdef __cplusplus +} +#endif + +#endif // INC_EXTERNAL_ACL_ACL_OP_H_ diff --git a/inc/external/acl/acl_op_compiler.h b/inc/external/acl/acl_op_compiler.h new file mode 100644 index 00000000..d9d1b3da --- /dev/null +++ b/inc/external/acl/acl_op_compiler.h @@ -0,0 +1,121 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef INC_EXTERNAL_ACL_ACL_OP_COMPILER_H_ +#define INC_EXTERNAL_ACL_ACL_OP_COMPILER_H_ + +#include "acl_base.h" +#include "acl_op.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum aclCompileType { ACL_COMPILE_SYS, ACL_COMPILE_UNREGISTERED } aclopCompileType; + +typedef enum { + ACL_PRECISION_MODE, + ACL_AICORE_NUM, + ACL_AUTO_TUNE_MODE, + ACL_OP_SELECT_IMPL_MODE, + ACL_OPTYPELIST_FOR_IMPLMODE, + ACL_OP_DEBUG_LEVEL, + ACL_DEBUG_DIR, + ACL_OP_COMPILER_CACHE_MODE, + ACL_OP_COMPILER_CACHE_DIR, + ACL_OP_PERFORMANCE_MODE +} aclCompileOpt; + +typedef enum aclCompileFlag { ACL_OP_COMPILE_DEFAULT, ACL_OP_COMPILE_FUZZ } aclOpCompileFlag; + +/** + * @ingroup AscendCL + * @brief compile op + * + * @param opType [IN] op type + * @param numInputs [IN] number of inputs + * @param inputDesc [IN] pointer to array of input tensor descriptions + * @param numOutputs [IN] number of outputs + * @param outputDesc [IN] pointer to array of output tensor descriptions + * @param attr [IN] pointer to instance of aclopAttr. + * may pass nullptr if the op has no attribute + * @param engineType [IN] engine type + * @param compileFlag [IN] compile flag + * @param opPath [IN] path of op + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopCompile(const char *opType, int numInputs, const aclTensorDesc *const inputDesc[], + int numOutputs, const aclTensorDesc *const outputDesc[], + const aclopAttr *attr, aclopEngineType engineType, + aclopCompileType compileFlag, const char *opPath); + +/** + * @ingroup AscendCL + * @brief compile and execute op + * + * @param opType [IN] op type + * @param numInputs [IN] number of inputs + * @param inputDesc [IN] pointer to array of input tensor descriptions + * @param inputs [IN] pointer to array of input buffers + * @param numOutputs [IN] number of outputs + * @param outputDesc [IN] pointer to array of output tensor descriptions + * @param outputs [IN] pointer to array of outputs buffers + * @param attr [IN] pointer to instance of aclopAttr. + * may pass nullptr if the op has no attribute + * @param engineType [IN] engine type + * @param compileFlag [IN] compile flag + * @param opPath [IN] path of op + * @param stream [IN] stream handle + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopCompileAndExecute( + const char *opType, int numInputs, const aclTensorDesc *const inputDesc[], const aclDataBuffer *const inputs[], + int numOutputs, const aclTensorDesc *const outputDesc[], aclDataBuffer *const outputs[], const aclopAttr *attr, + aclopEngineType engineType, aclopCompileType compileFlag, const char *opPath, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief set compile option + * + * @param aclCompileOpt [IN] compile option + * @param value [IN] pointer for the option value + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclSetCompileopt(aclCompileOpt opt, const char *value); + +/** + * @ingroup AscendCL + * @brief set compile flag + * + * @param flag [IN] compile flag, ACL_OP_COMPILE_DEFAULT means compile with default mode + * ACL_OP_COMPILE_FUZZ means compile with fuzz mode + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopSetCompileFlag(aclOpCompileFlag flag); + +#ifdef __cplusplus +} +#endif + +#endif // INC_EXTERNAL_ACL_ACL_OP_COMPILER_H_ diff --git a/inc/external/acl/acl_prof.h b/inc/external/acl/acl_prof.h new file mode 100644 index 00000000..3784d8c6 --- /dev/null +++ b/inc/external/acl/acl_prof.h @@ -0,0 +1,329 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef INC_EXTERNAL_ACL_PROF_H_ +#define INC_EXTERNAL_ACL_PROF_H_ + +#include "acl_base.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define ACL_PROF_ACL_API 0x0001 +#define ACL_PROF_TASK_TIME 0x0002 +#define ACL_PROF_AICORE_METRICS 0x0004 +#define ACL_PROF_AICPU 0x0008 + +/** + * @deprecated please use aclprofGetOpTypeLen and aclprofGetOpNameLen instead + */ +#define ACL_PROF_MAX_OP_NAME_LEN 257 +#define ACL_PROF_MAX_OP_TYPE_LEN 65 + +typedef enum { + ACL_AICORE_ARITHMETIC_UTILIZATION = 0, + ACL_AICORE_PIPE_UTILIZATION = 1, + ACL_AICORE_MEMORY_BANDWIDTH = 2, + ACL_AICORE_L0B_AND_WIDTH = 3, + ACL_AICORE_RESOURCE_CONFLICT_RATIO = 4, + ACL_AICORE_NONE = 0xFF +} aclprofAicoreMetrics; + +typedef struct aclprofConfig aclprofConfig; +typedef struct aclprofStopConfig aclprofStopConfig; +typedef struct aclprofAicoreEvents aclprofAicoreEvents; +typedef struct aclprofSubscribeConfig aclprofSubscribeConfig; + +/** + * @ingroup AscendCL + * @brief profiling initialize + * + * @param profilerResultPath [IN] path of profiling result + * @param length [IN] length of profilerResultPath + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclprofFinalize + */ +ACL_FUNC_VISIBILITY aclError aclprofInit(const char *profilerResultPath, size_t length); + +/** + * @ingroup AscendCL + * @brief profiling finalize + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclprofInit + */ +ACL_FUNC_VISIBILITY aclError aclprofFinalize(); + +/** + * @ingroup AscendCL + * @brief Start profiling modules by profilerConfig + * + * @param profilerConfig [IN] config of profiling + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see aclprofStop + */ +ACL_FUNC_VISIBILITY aclError aclprofStart(const aclprofConfig *profilerConfig); + +/** + * @ingroup AscendCL + * @brief Create data of type aclprofConfig + * + * @param deviceIdList [IN] list of device id + * @param deviceNums [IN] number of devices + * @param aicoreMetrics [IN] type of aicore metrics + * @param aicoreEvents [IN] pointer to aicore events, only support NULL now + * @param dataTypeConfig [IN] config modules need profiling + * + * @retval the aclprofConfig pointer + * + * @see aclprofDestroyConfig + */ +ACL_FUNC_VISIBILITY aclprofConfig *aclprofCreateConfig(uint32_t *deviceIdList, uint32_t deviceNums, + aclprofAicoreMetrics aicoreMetrics, + aclprofAicoreEvents *aicoreEvents, uint64_t dataTypeConfig); + +/** + * @ingroup AscendCL + * @brief Destroy data of type aclprofConfig + * + * @param profilerConfig [IN] config of profiling + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclprofCreateConfig + */ +ACL_FUNC_VISIBILITY aclError aclprofDestroyConfig(const aclprofConfig *profilerConfig); + +/** + * @ingroup AscendCL + * @brief stop profiling modules by stopProfilingConfig + * + * @param profilerConfig [IN] pointer to stop config of profiling + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclprofStart + */ +ACL_FUNC_VISIBILITY aclError aclprofStop(const aclprofConfig *profilerConfig); + +/** + * @ingroup AscendCL + * @brief subscribe profiling data of model + * + * @param modelId [IN] the model id subscribed + * @param profSubscribeConfig [IN] pointer to config of model subscribe + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see aclprofModelUnSubscribe + */ +ACL_FUNC_VISIBILITY aclError aclprofModelSubscribe(uint32_t modelId, const aclprofSubscribeConfig *profSubscribeConfig); + +/** + * @ingroup AscendCL + * @brief unsubscribe profiling data of model + * + * @param modelId [IN] the model id unsubscribed + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclprofModelSubscribe + */ +ACL_FUNC_VISIBILITY aclError aclprofModelUnSubscribe(uint32_t modelId); + +/** + * @ingroup AscendCL + * @brief create subscribe config + * + * @param timeInfoSwitch [IN] switch whether get time info from model + * @param aicoreMetrics [IN] aicore metrics + * @param fd [IN] pointer to write pipe + * + * @retval the aclprofSubscribeConfig pointer + * + * @see aclprofDestroySubscribeConfig + */ +ACL_FUNC_VISIBILITY aclprofSubscribeConfig *aclprofCreateSubscribeConfig(int8_t timeInfoSwitch, + aclprofAicoreMetrics aicoreMetrics, void *fd); + +/** + * @ingroup AscendCL + * @brief destroy subscribe config + * + * @param profSubscribeConfig [IN] subscribe config + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclprofCreateSubscribeConfig + */ +ACL_FUNC_VISIBILITY aclError aclprofDestroySubscribeConfig(const aclprofSubscribeConfig *profSubscribeConfig); + +/** + * @ingroup AscendCL + * @brief create subscribe config + * + * @param opDescSize [OUT] size of op desc + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclprofGetOpDescSize(size_t *opDescSize); + +/** + * @ingroup AscendCL + * @brief get op number from subscription data + * + * @param opInfo [IN] pointer to subscription data + * @param opInfoLen [IN] memory size of subscription data + * @param opNumber [OUT] op number of subscription data + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclprofGetOpNum(const void *opInfo, size_t opInfoLen, uint32_t *opNumber); + +/** + * @ingroup AscendCL + * @brief get length op type from subscription data + * + * @param opInfo [IN] pointer to subscription data + * @param opInfoLen [IN] memory size of subscription data + * @param index [IN] index of op array in opInfo + * @param opTypeLen [OUT] actual length of op type string + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclprofGetOpTypeLen(const void *opInfo, size_t opInfoLen, uint32_t index, + size_t *opTypeLen); + +/** + * @ingroup AscendCL + * @brief get op type from subscription data + * + * @param opInfo [IN] pointer to subscription data + * @param opInfoLen [IN] memory size of subscription data + * @param index [IN] index of op array in opInfo + * @param opType [OUT] obtained op type string + * @param opTypeLen [IN] obtained length of op type string + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclprofGetOpType(const void *opInfo, size_t opInfoLen, uint32_t index, char *opType, + size_t opTypeLen); + +/** + * @ingroup AscendCL + * @brief get length op name from subscription data + * + * @param opInfo [IN] pointer to subscription data + * @param opInfoLen [IN] memory size of subscription data + * @param index [IN] index of op array in opInfo + * @param opNameLen [OUT] actual length of op name string + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclprofGetOpNameLen(const void *opInfo, size_t opInfoLen, uint32_t index, + size_t *opNameLen); + +/** + * @ingroup AscendCL + * @brief get op name from subscription data + * + * @param opInfo [IN] pointer to subscription data + * @param opInfoLen [IN] memory size of subscription data + * @param index [IN] index of op array in opInfo + * @param opName [OUT] obtained op name string + * @param opNameLen [IN] obtained length of op name string + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclprofGetOpName(const void *opInfo, size_t opInfoLen, uint32_t index, char *opName, + size_t opNameLen); + +/** + * @ingroup AscendCL + * @brief get start time of specified op from subscription data + * + * @param opInfo [IN] pointer to subscription data + * @param opInfoLen [IN] memory size of subscription data + * @param index [IN] index of op array in opInfo + * + * @retval start time(us) of specified op with timestamp + * @retval 0 for failed + */ +ACL_FUNC_VISIBILITY uint64_t aclprofGetOpStart(const void *opInfo, size_t opInfoLen, uint32_t index); + +/** + * @ingroup AscendCL + * @brief get end time of specified op from subscription data + * + * @param opInfo [IN] pointer to subscription data + * @param opInfoLen [IN] memory size of subscription data + * @param index [IN] index of op array in opInfo + * + * @retval end time(us) of specified op with timestamp + * @retval 0 for failed + */ +ACL_FUNC_VISIBILITY uint64_t aclprofGetOpEnd(const void *opInfo, size_t opInfoLen, uint32_t index); + +/** + * @ingroup AscendCL + * @brief get execution time of specified op from subscription data + * + * @param opInfo [IN] pointer to subscription data + * @param opInfoLen [IN] memory size of subscription data + * @param index [IN] index of op array in opInfo + * + * @retval execution time(us) of specified op with timestamp + * @retval 0 for 
failed + */ +ACL_FUNC_VISIBILITY uint64_t aclprofGetOpDuration(const void *opInfo, size_t opInfoLen, uint32_t index); + +/** + * @ingroup AscendCL + * @brief get model id from subscription data + * + * @param opInfo [IN] pointer to subscription data + * @param opInfoLen [IN] memory size of subscription data + * + * @retval model id of subscription data + * @retval 0 for failed + */ +ACL_FUNC_VISIBILITY size_t aclprofGetModelId(const void *opInfo, size_t opInfoLen, uint32_t index); + +#ifdef __cplusplus +} +#endif + +#endif // INC_EXTERNAL_ACL_PROF_H_ diff --git a/inc/external/acl/acl_rt.h b/inc/external/acl/acl_rt.h new file mode 100644 index 00000000..5ee70724 --- /dev/null +++ b/inc/external/acl/acl_rt.h @@ -0,0 +1,958 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef INC_EXTERNAL_ACL_ACL_RT_H_ +#define INC_EXTERNAL_ACL_ACL_RT_H_ + +#include +#include +#include "acl_base.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define ACL_EVENT_TIME_LINE 0x00000008u + +typedef enum aclrtRunMode { + ACL_DEVICE, + ACL_HOST, +} aclrtRunMode; + +typedef enum aclrtTsId { + ACL_TS_ID_AICORE = 0, + ACL_TS_ID_AIVECTOR = 1, + ACL_TS_ID_RESERVED = 2, +} aclrtTsId; + +typedef enum aclrtEventStatus { + ACL_EVENT_STATUS_COMPLETE = 0, + ACL_EVENT_STATUS_NOT_READY = 1, + ACL_EVENT_STATUS_RESERVED = 2, +} aclrtEventStatus; + +typedef enum aclrtCallbackBlockType { + ACL_CALLBACK_NO_BLOCK, + ACL_CALLBACK_BLOCK, +} aclrtCallbackBlockType; + +typedef enum aclrtMemcpyKind { + ACL_MEMCPY_HOST_TO_HOST, + ACL_MEMCPY_HOST_TO_DEVICE, + ACL_MEMCPY_DEVICE_TO_HOST, + ACL_MEMCPY_DEVICE_TO_DEVICE, +} aclrtMemcpyKind; + +typedef enum aclrtMemMallocPolicy { + ACL_MEM_MALLOC_HUGE_FIRST, + ACL_MEM_MALLOC_HUGE_ONLY, + ACL_MEM_MALLOC_NORMAL_ONLY, + ACL_MEM_MALLOC_HUGE_FIRST_P2P, + ACL_MEM_MALLOC_HUGE_ONLY_P2P, + ACL_MEM_MALLOC_NORMAL_ONLY_P2P, +} aclrtMemMallocPolicy; + +typedef enum aclrtMemAttr { + ACL_DDR_MEM, + ACL_HBM_MEM, + ACL_DDR_MEM_HUGE, + ACL_DDR_MEM_NORMAL, + ACL_HBM_MEM_HUGE, + ACL_HBM_MEM_NORMAL, + ACL_DDR_MEM_P2P_HUGE, + ACL_DDR_MEM_P2P_NORMAL, + ACL_HBM_MEM_P2P_HUGE, + ACL_HBM_MEM_P2P_NORMAL, +} aclrtMemAttr; + +typedef enum aclrtGroupAttr { + ACL_GROUP_AICORE_INT, + ACL_GROUP_AIV_INT, + ACL_GROUP_AIC_INT, + ACL_GROUP_SDMANUM_INT, + ACL_GROUP_ASQNUM_INT, + ACL_GROUP_GROUPID_INT +} aclrtGroupAttr; + +typedef struct tagRtGroupInfo aclrtGroupInfo; + +typedef struct rtExceptionInfo aclrtExceptionInfo; + +typedef void (*aclrtCallback)(void *userData); + +typedef void (*aclrtExceptionInfoCallback)(aclrtExceptionInfo *exceptionInfo); + +/** + * @ingroup AscendCL + * @brief Set a callback function to handle exception information + * + * @param callback [IN] callback function to handle exception information + * + * @retval ACL_SUCCESS The function 
is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtSetExceptionInfoCallback(aclrtExceptionInfoCallback callback); + +/** + * @ingroup AscendCL + * @brief Get task id from exception information + * + * @param info [IN] pointer of exception information + * + * @retval The task id from exception information + * @retval 0xFFFFFFFF if info is null + */ +ACL_FUNC_VISIBILITY uint32_t aclrtGetTaskIdFromExceptionInfo(const aclrtExceptionInfo *info); + +/** + * @ingroup AscendCL + * @brief Get stream id from exception information + * + * @param info [IN] pointer of exception information + * + * @retval The stream id from exception information + * @retval 0xFFFFFFFF if info is null + */ +ACL_FUNC_VISIBILITY uint32_t aclrtGetStreamIdFromExceptionInfo(const aclrtExceptionInfo *info); + +/** + * @ingroup AscendCL + * @brief Get thread id from exception information + * + * @param info [IN] pointer of exception information + * + * @retval The thread id of fail task + * @retval 0xFFFFFFFF if info is null + */ +ACL_FUNC_VISIBILITY uint32_t aclrtGetThreadIdFromExceptionInfo(const aclrtExceptionInfo *info); + +/** + * @ingroup AscendCL + * @brief Get device id from exception information + * + * @param info [IN] pointer of exception information + * + * @retval The thread id of fail task + * @retval 0xFFFFFFFF if info is null + */ +ACL_FUNC_VISIBILITY uint32_t aclrtGetDeviceIdFromExceptionInfo(const aclrtExceptionInfo *info); + +/** + * @ingroup AscendCL + * @brief The thread that handles the callback function on the Stream + * + * @param threadId [IN] thread ID + * @param stream [IN] stream handle + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtSubscribeReport(uint64_t threadId, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief Add a callback function to be executed on the host + * to the task queue of the Stream + * + * @param fn [IN] Specify the callback function to be added + * The function prototype of the callback function is: + * typedef void (*aclrtCallback)(void *userData); + * @param userData [IN] User data to be passed to the callback function + * @param blockType [IN] callback block type + * @param stream [IN] stream handle + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtLaunchCallback(aclrtCallback fn, void *userData, aclrtCallbackBlockType blockType, + aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief After waiting for a specified time, trigger callback processing + * + * @par Function + * The thread processing callback specified by + * the aclrtSubscribeReport interface + * + * @param timeout [IN] timeout value + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtSubscribeReport + */ +ACL_FUNC_VISIBILITY aclError aclrtProcessReport(int32_t timeout); + +/** + * @ingroup AscendCL + * @brief Cancel thread registration, + * the callback function on the specified Stream + * is no longer processed by the specified thread + * + * @param threadId [IN] thread ID + * @param stream [IN] stream handle + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtUnSubscribeReport(uint64_t threadId, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief create context and associates it with the calling thread + * + * @par Function + * The following use cases are supported: + * @li If you don't call the aclrtCreateContext interface + * to explicitly create the context, + * the system will use the default context, which is implicitly created + * when the aclrtSetDevice interface is called. + * @li If multiple contexts are created in a process + * (there is no limit on the number of contexts), + * the current thread can only use one of them at the same time. + * It is recommended to explicitly specify the context of the current thread + * through the aclrtSetCurrentContext interface to increase. + * the maintainability of the program. + * + * @param context [OUT] point to the created context + * @param deviceId [IN] device to create context on + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtSetDevice | aclrtSetCurrentContext + */ +ACL_FUNC_VISIBILITY aclError aclrtCreateContext(aclrtContext *context, int32_t deviceId); + +/** + * @ingroup AscendCL + * @brief destroy context instance + * + * @par Function + * Can only destroy context created through aclrtCreateContext interface + * + * @param context [IN] the context to destroy + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtCreateContext + */ +ACL_FUNC_VISIBILITY aclError aclrtDestroyContext(aclrtContext context); + +/** + * @ingroup AscendCL + * @brief set the context of the thread + * + * @par Function + * The following scenarios are supported: + * @li If the aclrtCreateContext interface is called in a thread to explicitly + * create a Context (for example: ctx1), the thread's Context can be specified + * without calling the aclrtSetCurrentContext interface. 
+ * The system uses ctx1 as the context of thread1 by default. + * @li If the aclrtCreateContext interface is not explicitly created, + * the system uses the default context as the context of the thread. + * At this time, the aclrtDestroyContext interface cannot be used to release + * the default context. + * @li If the aclrtSetCurrentContext interface is called multiple times to + * set the thread's Context, the last one prevails. + * + * @par Restriction + * @li If the device corresponding to the context set for the thread + * has been reset, you cannot set the context as the context of the thread, + * otherwise a business exception will result. + * @li It is recommended to use the context created in a thread. + * If the aclrtCreateContext interface is called in thread A to create a context, + * and the context is used in thread B, + * the user must guarantee the execution order of tasks in the same stream + * under the same context in two threads. + * + * @param context [IN] the current context of the thread + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtCreateContext | aclrtDestroyContext + */ +ACL_FUNC_VISIBILITY aclError aclrtSetCurrentContext(aclrtContext context); + +/** + * @ingroup AscendCL + * @brief get the context of the thread + * + * @par Function + * If the user calls the aclrtSetCurrentContext interface + * multiple times to set the context of the current thread, + * then the last set context is obtained + * + * @param context [OUT] the current context of the thread + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see aclrtSetCurrentContext + */ +ACL_FUNC_VISIBILITY aclError aclrtGetCurrentContext(aclrtContext *context); + +/** + * @ingroup AscendCL + * @brief Specify the device to use for the operation + * implicitly create the default context and the default stream + * + * @par Function + * The following use cases are supported: + * @li Device can be specified in the process or thread. + * If you call the aclrtSetDevice interface multiple + * times to specify the same device, + * you only need to call the aclrtResetDevice interface to reset the device. + * @li The same device can be specified for operation + * in different processes or threads. + * @li Device is specified in a process, + * and multiple threads in the process can share this device to explicitly + * create a Context (aclrtCreateContext interface). + * @li In multi-device scenarios, you can switch to other devices + * through the aclrtSetDevice interface in the process. + * + * @param deviceId [IN] the device id + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtResetDevice |aclrtCreateContext + */ +ACL_FUNC_VISIBILITY aclError aclrtSetDevice(int32_t deviceId); + +/** + * @ingroup AscendCL + * @brief Reset the current operating Device and free resources on the device, + * including the default context, the default stream, + * and all streams created under the default context, + * and synchronizes the interface. + * If the task under the default context or stream has not been completed, + * the system will wait for the task to complete before releasing it. + * + * @par Restriction + * @li The Context, Stream, and Event that are explicitly created + * on the device to be reset. Before resetting, + * it is recommended to follow the following interface calling sequence, + * otherwise business abnormalities may be caused. 
+ * @li Interface calling sequence: + * call aclrtDestroyEvent interface to release Event or + * call aclrtDestroyStream interface to release explicitly created Stream-> + * call aclrtDestroyContext to release explicitly created Context-> + * call aclrtResetDevice interface + * + * @param deviceId [IN] the device id + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtResetDevice(int32_t deviceId); + +/** + * @ingroup AscendCL + * @brief get target device of current thread + * + * @param deviceId [OUT] the device id + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtGetDevice(int32_t *deviceId); + +/** + * @ingroup AscendCL + * @brief get target side + * + * @param runMode [OUT] the run mode + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtGetRunMode(aclrtRunMode *runMode); + +/** + * @ingroup AscendCL + * @brief Wait for compute device to finish + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtSynchronizeDevice(void); + +/** + * @ingroup AscendCL + * @brief Set Scheduling TS + * + * @param tsId [IN] the ts id + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtSetTsDevice(aclrtTsId tsId); + +/** + * @ingroup AscendCL + * @brief get total device number. + * + * @param count [OUT] the device number + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtGetDeviceCount(uint32_t *count); + +/** + * @ingroup AscendCL + * @brief create event instance + * + * @param event [OUT] created event + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtCreateEvent(aclrtEvent *event); + +/** + * @ingroup AscendCL + * @brief create event instance with flag + * + * @param event [OUT] created event + * @param flag [IN] event flag + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtCreateEventWithFlag(aclrtEvent *event, uint32_t flag); + +/** + * @ingroup AscendCL + * @brief destroy event instance + * + * @par Function + * Only events created through the aclrtCreateEvent interface can be + * destroyed, synchronous interfaces. When destroying an event, + * the user must ensure that the tasks involved in the aclrtSynchronizeEvent + * interface or the aclrtStreamWaitEvent interface are completed before + * they are destroyed. + * + * @param event [IN] event to destroy + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtCreateEvent | aclrtSynchronizeEvent | aclrtStreamWaitEvent + */ +ACL_FUNC_VISIBILITY aclError aclrtDestroyEvent(aclrtEvent event); + +/** + * @ingroup AscendCL + * @brief Record an Event in the Stream + * + * @param event [IN] event to record + * @param stream [IN] stream handle + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtRecordEvent(aclrtEvent event, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief Reset an event + * + * @par Function + * Users need to make sure to wait for the tasks in the Stream + * to complete before resetting the Event + * + * @param event [IN] event to reset + * @param stream [IN] stream handle + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtResetEvent(aclrtEvent event, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief Queries an event's status + * + * @param event [IN] event to query + * @param status [OUT] event status + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtQueryEvent(aclrtEvent event, aclrtEventStatus *status); + +/** + * @ingroup AscendCL + * @brief Block Host Running, wait event to be complete + * + * @param event [IN] event to wait + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtSynchronizeEvent(aclrtEvent event); + +/** + * @ingroup AscendCL + * @brief computes the elapsed time between events. + * + * @param ms [OUT] time between start and end in ms + * @param start [IN] starting event + * @param end [IN] ending event + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtCreateEvent | aclrtRecordEvent | aclrtSynchronizeStream + */ +ACL_FUNC_VISIBILITY aclError aclrtEventElapsedTime(float *ms, aclrtEvent start, aclrtEvent end); + +/** + * @ingroup AscendCL + * @brief alloc memory on device + * + * @par Function + * alloc for size linear memory on device + * and return a pointer to allocated memory by *devPtr + * + * @par Restriction + * @li The memory requested by the aclrtMalloc interface needs to be released + * through the aclrtFree interface. + * @li Before calling the media data processing interface, + * if you need to apply memory on the device to store input or output data, + * you need to call acldvppMalloc to apply for memory. + * + * @param devPtr [OUT] pointer to pointer to allocated memory on device + * @param size [IN] alloc memory size + * @param policy [IN] memory alloc policy + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see aclrtFree | acldvppMalloc | aclrtMallocCached + */ +ACL_FUNC_VISIBILITY aclError aclrtMalloc(void **devPtr, size_t size, aclrtMemMallocPolicy policy); + +/** + * @ingroup AscendCL + * @brief allocate memory on device with cache + * + * @par Function + * alloc for size linear memory on device + * and return a pointer to allocated memory by *devPtr + * + * @par Restriction + * @li The memory requested by the aclrtMallocCached interface needs to be released + * through the aclrtFree interface. + * + * @param devPtr [OUT] pointer to pointer to allocated memory on device + * @param size [IN] alloc memory size + * @param policy [IN] memory alloc policy + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtFree | aclrtMalloc + */ +ACL_FUNC_VISIBILITY aclError aclrtMallocCached(void **devPtr, size_t size, aclrtMemMallocPolicy policy); + +/** + * @ingroup AscendCL + * @brief flush cache data to ddr + * + * @param devPtr [IN] the pointer that flush data to ddr + * @param size [IN] flush size + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtMemFlush(void *devPtr, size_t size); + +/** + * @ingroup AscendCL + * @brief invalidate cache data + * + * @param devPtr [IN] pointer to invalidate cache data + * @param size [IN] invalidate size + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtMemInvalidate(void *devPtr, size_t size); + +/** + * @ingroup AscendCL + * @brief free device memory + * + * @par Function + * can only free memory allocated through the aclrtMalloc interface + * + * @param devPtr [IN] Pointer to memory to be freed + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see aclrtMalloc + */ +ACL_FUNC_VISIBILITY aclError aclrtFree(void *devPtr); + +/** + * @ingroup AscendCL + * @brief alloc memory on host + * + * @par Restriction + * @li The requested memory cannot be used in the Device + * and needs to be explicitly copied to the Device. + * @li The memory requested by the aclrtMallocHost interface + * needs to be released through the aclrtFreeHost interface. + * + * @param hostPtr [OUT] pointer to pointer to allocated memory on the host + * @param size [IN] alloc memory size + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtFreeHost + */ +ACL_FUNC_VISIBILITY aclError aclrtMallocHost(void **hostPtr, size_t size); + +/** + * @ingroup AscendCL + * @brief free host memory + * + * @par Function + * can only free memory allocated through the aclrtMallocHost interface + * + * @param hostPtr [IN] free memory pointer + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtMallocHost + */ +ACL_FUNC_VISIBILITY aclError aclrtFreeHost(void *hostPtr); + +/** + * @ingroup AscendCL + * @brief synchronous memory replication between host and device + * + * @param dst [IN] destination address pointer + * @param destMax [IN] Max length of the destination address memory + * @param src [IN] source address pointer + * @param count [IN] the length of byte to copy + * @param kind [IN] memcpy type + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtMemcpy(void *dst, size_t destMax, const void *src, size_t count, + aclrtMemcpyKind kind); + +/** + * @ingroup AscendCL + * @brief Initialize memory and set contents of memory to specified value + * + * @par Function + * The memory to be initialized is on the Host or device side, + * and the system determines whether + * it is host or device according to the address + * + * @param devPtr [IN] Starting address of memory + * @param maxCount [IN] Max length of destination address memory + * @param value [IN] Set value + * @param count [IN] The length of memory + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtMemset(void *devPtr, size_t maxCount, int32_t value, size_t count); + +/** + * @ingroup AscendCL + * @brief Asynchronous memory replication between Host and Device + * + * @par Function + * After calling this interface, + * be sure to call the aclrtSynchronizeStream interface to ensure that + * the task of memory replication has been completed + * + * @par Restriction + * @li For on-chip Device-to-Device memory copy, + * both the source and destination addresses must be 64-byte aligned + * + * @param dst [IN] destination address pointer + * @param destMax [IN] Max length of destination address memory + * @param src [IN] source address pointer + * @param count [IN] the number of byte to copy + * @param kind [IN] memcpy type + * @param stream [IN] asynchronized task stream + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see aclrtSynchronizeStream + */ +ACL_FUNC_VISIBILITY aclError aclrtMemcpyAsync(void *dst, size_t destMax, const void *src, size_t count, + aclrtMemcpyKind kind, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief Asynchronous initialize memory + * and set contents of memory to specified value async + * + * @par Function + * The memory to be initialized is on the Host or device side, + * and the system determines whether + * it is host or device according to the address + * + * @param devPtr [IN] destination address pointer + * @param maxCount [IN] Max length of destination address memory + * @param value [IN] set value + * @param count [IN] the number of byte to set + * @param stream [IN] asynchronized task stream + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtSynchronizeStream + */ +ACL_FUNC_VISIBILITY aclError aclrtMemsetAsync(void *devPtr, size_t maxCount, int32_t value, size_t count, + aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief create stream instance + * + * @param stream [OUT] the created stream + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtCreateStream(aclrtStream *stream); + +/** + * @ingroup AscendCL + * @brief destroy stream instance + * + * @par Function + * Can only destroy streams created through the aclrtCreateStream interface + * + * @par Restriction + * Before calling the aclrtDestroyStream interface to destroy + * the specified Stream, you need to call the aclrtSynchronizeStream interface + * to ensure that the tasks in the Stream have been completed. + * + * @param stream [IN] the stream to destroy + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see aclrtCreateStream | aclrtSynchronizeStream + */ +ACL_FUNC_VISIBILITY aclError aclrtDestroyStream(aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief block the host until all tasks + * in the specified stream have completed + * + * @param stream [IN] the stream to wait + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtSynchronizeStream(aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief Blocks the operation of the specified Stream until + * the specified Event is completed. + * Support for multiple streams waiting for the same event. + * + * @param stream [IN] the wait stream If using the default Stream, set NULL + * @param event [IN] the event to wait + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtStreamWaitEvent(aclrtStream stream, aclrtEvent event); + +/** + * @ingroup AscendCL + * @brief set group + * + * @par Function + * set the task to the corresponding group + * + * @param groupId [IN] group id + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtGetGroupCount | aclrtGetAllGroupInfo | aclrtGetGroupInfoDetail + */ +ACL_FUNC_VISIBILITY aclError aclrtSetGroup(int32_t groupId); + +/** + * @ingroup AscendCL + * @brief get the number of group + * + * @par Function + * get the number of group. if the number of group is zero, + * it means that group is not supported or group is not created. + * + * @param count [OUT] the number of group + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + */ +ACL_FUNC_VISIBILITY aclError aclrtGetGroupCount(uint32_t *count); + +/** + * @ingroup AscendCL + * @brief create group information + * + * @retval null for failed. + * @retval OtherValues success. 
+ * + * @see aclrtDestroyGroupInfo + */ +ACL_FUNC_VISIBILITY aclrtGroupInfo *aclrtCreateGroupInfo(); + +/** + * @ingroup AscendCL + * @brief destroy group information + * + * @param groupInfo [IN] pointer to group information + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtCreateGroupInfo + */ +ACL_FUNC_VISIBILITY aclError aclrtDestroyGroupInfo(aclrtGroupInfo *groupInfo); + +/** + * @ingroup AscendCL + * @brief get all group information + * + * @param groupInfo [OUT] pointer to group information + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtGetGroupCount + */ +ACL_FUNC_VISIBILITY aclError aclrtGetAllGroupInfo(aclrtGroupInfo *groupInfo); + +/** + * @ingroup AscendCL + * @brief get detail information of group + * + * @param groupInfo [IN] pointer to group information + * @param groupIndex [IN] group index value + * @param attr [IN] group attribute + * @param attrValue [OUT] pointer to attribute value + * @param valueLen [IN] length of attribute value + * @param paramRetSize [OUT] pointer to real length of attribute value + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtGetGroupCount | aclrtGetAllGroupInfo + */ +ACL_FUNC_VISIBILITY aclError aclrtGetGroupInfoDetail(const aclrtGroupInfo *groupInfo, int32_t groupIndex, + aclrtGroupAttr attr, void *attrValue, size_t valueLen, + size_t *paramRetSize); + +/** + * @ingroup AscendCL + * @brief checking whether current device and peer device support the p2p feature + * + * @param canAccessPeer [OUT] pointer to save the checking result + * @param deviceId [IN] current device id + * @param peerDeviceId [IN] peer device id + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see aclrtDeviceEnablePeerAccess | aclrtDeviceDisablePeerAccess + */ +ACL_FUNC_VISIBILITY aclError aclrtDeviceCanAccessPeer(int32_t *canAccessPeer, int32_t deviceId, int32_t peerDeviceId); + +/** + * @ingroup AscendCL + * @brief enable the peer device to support the p2p feature + * + * @param peerDeviceId [IN] the peer device id + * @param flags [IN] reserved field, now it must be zero + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtDeviceCanAccessPeer | aclrtDeviceDisablePeerAccess + */ +ACL_FUNC_VISIBILITY aclError aclrtDeviceEnablePeerAccess(int32_t peerDeviceId, uint32_t flags); + +/** + * @ingroup AscendCL + * @brief disable the peer device to support the p2p function + * + * @param peerDeviceId [IN] the peer device id + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtDeviceCanAccessPeer | aclrtDeviceEnablePeerAccess + */ +ACL_FUNC_VISIBILITY aclError aclrtDeviceDisablePeerAccess(int32_t peerDeviceId); + +/** + * @ingroup AscendCL + * @brief Obtain the free memory and total memory of specified attribute. + * the specified memory include normal memory and huge memory. + * + * @param attr [IN] the memory attribute of specified device + * @param free [OUT] the free memory of specified device + * @param total [OUT] the total memory of specified device. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtGetMemInfo(aclrtMemAttr attr, size_t *free, size_t *total); + +/** + * @ingroup AscendCL + * @brief Set the timeout interval for waiting of op + * + * @param timeout [IN] op wait timeout + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtSetOpWaitTimeout(uint32_t timeout); + +#ifdef __cplusplus +} +#endif + +#endif // INC_EXTERNAL_ACL_ACL_RT_H_ diff --git a/inc/external/acl/acl_tdt.h b/inc/external/acl/acl_tdt.h new file mode 100644 index 00000000..c357518d --- /dev/null +++ b/inc/external/acl/acl_tdt.h @@ -0,0 +1,276 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef INC_EXTERNAL_ACL_ACL_TDT_H_ +#define INC_EXTERNAL_ACL_ACL_TDT_H_ + +#include "acl/acl_base.h" + +#ifdef __cplusplus +extern "C" { +#endif + +enum acltdtTensorType { + ACL_TENSOR_DATA_UNDEFINED = -1, + ACL_TENSOR_DATA_TENSOR, + ACL_TENSOR_DATA_END_OF_SEQUENCE, + ACL_TENSOR_DATA_ABNORMAL +}; + +typedef struct acltdtDataItem acltdtDataItem; +typedef struct acltdtDataset acltdtDataset; +typedef struct acltdtChannelHandle acltdtChannelHandle; + +/** + * @ingroup AscendCL + * @brief Get tensor type from item + * + * @param dataItem [IN] pointer to the data item + * + * @retval Tensor type. + * @retval ACL_DT_UNDEFINED if dataItem is null + */ +ACL_FUNC_VISIBILITY acltdtTensorType acltdtGetTensorTypeFromItem(const acltdtDataItem *dataItem); + +/** + * @ingroup AscendCL + * @brief Get data type from item + * + * @param dataItem [IN] pointer to the data item + * + * @retval Data type. 
+ * @retval ACL_DT_UNDEFINED if dataItem is null + */ +ACL_FUNC_VISIBILITY aclDataType acltdtGetDataTypeFromItem(const acltdtDataItem *dataItem); + +/** + * @ingroup AscendCL + * @brief Get data address from item + * + * @param dataItem [IN] pointer to data item + * + * @retval null for failed + * @retval OtherValues success + */ +ACL_FUNC_VISIBILITY void *acltdtGetDataAddrFromItem(const acltdtDataItem *dataItem); + +/** + * @ingroup AscendCL + * @brief Get data size from item + * + * @param dataItem [IN] pointer to data item + * + * @retval 0 for failed + * @retval OtherValues success + */ +ACL_FUNC_VISIBILITY size_t acltdtGetDataSizeFromItem(const acltdtDataItem *dataItem); + +/** + * @ingroup AscendCL + * @brief Get dim's number from item + * + * @param dataItem [IN] pointer to data item + * + * @retval 0 for failed + * @retval OtherValues success + */ +ACL_FUNC_VISIBILITY size_t acltdtGetDimNumFromItem(const acltdtDataItem *dataItem); + +/** + * @ingroup AscendCL + * @brief Get dims from item + * + * @param dataItem [IN] the struct of data item + * @param dims [IN|OUT] pointer to the dims of dataItem + * @param dimNum [IN] the size of the dims + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acltdtGetDimsFromItem(const acltdtDataItem *dataItem, int64_t *dims, size_t dimNum); + +/** + * @ingroup AscendCL + * @brief Create the struct of data item + * + * @param tdtType [IN] Tdt tensor type + * @param dims [IN] pointer of tdtDataItem's dims + * @param dimNum [IN] Dim number + * @param dataType [IN] Data type + * @param data [IN] Data pointer + * @param size [IN] Data size + * + * @retval null for failed + * @retval OtherValues success + * + * @see acltdtDestroyDataItem + */ +ACL_FUNC_VISIBILITY acltdtDataItem *acltdtCreateDataItem(acltdtTensorType tdtType, const int64_t *dims, size_t dimNum, + aclDataType dataType, void *data, size_t size); + +/** + * @ingroup AscendCL + * @brief Destroy the struct of data item + * + * @param dataItem [IN] pointer to the data item + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acltdtCreateDataItem + */ +ACL_FUNC_VISIBILITY aclError acltdtDestroyDataItem(acltdtDataItem *dataItem); + +/** + * @ingroup AscendCL + * @brief Create the tdt dataset + * + * @retval null for failed + * @retval OtherValues success + * + * @see acltdtDestroyDataset + */ +ACL_FUNC_VISIBILITY acltdtDataset *acltdtCreateDataset(); + +/** + * @ingroup AscendCL + * @brief Destroy the tdt dataset + * + * @param dataset [IN] pointer to the dataset + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see acltdtCreateDataset + */ +ACL_FUNC_VISIBILITY aclError acltdtDestroyDataset(acltdtDataset *dataset); + +/** + * @ingroup AscendCL + * @brief Get the data item + * + * @param dataset [IN] pointer to the dataset + * @param index [IN] index of the dataset + * + * @retval null for failed + * @retval OtherValues success + * + * @see acltdtAddDataItem + */ +ACL_FUNC_VISIBILITY acltdtDataItem *acltdtGetDataItem(const acltdtDataset *dataset, size_t index); + +/** + * @ingroup AscendCL + * @brief Add the data item + * + * @param dataset [OUT] pointer to the dataset + * @param dataItem [IN] pointer to the data item + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acltdtGetDataItem + */ +ACL_FUNC_VISIBILITY aclError acltdtAddDataItem(acltdtDataset *dataset, acltdtDataItem *dataItem); + +/** + * @ingroup AscendCL + * @brief Get the size of dataset + * + * @param dataset [IN] pointer to the dataset + * + * @retval 0 for failed + * @retval OtherValues success + */ +ACL_FUNC_VISIBILITY size_t acltdtGetDatasetSize(const acltdtDataset *dataset); + +/** + * @ingroup AscendCL + * @brief Stop the channel + * + * @param handle [IN] pointer to the channel handle + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see acltdtCreateChannel | acltdtDestroyChannel + */ +ACL_FUNC_VISIBILITY aclError acltdtStopChannel(acltdtChannelHandle *handle); + +/** + * @ingroup AscendCL + * @brief Create the channel + * + * @param deviceId [IN] the device id + * @param name [IN] the channel's name + * + * @retval null for failed + * @retval OtherValues success + * + * @see acltdtStopChannel | acltdtDestroyChannel + */ +ACL_FUNC_VISIBILITY acltdtChannelHandle *acltdtCreateChannel(uint32_t deviceId, const char *name); + +/** + * @ingroup AscendCL + * @brief Destroy the channel + * + * @param handle [IN] pointer to the channel handle + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acltdtCreateChannel | acltdtStopChannel + */ +ACL_FUNC_VISIBILITY aclError acltdtDestroyChannel(acltdtChannelHandle *handle); + +/** + * @ingroup AscendCL + * @brief Send tensor to device + * + * @param handle [IN] pointer to the channel handle + * @param dataset [IN] pointer to the dataset + * @param timeout [IN] to be reserved, now it must be -1 + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acltdtReceiveTensor + */ +ACL_FUNC_VISIBILITY aclError acltdtSendTensor(const acltdtChannelHandle *handle, const acltdtDataset *dataset, + int32_t timeout); + +/** + * @ingroup AscendCL + * @brief Receive tensor from device + * + * @param handle [IN] pointer to the channel handle + * @param dataset [OUT] pointer to the dataset + * @param timeout [IN] to be reserved, now it must be -1 + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see acltdtSendTensor + */ +ACL_FUNC_VISIBILITY aclError acltdtReceiveTensor(const acltdtChannelHandle *handle, acltdtDataset *dataset, + int32_t timeout); + +#ifdef __cplusplus +} +#endif + +#endif // INC_EXTERNAL_ACL_ACL_TDT_H_ diff --git a/inc/external/acl/error_codes/ge_error_codes.h b/inc/external/acl/error_codes/ge_error_codes.h new file mode 100644 index 00000000..cafc5a64 --- /dev/null +++ b/inc/external/acl/error_codes/ge_error_codes.h @@ -0,0 +1,75 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef INC_EXTERNAL_GE_GE_ERROR_CODES_H_ +#define INC_EXTERNAL_GE_GE_ERROR_CODES_H_ + +#if defined(_MSC_VER) +#ifdef FUNC_VISIBILITY +#define GE_FUNC_VISIBILITY _declspec(dllexport) +#else +#define GE_FUNC_VISIBILITY +#endif +#else +#ifdef FUNC_VISIBILITY +#define GE_FUNC_VISIBILITY __attribute__((visibility("default"))) +#else +#define GE_FUNC_VISIBILITY +#endif +#endif + +#include <stdint.h> + +#ifdef __cplusplus +extern "C" { +#endif +static const uint32_t ACL_ERROR_GE_PARAM_INVALID = 145000; +static const uint32_t ACL_ERROR_GE_EXEC_NOT_INIT = 145001; +static const uint32_t ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID = 145002; +static const uint32_t ACL_ERROR_GE_EXEC_MODEL_ID_INVALID = 145003; +static const uint32_t ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID = 145006; +static const uint32_t ACL_ERROR_GE_EXEC_MODEL_ADDR_INVALID = 145007; +static const uint32_t ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID = 145008; +static const uint32_t ACL_ERROR_GE_EXEC_LOAD_MODEL_REPEATED = 145009; +static const uint32_t ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID = 145011; +static const uint32_t ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID = 145012; +static const uint32_t ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID = 145013; +static const uint32_t ACL_ERROR_GE_AIPP_BATCH_EMPTY = 145014; +static const uint32_t ACL_ERROR_GE_AIPP_NOT_EXIST = 145015; +static const uint32_t ACL_ERROR_GE_AIPP_MODE_INVALID = 145016; +static const uint32_t ACL_ERROR_GE_OP_TASK_TYPE_INVALID = 145017; +static const uint32_t ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID = 145018; +static const uint32_t ACL_ERROR_GE_PLGMGR_PATH_INVALID = 145019; +static const uint32_t ACL_ERROR_GE_FORMAT_INVALID = 145020; +static const uint32_t ACL_ERROR_GE_SHAPE_INVALID = 145021; +static const uint32_t ACL_ERROR_GE_DATATYPE_INVALID = 145022; +static const uint32_t ACL_ERROR_GE_MEMORY_ALLOCATION = 245000; +static const uint32_t ACL_ERROR_GE_MEMORY_OPERATE_FAILED = 245001; +static const uint32_t ACL_ERROR_GE_INTERNAL_ERROR = 545000; +static const uint32_t 
ACL_ERROR_GE_LOAD_MODEL = 545001; +static const uint32_t ACL_ERROR_GE_EXEC_LOAD_MODEL_PARTITION_FAILED = 545002; +static const uint32_t ACL_ERROR_GE_EXEC_LOAD_WEIGHT_PARTITION_FAILED = 545003; +static const uint32_t ACL_ERROR_GE_EXEC_LOAD_TASK_PARTITION_FAILED = 545004; +static const uint32_t ACL_ERROR_GE_EXEC_LOAD_KERNEL_PARTITION_FAILED = 545005; +static const uint32_t ACL_ERROR_GE_EXEC_RELEASE_MODEL_DATA = 545006; +static const uint32_t ACL_ERROR_GE_COMMAND_HANDLE = 545007; +static const uint32_t ACL_ERROR_GE_GET_TENSOR_INFO = 545008; +static const uint32_t ACL_ERROR_GE_UNLOAD_MODEL = 545009; + +#ifdef __cplusplus +}  // extern "C" +#endif +#endif  // INC_EXTERNAL_GE_GE_ERROR_CODES_H_ diff --git a/inc/external/acl/error_codes/rt_error_codes.h b/inc/external/acl/error_codes/rt_error_codes.h new file mode 100644 index 00000000..a1392cc6 --- /dev/null +++ b/inc/external/acl/error_codes/rt_error_codes.h @@ -0,0 +1,109 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __INC_EXTERNEL_RT_ERROR_CODES_H__ +#define __INC_EXTERNEL_RT_ERROR_CODES_H__ + +#include <stdint.h> + +#ifdef __cplusplus +extern "C" { +#endif + +static const int32_t ACL_RT_SUCCESS = 0;  // success + +static const int32_t ACL_ERROR_RT_PARAM_INVALID = 107000;  // param invalid +static const int32_t ACL_ERROR_RT_INVALID_DEVICEID = 107001;  // invalid device id +static const int32_t ACL_ERROR_RT_CONTEXT_NULL = 107002;  // current context null +static const int32_t ACL_ERROR_RT_STREAM_CONTEXT = 107003;  // stream not in current context +static const int32_t ACL_ERROR_RT_MODEL_CONTEXT = 107004;  // model not in current context +static const int32_t ACL_ERROR_RT_STREAM_MODEL = 107005;  // stream not in model +static const int32_t ACL_ERROR_RT_EVENT_TIMESTAMP_INVALID = 107006;  // event timestamp invalid +static const int32_t ACL_ERROR_RT_EVENT_TIMESTAMP_REVERSAL = 107007;  // event timestamp reversal +static const int32_t ACL_ERROR_RT_ADDR_UNALIGNED = 107008;  // memory address unaligned +static const int32_t ACL_ERROR_RT_FILE_OPEN = 107009;  // open file failed +static const int32_t ACL_ERROR_RT_FILE_WRITE = 107010;  // write file failed +static const int32_t ACL_ERROR_RT_STREAM_SUBSCRIBE = 107011;  // error subscribe stream +static const int32_t ACL_ERROR_RT_THREAD_SUBSCRIBE = 107012;  // error subscribe thread +static const int32_t ACL_ERROR_RT_GROUP_NOT_SET = 107013;  // group not set +static const int32_t ACL_ERROR_RT_GROUP_NOT_CREATE = 107014;  // group not create +static const int32_t ACL_ERROR_RT_STREAM_NO_CB_REG = 107015;  // callback not register to stream +static const int32_t ACL_ERROR_RT_INVALID_MEMORY_TYPE = 107016;  // invalid memory type +static const int32_t ACL_ERROR_RT_INVALID_HANDLE = 107017;  // invalid handle +static const int32_t ACL_ERROR_RT_INVALID_MALLOC_TYPE = 107018;  // invalid malloc type +static const int32_t ACL_ERROR_RT_WAIT_TIMEOUT = 107019;  // wait timeout + +static const int32_t ACL_ERROR_RT_FEATURE_NOT_SUPPORT = 207000;  // feature not support 
+static const int32_t ACL_ERROR_RT_MEMORY_ALLOCATION = 207001; // memory allocation error +static const int32_t ACL_ERROR_RT_MEMORY_FREE = 207002; // memory free error +static const int32_t ACL_ERROR_RT_AICORE_OVER_FLOW = 207003; // aicore over flow +static const int32_t ACL_ERROR_RT_NO_DEVICE = 207004; // no device +static const int32_t ACL_ERROR_RT_RESOURCE_ALLOC_FAIL = 207005; // resource alloc fail +static const int32_t ACL_ERROR_RT_NO_PERMISSION = 207006; // no permission +static const int32_t ACL_ERROR_RT_NO_EVENT_RESOURCE = 207007; // no event resource +static const int32_t ACL_ERROR_RT_NO_STREAM_RESOURCE = 207008; // no stream resource +static const int32_t ACL_ERROR_RT_NO_NOTIFY_RESOURCE = 207009; // no notify resource +static const int32_t ACL_ERROR_RT_NO_MODEL_RESOURCE = 207010; // no model resource +static const int32_t ACL_ERROR_RT_NO_CDQ_RESOURCE = 207011; // no cdq resource + +static const int32_t ACL_ERROR_RT_INTERNAL_ERROR = 507000; // runtime internal error +static const int32_t ACL_ERROR_RT_TS_ERROR = 507001; // ts internel error +static const int32_t ACL_ERROR_RT_STREAM_TASK_FULL = 507002; // task full in stream +static const int32_t ACL_ERROR_RT_STREAM_TASK_EMPTY = 507003; // task empty in stream +static const int32_t ACL_ERROR_RT_STREAM_NOT_COMPLETE = 507004; // stream not complete +static const int32_t ACL_ERROR_RT_END_OF_SEQUENCE = 507005; // end of sequence +static const int32_t ACL_ERROR_RT_EVENT_NOT_COMPLETE = 507006; // event not complete +static const int32_t ACL_ERROR_RT_CONTEXT_RELEASE_ERROR = 507007; // context release error +static const int32_t ACL_ERROR_RT_SOC_VERSION = 507008; // soc version error +static const int32_t ACL_ERROR_RT_TASK_TYPE_NOT_SUPPORT = 507009; // task type not support +static const int32_t ACL_ERROR_RT_LOST_HEARTBEAT = 507010; // ts lost heartbeat +static const int32_t ACL_ERROR_RT_MODEL_EXECUTE = 507011; // model execute failed +static const int32_t ACL_ERROR_RT_REPORT_TIMEOUT = 507012; // report timeout 
+static const int32_t ACL_ERROR_RT_SYS_DMA = 507013; // sys dma error +static const int32_t ACL_ERROR_RT_AICORE_TIMEOUT = 507014; // aicore timeout +static const int32_t ACL_ERROR_RT_AICORE_EXCEPTION = 507015; // aicore exception +static const int32_t ACL_ERROR_RT_AICORE_TRAP_EXCEPTION = 507016; // aicore trap exception +static const int32_t ACL_ERROR_RT_AICPU_TIMEOUT = 507017; // aicpu timeout +static const int32_t ACL_ERROR_RT_AICPU_EXCEPTION = 507018; // aicpu exception +static const int32_t ACL_ERROR_RT_AICPU_DATADUMP_RSP_ERR = 507019; // aicpu datadump response error +static const int32_t ACL_ERROR_RT_AICPU_MODEL_RSP_ERR = 507020; // aicpu model operate response error +static const int32_t ACL_ERROR_RT_PROFILING_ERROR = 507021; // profiling error +static const int32_t ACL_ERROR_RT_IPC_ERROR = 507022; // ipc error +static const int32_t ACL_ERROR_RT_MODEL_ABORT_NORMAL = 507023; // model abort normal +static const int32_t ACL_ERROR_RT_KERNEL_UNREGISTERING = 507024; // kernel unregistering +static const int32_t ACL_ERROR_RT_RINGBUFFER_NOT_INIT = 507025; // ringbuffer not init +static const int32_t ACL_ERROR_RT_RINGBUFFER_NO_DATA = 507026; // ringbuffer no data +static const int32_t ACL_ERROR_RT_KERNEL_LOOKUP = 507027; // kernel lookup error +static const int32_t ACL_ERROR_RT_KERNEL_DUPLICATE = 507028; // kernel register duplicate +static const int32_t ACL_ERROR_RT_DEBUG_REGISTER_FAIL = 507029; // debug register failed +static const int32_t ACL_ERROR_RT_DEBUG_UNREGISTER_FAIL = 507030; // debug unregister failed +static const int32_t ACL_ERROR_RT_LABEL_CONTEXT = 507031; // label not in current context +static const int32_t ACL_ERROR_RT_PROGRAM_USE_OUT = 507032; // program register num use out +static const int32_t ACL_ERROR_RT_DEV_SETUP_ERROR = 507033; // device setup error +static const int32_t ACL_ERROR_RT_VECTOR_CORE_TIMEOUT = 507034; // vector core timeout +static const int32_t ACL_ERROR_RT_VECTOR_CORE_EXCEPTION = 507035; // vector core exception +static const 
int32_t ACL_ERROR_RT_VECTOR_CORE_TRAP_EXCEPTION = 507036; // vector core trap exception +static const int32_t ACL_ERROR_RT_CDQ_BATCH_ABNORMAL = 507037; // cdq alloc batch abnormal + +static const int32_t ACL_ERROR_RT_DRV_INTERNAL_ERROR = 507899; // drv internal error +static const int32_t ACL_ERROR_RT_AICPU_INTERNAL_ERROR = 507900; // aicpu internal error +static const int32_t ACL_ERROR_RT_SOCKET_CLOSE = 507901; // hdc disconnect + +#ifdef __cplusplus +} +#endif + +#endif // __INC_EXTERNEL_RT_ERROR_CODES_H__ diff --git a/inc/external/acl/ops/acl_cblas.h b/inc/external/acl/ops/acl_cblas.h new file mode 100644 index 00000000..3d81eb2b --- /dev/null +++ b/inc/external/acl/ops/acl_cblas.h @@ -0,0 +1,334 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef INC_EXTERNAL_ACL_OPS_ACL_CBLAS_H_ +#define INC_EXTERNAL_ACL_OPS_ACL_CBLAS_H_ + +#include "acl/acl.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum aclTransType { ACL_TRANS_N, ACL_TRANS_T, ACL_TRANS_NZ, ACL_TRANS_NZ_T } aclTransType; + +typedef enum aclComputeType { ACL_COMPUTE_HIGH_PRECISION, ACL_COMPUTE_LOW_PRECISION } aclComputeType; + +/** + * @ingroup AscendCL + * @brief perform the matrix-vector multiplication + * + * @param transA [IN] transpose type of matrix A + * @param m [IN] number of rows of matrix A + * @param n [IN] number of columns of matrix A + * @param alpha [IN] pointer to scalar used for multiplication. 
+ * of same type as dataTypeC + * @param a [IN] pointer to matrix A + * @param lda [IN] leading dimension used to store the matrix A + * @param dataTypeA [IN] datatype of matrix A + * @param x [IN] pointer to vector x + * @param incx [IN] stride between consecutive elements of vector x + * @param dataTypeX [IN] datatype of vector x + * @param beta [IN] pointer to scalar used for multiplication. + * of same type as dataTypeC If beta == 0, + * then y does not have to be a valid input + * @param y [IN|OUT] pointer to vector y + * @param incy [IN] stride between consecutive elements of vector y + * @param dataTypeY [IN] datatype of vector y + * @param type [IN] computation type + * @param stream [IN] stream + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclblasGemvEx(aclTransType transA, int m, int n, const void *alpha, const void *a, int lda, + aclDataType dataTypeA, const void *x, int incx, aclDataType dataTypeX, + const void *beta, void *y, int incy, aclDataType dataTypeY, + aclComputeType type, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief create a handle for performing the matrix-vector multiplication + * + * @param transA [IN] transpose type of matrix A + * @param m [IN] number of rows of matrix A + * @param n [IN] number of columns of matrix A + * @param dataTypeA [IN] datatype of matrix A + * @param dataTypeX [IN] datatype of vector x + * @param dataTypeY [IN] datatype of vector y + * @param type [IN] computation type + * @param handle [OUT] pointer to the pointer to the handle + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclblasCreateHandleForGemvEx(aclTransType transA, int m, int n, aclDataType dataTypeA, + aclDataType dataTypeX, aclDataType dataTypeY, + aclComputeType type, aclopHandle **handle); + +/** + * @ingroup AscendCL + * @brief perform the matrix-vector multiplication + * + * @param transA [IN] transpose type of matrix A + * @param m [IN] number of rows of matrix A + * @param n [IN] number of columns of matrix A + * @param alpha [IN] pointer to scalar used for multiplication + * @param a [IN] pointer to matrix A + * @param lda [IN] leading dimension used to store the matrix A + * @param x [IN] pointer to vector x + * @param incx [IN] stride between consecutive elements of vector x + * @param beta [IN] pointer to scalar used for multiplication. + * If beta value == 0, + * then y does not have to be a valid input + * @param y [IN|OUT] pointer to vector y + * @param incy [IN] stride between consecutive elements of vector y + * @param type [IN] computation type + * @param stream [IN] stream + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclblasHgemv(aclTransType transA, int m, int n, const aclFloat16 *alpha, + const aclFloat16 *a, int lda, const aclFloat16 *x, int incx, + const aclFloat16 *beta, aclFloat16 *y, int incy, aclComputeType type, + aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief create a handle for performing the matrix-vector multiplication + * + * @param transA [IN] transpose type of matrix A + * @param m [IN] number of rows of matrix A + * @param n [IN] number of columns of matrix A + * @param type [IN] computation type + * @param handle [OUT] pointer to the pointer to the handle + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclblasCreateHandleForHgemv(aclTransType transA, int m, int n, aclComputeType type, + aclopHandle **handle); + +/** + * @ingroup AscendCL + * @brief perform the matrix-vector multiplication + * + * @param transA [IN] transpose type of matrix A + * @param m [IN] number of rows of matrix A + * @param n [IN] number of columns of matrix A + * @param alpha [IN] pointer to scalar used for multiplication + * @param a [IN] pointer to matrix A + * @param lda [IN] leading dimension used to store the matrix A + * @param x [IN] pointer to vector x + * @param incx [IN] stride between consecutive elements of vector x + * @param beta [IN] pointer to scalar used for multiplication. + * If beta value == 0, + * then y does not have to be a valid input + * @param y [IN|OUT] pointer to vector y + * @param incy [IN] stride between consecutive elements of vector y + * @param type [IN] computation type + * @param stream [IN] stream + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclblasS8gemv(aclTransType transA, int m, int n, const int32_t *alpha, const int8_t *a, + int lda, const int8_t *x, int incx, const int32_t *beta, int32_t *y, + int incy, aclComputeType type, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief create a handle for performing the matrix-vector multiplication + * + * @param transA [IN] transpose type of matrix A + * @param m [IN] number of rows of matrix A + * @param n [IN] number of columns of matrix A + * @param handle [OUT] pointer to the pointer to the handle + * @param type [IN] computation type + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclblasCreateHandleForS8gemv(aclTransType transA, int m, int n, aclComputeType type, + aclopHandle **handle); + +/** + * @ingroup AscendCL + * @brief perform the matrix-matrix multiplication + * + * @param transA [IN] transpose type of matrix A + * @param transB [IN] transpose type of matrix B + * @param transC [IN] transpose type of matrix C + * @param m [IN] number of rows of matrix A and matrix C + * @param n [IN] number of columns of matrix B and matrix C + * @param k [IN] number of columns of matrix A and rows of matrix B + * @param alpha [IN] pointer to scalar used for multiplication. of same type as dataTypeC + * @param matrixA [IN] pointer to matrix A + * @param lda [IN] leading dimension array used to store matrix A + * @param dataTypeA [IN] datatype of matrix A + * @param matrixB [IN] pointer to matrix B + * @param ldb [IN] leading dimension array used to store matrix B + * @param dataTypeB [IN] datatype of matrix B + * @param beta [IN] pointer to scalar used for multiplication. + * of same type as dataTypeC If beta == 0, + * then matrixC does not have to be a valid input + * @param matrixC [IN|OUT] pointer to matrix C + * @param ldc [IN] leading dimension array used to store matrix C + * @param dataTypeC [IN] datatype of matrix C + * @param type [IN] computation type + * @param stream [IN] stream + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclblasGemmEx(aclTransType transA, aclTransType transB, aclTransType transC, int m, int n, + int k, const void *alpha, const void *matrixA, int lda, + aclDataType dataTypeA, const void *matrixB, int ldb, aclDataType dataTypeB, + const void *beta, void *matrixC, int ldc, aclDataType dataTypeC, + aclComputeType type, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief create a handle for performing the matrix-matrix multiplication + * + * @param transA [IN] transpose type of matrix A + * @param transB [IN] transpose type of matrix B + * @param transC [IN] transpose type of matrix C + * @param m [IN] number of rows of matrix A and matrix C + * @param n [IN] number of columns of matrix B and matrix C + * @param k [IN] number of columns of matrix A and rows of matrix B + * @param dataTypeA [IN] datatype of matrix A + * @param dataTypeB [IN] datatype of matrix B + * @param dataTypeC [IN] datatype of matrix C + * @param type [IN] computation type + * @param handle [OUT] pointer to the pointer to the handle + * @param type [IN] computation type + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclblasCreateHandleForGemmEx(aclTransType transA, aclTransType transB, aclTransType transC, + int m, int n, int k, aclDataType dataTypeA, + aclDataType dataTypeB, aclDataType dataTypeC, + aclComputeType type, aclopHandle **handle); + +/** + * @ingroup AscendCL + * @brief perform the matrix-matrix multiplication + * + * @param transA [IN] transpose type of matrix A + * @param transB [IN] transpose type of matrix B + * @param transC [IN] transpose type of matrix C + * @param m [IN] number of rows of matrix A and matrix C + * @param n [IN] number of columns of matrix B and matrix C + * @param k [IN] number of columns of matrix A and rows of matrix B + * @param alpha [IN] pointer to scalar used for multiplication + * @param matrixA [IN] pointer to matrix A + * @param lda [IN] leading dimension used to store the matrix A + * @param matrixB [IN] pointer to matrix B + * @param ldb [IN] leading dimension used to store the matrix B + * @param beta [IN] pointer to scalar used for multiplication. + * If beta value == 0, + * then matrixC does not have to be a valid input + * @param matrixC [IN|OUT] pointer to matrix C + * @param ldc [IN] leading dimension used to store the matrix C + * @param type [IN] computation type + * @param stream [IN] stream + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclblasHgemm(aclTransType transA, aclTransType transB, aclTransType transC, int m, int n, + int k, const aclFloat16 *alpha, const aclFloat16 *matrixA, int lda, + const aclFloat16 *matrixB, int ldb, const aclFloat16 *beta, + aclFloat16 *matrixC, int ldc, aclComputeType type, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief create a handle for performing the matrix-matrix multiplication + * + * @param transA [IN] transpose type of matrix A + * @param transB [IN] transpose type of matrix B + * @param transC [IN] transpose type of matrix C + * @param m [IN] number of rows of matrix A and matrix C + * @param n [IN] number of columns of matrix B and matrix C + * @param k [IN] number of columns of matrix A and rows of matrix B + * @param type [IN] computation type + * @param handle [OUT] pointer to the pointer to the handle + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclblasCreateHandleForHgemm(aclTransType transA, aclTransType transB, aclTransType transC, + int m, int n, int k, aclComputeType type, + aclopHandle **handle); + +/** + * @ingroup AscendCL + * @brief perform the matrix-matrix multiplication + * + * @param transA [IN] transpose type of matrix A + * @param transB [IN] transpose type of matrix B + * @param transC [IN] transpose type of matrix C + * @param m [IN] number of rows of matrix A and matrix C + * @param n [IN] number of columns of matrix B and matrix C + * @param k [IN] number of columns of matrix A and rows of matrix B + * @param alpha [IN] pointer to scalar used for multiplication + * @param matrixA [IN] pointer to matrix A + * @param lda [IN] leading dimension used to store the matrix A + * @param matrixB [IN] pointer to matrix B + * @param ldb [IN] leading dimension used to store the matrix B + * @param beta [IN] pointer to scalar used for multiplication. 
+ * If beta value == 0, + * then matrixC does not have to be a valid input + * @param matrixC [IN|OUT] pointer to matrix C + * @param ldc [IN] leading dimension used to store the matrix C + * @param type [IN] computation type + * @param stream [IN] stream + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclblasS8gemm(aclTransType transA, aclTransType transB, aclTransType transC, int m, int n, + int k, const int32_t *alpha, const int8_t *matrixA, int lda, + const int8_t *matrixB, int ldb, const int32_t *beta, int32_t *matrixC, + int ldc, aclComputeType type, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief create a handle for performing the matrix-matrix multiplication + * + * @param transA [IN] transpose type of matrix A + * @param transB [IN] transpose type of matrix B + * @param transC [IN] transpose type of matrix C + * @param m [IN] number of rows of matrix A and matrix C + * @param n [IN] number of columns of matrix B and matrix C + * @param k [IN] number of columns of matrix A and rows of matrix B + * @param type [IN] computation type + * @param handle [OUT] pointer to the pointer to the handle + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclblasCreateHandleForS8gemm(aclTransType transA, aclTransType transB, aclTransType transC, + int m, int n, int k, aclComputeType type, + aclopHandle **handle); + +#ifdef __cplusplus +} +#endif + +#endif // INC_EXTERNAL_ACL_OPS_ACL_CBLAS_H_ diff --git a/inc/external/acl/ops/acl_dvpp.h b/inc/external/acl/ops/acl_dvpp.h new file mode 100644 index 00000000..dcaa3936 --- /dev/null +++ b/inc/external/acl/ops/acl_dvpp.h @@ -0,0 +1,2568 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if !defined(ENABLE_DVPP_INTERFACE) +#if defined(_MSC_VER) +#error message("if you want to use dvpp funtions ,please use the macro definition (ENABLE_DVPP_INTERFACE).") +#else +#error "if you want to use dvpp funtions ,please use the macro definition (ENABLE_DVPP_INTERFACE)." +#endif +#endif + +#ifndef INC_EXTERNAL_ACL_OPS_ACL_DVPP_H_ +#define INC_EXTERNAL_ACL_OPS_ACL_DVPP_H_ + +#include <stdint.h> +#include <stddef.h> +#include "acl/acl.h" +#include "acl/acl_base.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct acldvppPicDesc acldvppPicDesc; +typedef struct acldvppBatchPicDesc acldvppBatchPicDesc; +typedef struct acldvppRoiConfig acldvppRoiConfig; +typedef struct acldvppResizeConfig acldvppResizeConfig; +typedef struct acldvppBorderConfig acldvppBorderConfig; +typedef struct acldvppLutMap acldvppLutMap; +typedef struct acldvppChannelDesc acldvppChannelDesc; +typedef struct acldvppJpegeConfig acldvppJpegeConfig; +typedef struct aclvdecChannelDesc aclvdecChannelDesc; +typedef struct acldvppStreamDesc acldvppStreamDesc; +typedef struct aclvdecFrameConfig aclvdecFrameConfig; +typedef struct aclvencChannelDesc aclvencChannelDesc; +typedef struct aclvencFrameConfig aclvencFrameConfig; +typedef struct acldvppHist acldvppHist; +typedef void (*aclvdecCallback)(acldvppStreamDesc *input, acldvppPicDesc *output, void *userData); +typedef void (*aclvencCallback)(acldvppPicDesc *input, acldvppStreamDesc *output, void *userdata); + +// Supported Pixel Format +enum acldvppPixelFormat { + PIXEL_FORMAT_YUV_400 = 0,  // 0 + PIXEL_FORMAT_YUV_SEMIPLANAR_420 = 1,  // 1 
+ PIXEL_FORMAT_YVU_SEMIPLANAR_420 = 2, // 2 + PIXEL_FORMAT_YUV_SEMIPLANAR_422 = 3, // 3 + PIXEL_FORMAT_YVU_SEMIPLANAR_422 = 4, // 4 + PIXEL_FORMAT_YUV_SEMIPLANAR_444 = 5, // 5 + PIXEL_FORMAT_YVU_SEMIPLANAR_444 = 6, // 6 + PIXEL_FORMAT_YUYV_PACKED_422 = 7, // 7 + PIXEL_FORMAT_UYVY_PACKED_422 = 8, // 8 + PIXEL_FORMAT_YVYU_PACKED_422 = 9, // 9 + PIXEL_FORMAT_VYUY_PACKED_422 = 10, // 10 + PIXEL_FORMAT_YUV_PACKED_444 = 11, // 11 + PIXEL_FORMAT_RGB_888 = 12, // 12 + PIXEL_FORMAT_BGR_888 = 13, // 13 + PIXEL_FORMAT_ARGB_8888 = 14, // 14 + PIXEL_FORMAT_ABGR_8888 = 15, // 15 + PIXEL_FORMAT_RGBA_8888 = 16, // 16 + PIXEL_FORMAT_BGRA_8888 = 17, // 17 + PIXEL_FORMAT_YUV_SEMI_PLANNER_420_10BIT = 18, // 18 + PIXEL_FORMAT_YVU_SEMI_PLANNER_420_10BIT = 19, // 19 + PIXEL_FORMAT_YVU_PLANAR_420 = 20, // 20 + PIXEL_FORMAT_YVU_PLANAR_422, + PIXEL_FORMAT_YVU_PLANAR_444, + PIXEL_FORMAT_RGB_444 = 23, + PIXEL_FORMAT_BGR_444, + PIXEL_FORMAT_ARGB_4444, + PIXEL_FORMAT_ABGR_4444, + PIXEL_FORMAT_RGBA_4444, + PIXEL_FORMAT_BGRA_4444, + PIXEL_FORMAT_RGB_555, + PIXEL_FORMAT_BGR_555, + PIXEL_FORMAT_RGB_565, + PIXEL_FORMAT_BGR_565, + PIXEL_FORMAT_ARGB_1555, + PIXEL_FORMAT_ABGR_1555, + PIXEL_FORMAT_RGBA_1555, + PIXEL_FORMAT_BGRA_1555, + PIXEL_FORMAT_ARGB_8565, + PIXEL_FORMAT_ABGR_8565, + PIXEL_FORMAT_RGBA_8565, + PIXEL_FORMAT_BGRA_8565, + PIXEL_FORMAT_RGB_BAYER_8BPP = 50, + PIXEL_FORMAT_RGB_BAYER_10BPP, + PIXEL_FORMAT_RGB_BAYER_12BPP, + PIXEL_FORMAT_RGB_BAYER_14BPP, + PIXEL_FORMAT_RGB_BAYER_16BPP, + PIXEL_FORMAT_BGR_888_PLANAR = 70, + PIXEL_FORMAT_HSV_888_PACKAGE, + PIXEL_FORMAT_HSV_888_PLANAR, + PIXEL_FORMAT_LAB_888_PACKAGE, + PIXEL_FORMAT_LAB_888_PLANAR, + PIXEL_FORMAT_S8C1, + PIXEL_FORMAT_S8C2_PACKAGE, + PIXEL_FORMAT_S8C2_PLANAR, + PIXEL_FORMAT_S16C1, + PIXEL_FORMAT_U8C1, + PIXEL_FORMAT_U16C1, + PIXEL_FORMAT_S32C1, + PIXEL_FORMAT_U32C1, + PIXEL_FORMAT_U64C1, + PIXEL_FORMAT_S64C1, + PIXEL_FORMAT_YUV_SEMIPLANAR_440 = 1000, + PIXEL_FORMAT_YVU_SEMIPLANAR_440, + PIXEL_FORMAT_FLOAT32, + PIXEL_FORMAT_BUTT, + 
PIXEL_FORMAT_UNKNOWN = 10000 +}; + +// Stream Format +enum acldvppStreamFormat { H265_MAIN_LEVEL = 0, H264_BASELINE_LEVEL, H264_MAIN_LEVEL, H264_HIGH_LEVEL }; + +// Supported Channel Mode +enum acldvppChannelMode { DVPP_CHNMODE_VPC = 1, DVPP_CHNMODE_JPEGD = 2, DVPP_CHNMODE_JPEGE = 4 }; + +// Supported Border Type +enum acldvppBorderType { BORDER_CONSTANT = 0, BORDER_REPLICATE, BORDER_REFLECT, BORDER_REFLECT_101 }; + +// Venc parameter type +enum aclvencChannelDescParamType { + ACL_VENC_THREAD_ID_UINT64 = 0, + ACL_VENC_CALLBACK_PTR, + ACL_VENC_PIXEL_FORMAT_UINT32, + ACL_VENC_ENCODE_TYPE_UINT32, + ACL_VENC_PIC_WIDTH_UINT32, + ACL_VENC_PIC_HEIGHT_UINT32, + ACL_VENC_KEY_FRAME_INTERVAL_UINT32, + ACL_VENC_BUF_ADDR_PTR, + ACL_VENC_BUF_SIZE_UINT32, + ACL_VENC_RC_MODE_UINT32, + ACL_VENC_SRC_RATE_UINT32, + ACL_VENC_MAX_BITRATE_UINT32, + ACL_VENC_MAX_IP_PROP_UINT32 +}; + +// Jpeg picture format +enum acldvppJpegFormat { + ACL_JPEG_CSS_444 = 0, + ACL_JPEG_CSS_422, + ACL_JPEG_CSS_420, + ACL_JPEG_CSS_GRAY, + ACL_JPEG_CSS_440, + ACL_JPEG_CSS_411, + ACL_JPEG_CSS_UNKNOWN = 1000 +}; + +/** + * @ingroup AscendCL + * @brief alloc device memory for dvpp. + * + * @par Function + * @li It's mainly used for allocating memory to device media data processing. + * The requested memory meets the data processing requirements. + * After calling this interface to request memory, + * you must release the memory using the acldvppFree interface. + * @li When calling the acldvppMalloc interface to apply for memory, + * the size entered by the user is aligned upwards to 32 integer multiples, + * and an additional 32 bytes are applied. + * + * @par Restriction + * If the user uses the acldvppMalloc interface to apply for a large block of + * memory and divide and manage the memory by himself, + * when applying for memory, the user needs to align up to 32 integer + * times + 32 bytes (ALIGN_UP [len] +32 words) according to + * the actual data size of each picture Section) to manage memory. 
+ * + * @param devPtr [OUT] memory pointer. + * @param size [IN] memory size. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acldvppFree + */ +ACL_FUNC_VISIBILITY aclError acldvppMalloc(void **devPtr, size_t size); + +/** + * @ingroup AscendCL + * @brief free device memory for dvpp. + * + * @par Function + * Free the memory requested through the acldvppMalloc interface + * @param devPtr [IN] memory pointer to free. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acldvppMalloc + */ +ACL_FUNC_VISIBILITY aclError acldvppFree(void *devPtr); + +/** + * @ingroup AscendCL + * @brief create DvppChannelDesc. + * + * @par Function + * Create a channel for image data processing. + * The same channel can be reused + * and is no longer available after destruction + * + * @retval null for failed. + * @retval OtherValues success. + */ +ACL_FUNC_VISIBILITY acldvppChannelDesc *acldvppCreateChannelDesc(); + +/** + * @ingroup AscendCL + * @brief destroy dvppChannelDesc. + * + * @par Function + * Can only destroy channels created by the acldvppCreateChannel interface + * @param channelDesc [IN] the channel description. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acldvppCreateChannelDesc | acldvppDestroyChannel + */ +ACL_FUNC_VISIBILITY aclError acldvppDestroyChannelDesc(acldvppChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief Get dvpp channel Id. + * + * @par Restriction + * Interface calling sequence: + * acldvppCreateChannelDesc --> acldvppCreateChannel --> + * acldvppGetChannelDescChannelId + * + * @param channelDesc [IN] the channel description. + * + * @retval channel id. 
+ * + * @see acldvppCreateChannelDesc | acldvppCreateChannel + */ +ACL_FUNC_VISIBILITY uint64_t acldvppGetChannelDescChannelId(const acldvppChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief Create dvpp picture description. + * + * @retval null for failed. + * @retval OtherValues success. + */ +ACL_FUNC_VISIBILITY acldvppPicDesc *acldvppCreatePicDesc(); + +/** + * @ingroup AscendCL + * @brief Destroy dvpp picture description. + * + * @par Function + * Can only destroy picture description information created + * through acldvppCreatePicDesc interface. + * @param picDesc [IN] dvpp picture description. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acldvppCreatePicDesc + */ +ACL_FUNC_VISIBILITY aclError acldvppDestroyPicDesc(acldvppPicDesc *picDesc); + +/** + * @ingroup AscendCL + * @brief Set dvpp picture description's data. + * + * @param picDesc [OUT] dvpp picture description. + * @param dataDev [IN] dvpp picture dataDev.Must be the memory + * requested using the acldvppMalloc interface. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acldvppMalloc + */ +ACL_FUNC_VISIBILITY aclError acldvppSetPicDescData(acldvppPicDesc *picDesc, void *dataDev); + +/** + * @ingroup AscendCL + * @brief Set dvpp picture description's size. + * + * @param picDesc [OUT] dvpp picture description. + * @param size dvpp [IN] picture size. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppSetPicDescSize(acldvppPicDesc *picDesc, uint32_t size); + +/** + * @ingroup AscendCL + * @brief Set dvpp picture description's format. + * + * @param picDesc [OUT] dvpp picture description. + * @param format [IN] dvpp picture format. + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppSetPicDescFormat(acldvppPicDesc *picDesc, acldvppPixelFormat format); + +/** + * @ingroup AscendCL + * @brief Set dvpp picture description's width. + * + * @param picDesc [OUT] dvpp picture description. + * @param width [IN] dvpp picture width. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppSetPicDescWidth(acldvppPicDesc *picDesc, uint32_t width); + +/** + * @ingroup AscendCL + * @brief Set dvpp picture description's height. + * + * @param picDesc [OUT] dvpp picture description. + * @param height [IN] dvpp picture height. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppSetPicDescHeight(acldvppPicDesc *picDesc, uint32_t height); + +/** + * @ingroup AscendCL + * @brief Set dvpp picture description's widthStride. + * + * @par Restriction + * Width alignment requirements: + * @li The minimum stride is 32 and the maximum is 4096 * 4 + * (that is, an image in argb format with a width of 4096); + * @li For 8K scaling, widthStride is required to be aligned to 2; + * @li For non 8K scaling, the calculation formula for widthStride + * is different for different image formats: + * @li yuv400sp, yuv420sp, yuv422sp, yuv444sp: input image width aligned to 16 + * @li yuv422packed: input image width * 2 and then align to 16 + * @li yuv444packed, rgb888: input image width alignment * 3, alignment to 16 + * @li xrgb8888: input image width * 4, align to 16 + * @li HFBC:input image width + * + * @param picDesc [OUT] dvpp picture description. + * @param widthStride [IN] dvpp picture widthStride. + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppSetPicDescWidthStride(acldvppPicDesc *picDesc, uint32_t widthStride); + +/** + * @ingroup AscendCL + * @brief Set dvpp picture description's heightStride. + * + * @par Restriction + * Height alignment requirements: + * @li The height of the input image is aligned to 2. + * High stride minimum 6 and maximum 4096. + * + * @param picDesc [OUT] dvpp picture description. + * @param heightStride [IN] dvpp picture heightStride. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppSetPicDescHeightStride(acldvppPicDesc *picDesc, uint32_t heightStride); + +/** + * @ingroup AscendCL + * @brief Set dvpp picture description's retcode. + * + * @param picDesc [OUT] dvpp picture description. + * @param retCode [IN] dvpp picture retcode. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppSetPicDescRetCode(acldvppPicDesc *picDesc, uint32_t retCode); + +/** + * @ingroup AscendCL + * @brief Get picture data. + * + * @param picDesc [IN] dvpp picture description. + * + * @retval picture data addr. + * @retval default nullptr. + */ +ACL_FUNC_VISIBILITY void *acldvppGetPicDescData(const acldvppPicDesc *picDesc); + +/** + * @ingroup AscendCL + * @brief Get picture data size. + * + * @param picDesc [IN] dvpp picture description. + * + * @retval picture data size. + * @retval default 0. + */ +ACL_FUNC_VISIBILITY uint32_t acldvppGetPicDescSize(const acldvppPicDesc *picDesc); + +/** + * @ingroup AscendCL + * @brief Get dvpp picture desc's format. + * + * @param picDesc [IN] dvpp picture description. + * + * @retval format + * @retval default PIXEL_FORMAT_YUV_400. + */ +ACL_FUNC_VISIBILITY acldvppPixelFormat acldvppGetPicDescFormat(const acldvppPicDesc *picDesc); + +/** + * @ingroup AscendCL + * @brief Get dvpp picture desc's width. 
+ * + * @param picDesc [IN] dvpp picture description. + * + * @retval width. + * @retval default 0. + */ +ACL_FUNC_VISIBILITY uint32_t acldvppGetPicDescWidth(const acldvppPicDesc *picDesc); + +/** + * @ingroup AscendCL + * @brief Get dvpp picture desc's height. + * + * @param picDesc [IN] dvpp picture description. + * + * @retval height. + * @retval default 0. + */ +ACL_FUNC_VISIBILITY uint32_t acldvppGetPicDescHeight(const acldvppPicDesc *picDesc); + +/** + * @ingroup AscendCL + * @brief Get dvpp picture desc's widthStride. + * + * @par Restriction + * Width alignment requirements: + * @li The minimum stride is 32 and the maximum is 4096 * 4 + * (that is, an image in argb format with a width of 4096); + * @li For 8K scaling, widthStride is required to be aligned to 2; + * @li For non 8K scaling, the calculation formula for widthStride + * is different for different image formats: + * @li yuv400sp, yuv420sp, yuv422sp, yuv444sp: input image width aligned to 16 + * @li yuv422packed: input image width * 2 and then align to 16 + * @li yuv444packed, rgb888: input image width alignment * 3, alignment to 16 + * @li xrgb8888: input image width * 4, align to 16 + * @li HFBC:input image width + * + * @param picDesc [IN] dvpp picture description. + * + * @retval stride width. + * @retval default 0. + */ +ACL_FUNC_VISIBILITY uint32_t acldvppGetPicDescWidthStride(const acldvppPicDesc *picDesc); + +/** + * @ingroup AscendCL + * @brief Get dvpp picture desc's heightStride. + * + * @par Restriction + * Height alignment requirements: + * @li The height of the input image is aligned to 2. + * High stride minimum 6 and maximum 4096. + * + * @param picDesc [IN] dvpp picture description. + * + * @retval stride height. + * @retval default 0. + */ +ACL_FUNC_VISIBILITY uint32_t acldvppGetPicDescHeightStride(const acldvppPicDesc *picDesc); + +/** + * @ingroup AscendCL + * @brief Get dvpp picture desc's retcode. + * + * @param picDesc [IN] dvpp picture description. + * + * @retval ret code. 
+ * @retval default 0. + */ +ACL_FUNC_VISIBILITY uint32_t acldvppGetPicDescRetCode(const acldvppPicDesc *picDesc); + +/** + * @ingroup AscendCL + * @brief Create dvpp roi config. + * + * @param left [IN] the left offset, must be even + * @param right [IN] the right offset, must be odd + * @param top [IN] the top offset, must be even + * @param bottom [IN] the bottom offset, must be odd + * + * @retval null for failed. + * @retval other success + */ +ACL_FUNC_VISIBILITY acldvppRoiConfig *acldvppCreateRoiConfig(uint32_t left, uint32_t right, uint32_t top, + uint32_t bottom); + +/** + * @ingroup AscendCL + * @brief Destroy dvpp roi config. + * + * @par Function + * Destroys data created through the acldvppCreateRoiConfig interface + * @param roiConfig [IN] dvpp roi config. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acldvppCreateRoiConfig + */ +ACL_FUNC_VISIBILITY aclError acldvppDestroyRoiConfig(acldvppRoiConfig *roiConfig); + +/** + * @ingroup AscendCL + * @brief Set left of RoiConfig. + * + * @param config [OUT] RoiConfig + * @param left [IN] left offset + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppSetRoiConfigLeft(acldvppRoiConfig *config, uint32_t left); + +/** + * @ingroup AscendCL + * @brief Set right of RoiConfig. + * + * @param config [OUT] RoiConfig + * @param right [IN] right offset + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppSetRoiConfigRight(acldvppRoiConfig *config, uint32_t right); + +/** + * @ingroup AscendCL + * @brief Set top of RoiConfig. + * + * @param config [OUT] RoiConfig + * @param top [IN] top offset + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppSetRoiConfigTop(acldvppRoiConfig *config, uint32_t top); + +/** + * @ingroup AscendCL + * @brief Set bottom of RoiConfig. + * + * @param config [OUT] RoiConfig + * @param bottom [IN] bottom offset + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppSetRoiConfigBottom(acldvppRoiConfig *config, uint32_t bottom); + +/** + * @ingroup AscendCL + * @brief Set RoiConfig. + * + * @param config [OUT] RoiConfig + * @param left [IN] left offset + * @param right [IN] right offset + * @param top [IN] top offset + * @param bottom [IN] bottom offset + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppSetRoiConfig(acldvppRoiConfig *config, uint32_t left, uint32_t right, uint32_t top, + uint32_t bottom); + +/** + * @ingroup AscendCL + * @brief Create dvpp resize config. + * The specified scaling algorithm is not supported. + * The default scaling algorithm is "nearest neighbor interpolation". + * + * @retval null for failed. + * @retval other success. + */ +ACL_FUNC_VISIBILITY acldvppResizeConfig *acldvppCreateResizeConfig(); + +/** + * @ingroup AscendCL + * @brief Destroy dvpp resize config. + * + * @par Function + * Destroys the scaling configuration data created by + * the acldvppCreateResizeConfig interface + * + * @param resizeConfig [IN] resize config. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acldvppCreateResizeConfig + */ +ACL_FUNC_VISIBILITY aclError acldvppDestroyResizeConfig(acldvppResizeConfig *resizeConfig); + +/** + * @ingroup AscendCL + * @brief Create jpege config. + * + * @retval null for failed. + * @retval other success. 
+ */ +ACL_FUNC_VISIBILITY acldvppJpegeConfig *acldvppCreateJpegeConfig(); + +/** + * @ingroup AscendCL + * @brief Destroy jpege config. + * + * @par Function + * Destroys the encoding configuration data created by + * the acldvppCreateJpegeConfig interface + * @param jpegeConfig [IN] config pointer to destroy. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acldvppCreateJpegeConfig + */ +ACL_FUNC_VISIBILITY aclError acldvppDestroyJpegeConfig(acldvppJpegeConfig *jpegeConfig); + +/** + * @ingroup AscendCL + * @brief Set jpege config's level. + * + * @param jpegeConfig [OUT] Call the acldvppCreateJpegeConfig + * interface to create acldvppJpegeConfig data + * @param level [IN] Encoding quality range [0, 100], + * where level 0 encoding quality is similar to level 100, + * and the smaller the value in [1, 100], + * the worse the quality of the output picture. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppSetJpegeConfigLevel(acldvppJpegeConfig *jpegeConfig, uint32_t level); + +/** + * @ingroup AscendCL + * @brief Get jpege config's level. + * + * @param jpegeConfig [IN] jpege config. + * + * @retval compression level. + * @retval default 0. + */ +ACL_FUNC_VISIBILITY uint32_t acldvppGetJpegeConfigLevel(const acldvppJpegeConfig *jpegeConfig); + +/** + * @ingroup AscendCL + * @brief create vdecChannelDesc.Channel description information + * when creating a video data processing channel. + * + * @retval null for failed. + * @retval other success + */ +ACL_FUNC_VISIBILITY aclvdecChannelDesc *aclvdecCreateChannelDesc(); + +/** + * @ingroup AscendCL + * @brief destroy vdecChannelDesc. + * + * @par Function + * Can only destroy aclvdecChannelDesc type created + * through aclvdecCreateChannelDesc interface + * @param channelDesc [IN] channel description. + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure
+ *
+ * @see aclvdecCreateChannelDesc
+ */
+ACL_FUNC_VISIBILITY aclError aclvdecDestroyChannelDesc(aclvdecChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set vdec channel description's channel id.
+ *
+ * @param channelDesc [OUT] vdec channel description.
+ * @param channelId [IN] decoding channel id: 0~15.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclvdecSetChannelDescChannelId(aclvdecChannelDesc *channelDesc, uint32_t channelId);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set vdec channel description's thread id.
+ *
+ * @param channelDesc [OUT] vdec channel description.
+ * @param threadId [IN] thread id.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclvdecSetChannelDescThreadId(aclvdecChannelDesc *channelDesc, uint64_t threadId);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set vdec channel description's callback function.
+ *
+ * @param channelDesc [OUT] vdec channel description.
+ * @param callback [IN] function callback. Function prototype:
+ * void (* aclvdecCallback)
+ * (acldvppStreamDesc * input, acldvppPicDesc * output, void* userdata)
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclvdecCallback
+ */
+ACL_FUNC_VISIBILITY aclError aclvdecSetChannelDescCallback(aclvdecChannelDesc *channelDesc, aclvdecCallback callback);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set vdec channel description's video encoding type.
+ *
+ * @param channelDesc [OUT] vdec channel description.
+ * @param enType [IN] video encoding type.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclvdecSetChannelDescEnType(aclvdecChannelDesc *channelDesc, acldvppStreamFormat enType); + +/** + * @ingroup AscendCL + * @brief Set vdec channel description's out picture format. + * + * @param channelDesc [OUT] vdec channel description. + * @param outPicFormat [IN] out picture format (acldvppPixelFormat). + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclvdecSetChannelDescOutPicFormat(aclvdecChannelDesc *channelDesc, + acldvppPixelFormat outPicFormat); + +/** + * @ingroup AscendCL + * @brief Set vdec channel description's out picture width. + * + * @param channelDesc [OUT] vdec channel description. + * @param outPicWidth [IN] out picture width. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclvdecSetChannelDescOutPicWidth(aclvdecChannelDesc *channelDesc, uint32_t outPicWidth); + +/** + * @ingroup AscendCL + * @brief Set vdec channel description's out picture height. + * + * @param channelDesc [OUT] vdec channel description. + * @param outPicHeight [IN] out picture height. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclvdecSetChannelDescOutPicHeight(aclvdecChannelDesc *channelDesc, uint32_t outPicHeight); + +/** + * @ingroup AscendCL + * @brief Set vdec channel description's reference frame num. + * + * @param channelDesc [OUT] vdec channel description. + * @param refFrameNum [IN] reference frame num. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclvdecSetChannelDescRefFrameNum(aclvdecChannelDesc *channelDesc, uint32_t refFrameNum); + +/** + * @ingroup AscendCL + * @brief Set vdec channel description's bit depth. 
+ * + * @param channelDesc [OUT] vdec channel description. + * @param bitDepth [IN] bit depth. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclvdecSetChannelDescBitDepth(aclvdecChannelDesc *channelDesc, uint32_t bitDepth); + +/** + * @ingroup AscendCL + * @brief Get vdec channel description's channel id. + * + * @param channelDesc [IN] vdec channel description. + * + * @retval decoding channel id: 0~15. + * @retval default 0. + */ +ACL_FUNC_VISIBILITY uint32_t aclvdecGetChannelDescChannelId(const aclvdecChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief Get vdec channel description's thread id. + * + * @param channelDesc [IN] vdec channel description. + * + * @retval thread id. + * @retval default 0. + */ +ACL_FUNC_VISIBILITY uint64_t aclvdecGetChannelDescThreadId(const aclvdecChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief Get vdec channel description's callback function. + * + * @param channelDesc [IN] vdec channel description. + * + * @retval function callback.Function prototype: + * void (* aclvdecCallback) + * (acldvppStreamDesc * input, acldvppPicDesc * output, void* userdata) + * @retval default null. + * + * @see aclvdecCallback + */ +ACL_FUNC_VISIBILITY aclvdecCallback aclvdecGetChannelDescCallback(const aclvdecChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief Get vdec channel description's video encoding type. + * + * @param channelDesc [IN] vdec channel description. + * + * @retval video encoding type. + * @retval default H265_MAIN_LEVEL. + */ +ACL_FUNC_VISIBILITY acldvppStreamFormat aclvdecGetChannelDescEnType(const aclvdecChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief Get vdec channel description's out picture format. + * + * @param channelDesc [IN] vdec channel description. + * + * @retval out picture format. + * @retval default DVPP_OUTPUT_YUV420SP_UV. 
+ */ +ACL_FUNC_VISIBILITY acldvppPixelFormat aclvdecGetChannelDescOutPicFormat(const aclvdecChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief Get vdec channel description's out picture width. + * + * @param channelDesc [IN] vdec channel description. + * + * @retval out picture width. + * @retval default 0. + */ +ACL_FUNC_VISIBILITY uint32_t aclvdecGetChannelDescOutPicWidth(const aclvdecChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief Get vdec channel description's out picture height. + * + * @param channelDesc [IN] vdec channel description. + * + * @retval out picture height (for vdec malloc memory). + * @retval default 0. + */ +ACL_FUNC_VISIBILITY uint32_t aclvdecGetChannelDescOutPicHeight(const aclvdecChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief Get vdec channel description's bit depth. + * + * @param channelDesc [IN] vdec channel description. + * + * @retval bit depth. + * @retval default 0. + */ +ACL_FUNC_VISIBILITY uint32_t aclvdecGetChannelDescBitDepth(const aclvdecChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief Get vdec channel description's reference frame num. + * + * @param channelDesc [IN] vdec channel description. + * + * @retval reference frame num. + * @retval default 0. + */ +ACL_FUNC_VISIBILITY uint32_t aclvdecGetChannelDescRefFrameNum(const aclvdecChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief create vencChannelDesc. + * + * @retval null for failed, other success + */ +ACL_FUNC_VISIBILITY aclvencChannelDesc *aclvencCreateChannelDesc(); + +/** + * @ingroup AscendCL + * @brief destroy vencChannelDesc. + * + * @param channelDesc [IN] channel desc. + * + * @retval ACL_SUCCESS:success, other:failed + */ +ACL_FUNC_VISIBILITY aclError aclvencDestroyChannelDesc(aclvencChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief Set decoding thread id for venc channel desc. 
+ * + * @param channelDesc [OUT] venc channel desc + * @param threadId [IN] thread id + * + * @retval ACL_SUCCESS for success, other for failure + */ +ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescThreadId(aclvencChannelDesc *channelDesc, uint64_t threadId); + +/** + * @ingroup AscendCL + * @brief Set func callback for venc channel desc. + * + * @param channelDesc [OUT] venc channel desc + * @param callback [IN] func callback + * + * @retval ACL_SUCCESS for success, other for failure + */ +ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescCallback(aclvencChannelDesc *channelDesc, aclvencCallback callback); + +/** + * @ingroup AscendCL + * @brief Set video encoding type for venc channel desc. + * + * @param channelDesc [OUT] venc channel desc + * @param enType [IN] video encoding type + * + * @retval ACL_SUCCESS for success, other for failure + */ +ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescEnType(aclvencChannelDesc *channelDesc, acldvppStreamFormat enType); + +/** + * @ingroup AscendCL + * @brief Set pic format for venc channel desc. + * + * @param channelDesc [OUT] venc channel desc + * @param picFormat [IN] pic format + * + * @retval ACL_SUCCESS for success, other for failure + */ +ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescPicFormat(aclvencChannelDesc *channelDesc, + acldvppPixelFormat picFormat); + +/** + * @ingroup AscendCL + * @brief Set out pic width for venc channel desc. + * + * @param channelDesc [OUT] venc channel desc + * @param picWidth [IN] pic width + * + * @retval ACL_SUCCESS for success, other for failure + */ +ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescPicWidth(aclvencChannelDesc *channelDesc, uint32_t picWidth); + +/** + * @ingroup AscendCL + * @brief Set pic height for venc channel desc. 
+ * + * @param channelDesc [OUT] venc channel desc + * @param picHeight [IN] pic height + * + * @retval ACL_SUCCESS for success, other for failure + */ +ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescPicHeight(aclvencChannelDesc *channelDesc, uint32_t picHeight); + +/** + * @ingroup AscendCL + * @brief Set key frame interval for venc channel desc. + * + * @param channelDesc [OUT] venc channel desc + * @param keyFrameInterval [IN] Interval of key frame + * + * @retval ACL_SUCCESS for success, other for failure + */ +ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescKeyFrameInterval(aclvencChannelDesc *channelDesc, + uint32_t keyFrameInterval); + +/** + * @ingroup AscendCL + * @brief Set output buffer address for venc channel desc. + * + * @param channelDesc [OUT] venc channel desc + * @param bufAddr [IN] output buffer address + * + * @retval ACL_SUCCESS for success, other for failure + */ +ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescBufAddr(aclvencChannelDesc *channelDesc, void *bufAddr); + +/** + * @ingroup AscendCL + * @brief Set output buffer size for venc channel desc. + * + * @param channelDesc [OUT] venc channel desc + * @param bufSize [IN] output buffer size + * + * @retval ACL_SUCCESS for success, other for failure + */ +ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescBufSize(aclvencChannelDesc *channelDesc, uint32_t bufSize); + +/** + * @ingroup AscendCL + * @brief Set rc model for venc channel desc. + * + * @param channelDesc [OUT] venc channel desc + * @param rcMode [IN] venc rc mode(VBR=1, CBR=2) + * + * @retval ACL_SUCCESS for success, other for failure + */ +ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescRcMode(aclvencChannelDesc *channelDesc, uint32_t rcMode); + +/** + * @ingroup AscendCL + * @brief Set source rate for venc channel desc. 
+ * + * @param channelDesc [OUT] venc channel desc + * @param srcRate [IN] source rate + * + * @retval ACL_SUCCESS for success, other for failure + */ +ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescSrcRate(aclvencChannelDesc *channelDesc, uint32_t srcRate); + +/** + * @ingroup AscendCL + * @brief Set max bit rate for venc channel desc. + * + * @param channelDesc [OUT] venc channel desc + * @param maxBitRate [IN] max bit rate + * + * @retval ACL_SUCCESS for success, other for failure + */ +ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescMaxBitRate(aclvencChannelDesc *channelDesc, uint32_t maxBitRate); + +/** + * @ingroup AscendCL + * @brief Set venc parameter for venc channel desc. + * + * @param channelDesc [OUT] venc channel desc + * @param paramType [IN] parameter type + * @param length [IN] parameter length + * @param param [IN] pointer to parameter value + * + * @retval ACL_SUCCESS for success, other for failure + */ +ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescParam(aclvencChannelDesc *channelDesc, + aclvencChannelDescParamType paramType, size_t length, + const void *param); + +/** + * @ingroup AscendCL + * @brief Get output buffer address for venc channel desc. + * + * @param channelDesc[IN] venc channel desc + * + * @retval output buffer address + */ +ACL_FUNC_VISIBILITY void *aclvencGetChannelDescBufAddr(const aclvencChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief Get output buffer size for venc channel desc. + * + * @param channelDesc [IN] venc channel desc + * + * @retval output buffer size + */ +ACL_FUNC_VISIBILITY uint32_t aclvencGetChannelDescBufSize(const aclvencChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief Get decoding channel id for venc channel desc. 
+ * + * @param channelDesc [IN] venc channel desc + * + * @retval decoding channel id: 0~15, default 0 + */ +ACL_FUNC_VISIBILITY uint32_t aclvencGetChannelDescChannelId(const aclvencChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief Get decoding thread id for venc channel desc. + * + * @param channelDesc [IN] venc channel desc + * + * @retval thread id, default 0 + */ +ACL_FUNC_VISIBILITY uint64_t aclvencGetChannelDescThreadId(const aclvencChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief Get func callback for venc channel desc. + * + * @param channelDesc [IN] venc channel desc + * + * @retval func callback, default null + */ +ACL_FUNC_VISIBILITY aclvencCallback aclvencGetChannelDescCallback(const aclvencChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief Get video encoding type for venc channel desc. + * + * @param channelDesc [IN] venc channel desc + * + * @retval video encoding type, default H265_MAIN_LEVEL + */ +ACL_FUNC_VISIBILITY acldvppStreamFormat aclvencGetChannelDescEnType(const aclvencChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief Get pic format for venc channel desc. + * + * @param channelDesc [IN] venc channel desc + * + * @retval pic format + */ +ACL_FUNC_VISIBILITY acldvppPixelFormat aclvencGetChannelDescPicFormat(const aclvencChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief Get pic width for venc channel desc. + * + * @param channelDesc [IN] venc channel desc + * + * @retval pic width, default 0 + */ +ACL_FUNC_VISIBILITY uint32_t aclvencGetChannelDescPicWidth(const aclvencChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief Get pic height for venc channel desc. + * + * @param channelDesc [IN] venc channel desc + * + * @retval pic height, default 0 + */ +ACL_FUNC_VISIBILITY uint32_t aclvencGetChannelDescPicHeight(const aclvencChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief Get interval of key frame for venc channel desc. 
+ * + * @param channelDesc [IN] venc channel desc + * + * @retval interval of key frame, default 0 + */ +ACL_FUNC_VISIBILITY uint32_t aclvencGetChannelDescKeyFrameInterval(const aclvencChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * + * @brief Get rc mode for venc channel desc. + * + * @param channelDesc [IN] venc channel desc + * + * @retval rc mode, default 0 + */ +ACL_FUNC_VISIBILITY uint32_t aclvencGetChannelDescRcMode(const aclvencChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * + * @brief Get source rate for venc channel desc. + * + * @param channelDesc [IN] venc channel desc + * + * @retval source rate, default 0 + */ +ACL_FUNC_VISIBILITY uint32_t aclvencGetChannelDescSrcRate(const aclvencChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * + * @brief Get max bit rate for venc channel desc. + * + * @param channelDesc [IN] venc channel desc + * + * @retval max bit rate, default 0 + */ +ACL_FUNC_VISIBILITY uint32_t aclvencGetChannelDescMaxBitRate(const aclvencChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * + * @brief Get venc parameter for venc channel desc. 
+ *
+ * @param channelDesc [IN] venc channel desc
+ * @param paramType [IN] parameter type
+ * @param length [IN] parameter length
+ * @param paramRetSize [OUT] pointer to parameter real length
+ * @param param [OUT] pointer to parameter value
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError aclvencGetChannelDescParam(const aclvencChannelDesc *channelDesc,
+                                                        aclvencChannelDescParamType paramType, size_t length,
+                                                        size_t *paramRetSize, void *param);
+
+/**
+ * @ingroup AscendCL
+ * @brief get forced restart of I-frame interval from config
+ *
+ * @param config [IN] venc frame config
+ *
+ * @retval 0: Not forced; 1: Forced restart of I-frame; -1: error
+ */
+ACL_FUNC_VISIBILITY uint8_t aclvencGetFrameConfigForceIFrame(const aclvencFrameConfig *config);
+
+/**
+ * @ingroup AscendCL
+ * @brief get end-of-stream (eos) flag from config
+ *
+ * @param config [IN] venc frame config
+ *
+ * @retval Whether it is the end frame: 0: no; 1: end frame
+ */
+ACL_FUNC_VISIBILITY uint8_t aclvencGetFrameConfigEos(const aclvencFrameConfig *config);
+
+/**
+ * @ingroup AscendCL
+ * @brief set single frame encoding configuration parameters
+ *
+ * @param config [OUT] venc frame config
+ * @param forceIFrame [IN] forced restart of I-frame interval: 0: Not forced; 1: Forced restart of I-frame
+ *
+ * @retval ACL_SUCCESS for ok, others for fail
+ */
+ACL_FUNC_VISIBILITY aclError aclvencSetFrameConfigForceIFrame(aclvencFrameConfig *config, uint8_t forceIFrame);
+
+/**
+ * @ingroup AscendCL
+ * @brief set single frame encoding configuration parameters
+ *
+ * @param config [OUT] venc frame config
+ * @param eos [IN] Whether it is the end frame: 0: no; 1: end frame
+ *
+ * @retval ACL_SUCCESS for ok, others for fail
+ */
+ACL_FUNC_VISIBILITY aclError aclvencSetFrameConfigEos(aclvencFrameConfig *config, uint8_t eos);
+
+/**
+ * @ingroup AscendCL
+ * @brief dvpp venc destroy frame config
+ *
+ * @param config [IN] venc frame config
+ *
+ * @retval ACL_SUCCESS for ok, others for fail
+ */
+ACL_FUNC_VISIBILITY aclError aclvencDestroyFrameConfig(aclvencFrameConfig *config);
+
+/**
+ * @ingroup AscendCL
+ * @brief Create dvpp venc frame config.
+ *
+ * @retval null for failed, other aclvencFrameConfig ptr
+ */
+ACL_FUNC_VISIBILITY aclvencFrameConfig *aclvencCreateFrameConfig();
+
+/**
+ * @ingroup AscendCL
+ * @brief Create dvpp venc channel.
+ *
+ * @param channelDesc [IN|OUT] venc channel desc
+ *
+ * @retval ACL_SUCCESS for ok, others for fail
+ */
+ACL_FUNC_VISIBILITY aclError aclvencCreateChannel(aclvencChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy dvpp venc channel.
+ *
+ * @param channelDesc [IN] venc channel desc
+ *
+ * @retval ACL_SUCCESS for ok, others for fail
+ */
+ACL_FUNC_VISIBILITY aclError aclvencDestroyChannel(aclvencChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief dvpp venc launch send frame task.
+ *
+ * @param channelDesc [IN] venc channel desc
+ * @param input [IN] input picture desc
+ * @param reserve [IN] reserve parameter
+ * @param config [IN] dvpp frame config
+ * @param userdata [IN] user callback function
+ *
+ * @retval ACL_SUCCESS for ok, others for fail
+ */
+ACL_FUNC_VISIBILITY aclError aclvencSendFrame(aclvencChannelDesc *channelDesc, acldvppPicDesc *input, void *reserve,
+                                              aclvencFrameConfig *config, void *userdata);
+
+/**
+ * @ingroup AscendCL
+ * @brief Create dvpp stream description.
+ *
+ * @retval null for failed.
+ * @retval other success.
+ */
+ACL_FUNC_VISIBILITY acldvppStreamDesc *acldvppCreateStreamDesc();
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy dvpp stream description.
+ *
+ * @par Function
+ * Can only destroy acldvppStreamDesc type created through
+ * acldvppCreateStreamDesc interface.
+ *
+ * @param streamDesc [IN] dvpp stream description.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure + * + * @see acldvppCreateStreamDesc + */ +ACL_FUNC_VISIBILITY aclError acldvppDestroyStreamDesc(acldvppStreamDesc *streamDesc); + +/** + * @ingroup AscendCL + * @brief Set stream description's data addr. + * + * @param streamDesc [OUT] dvpp stream description. + * @param dataDev [IN] data addr. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppSetStreamDescData(acldvppStreamDesc *streamDesc, void *dataDev); + +/** + * @ingroup AscendCL + * @brief Set stream description's data size. + * + * @param streamDesc [OUT] dvpp stream description. + * @param size [IN] data size. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppSetStreamDescSize(acldvppStreamDesc *streamDesc, uint32_t size); + +/** + * @ingroup AscendCL + * @brief Set stream description's format. + * + * @param streamDesc [OUT] dvpp stream description. + * @param format [IN] stream format. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppSetStreamDescFormat(acldvppStreamDesc *streamDesc, acldvppStreamFormat format); + +/** + * @ingroup AscendCL + * @brief Set stream description's timestamp. + * + * @param streamDesc [OUT] dvpp stream description. + * @param timestamp [IN] current timestamp. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppSetStreamDescTimestamp(acldvppStreamDesc *streamDesc, uint64_t timestamp); + +/** + * @ingroup AscendCL + * @brief Set stream description's ret code. + * + * @param streamDesc [OUT] dvpp stream description. + * @param retCode [IN] result code. + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetStreamDescRetCode(acldvppStreamDesc *streamDesc, uint32_t retCode);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set stream description's eos.
+ *
+ * @param streamDesc [OUT] dvpp stream description.
+ * @param eos [IN] end flag of sequence.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetStreamDescEos(acldvppStreamDesc *streamDesc, uint8_t eos);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get stream description's data addr.
+ *
+ * @param streamDesc [IN] dvpp stream description.
+ *
+ * @retval data addr.
+ * @retval default nullptr.
+ */
+ACL_FUNC_VISIBILITY void *acldvppGetStreamDescData(const acldvppStreamDesc *streamDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get stream description's data size.
+ *
+ * @param streamDesc [IN] dvpp stream description.
+ *
+ * @retval data size.
+ * @retval default 0.
+ */
+ACL_FUNC_VISIBILITY uint32_t acldvppGetStreamDescSize(const acldvppStreamDesc *streamDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get stream description's format.
+ *
+ * @param streamDesc [IN] dvpp stream description.
+ *
+ * @retval stream format.
+ * @retval default ACL_DVPP_STREAM_H264.
+ */
+ACL_FUNC_VISIBILITY acldvppStreamFormat acldvppGetStreamDescFormat(const acldvppStreamDesc *streamDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get stream description's timestamp.
+ *
+ * @param streamDesc [IN] dvpp stream description.
+ *
+ * @retval current timestamp.
+ * @retval default 0.
+ */
+ACL_FUNC_VISIBILITY uint64_t acldvppGetStreamDescTimestamp(const acldvppStreamDesc *streamDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get stream description's retCode.
+ *
+ * @param streamDesc [IN] dvpp stream description.
+ *
+ * @retval result code.
+ * @retval default 0.
+ */ +ACL_FUNC_VISIBILITY uint32_t acldvppGetStreamDescRetCode(const acldvppStreamDesc *streamDesc); + +/** + * @ingroup AscendCL + * @brief Get stream description's eos. + * + * @param streamDesc [IN] dvpp stream description. + * + * @retval end flag of sequence. + * @retval default 0(false). + */ +ACL_FUNC_VISIBILITY uint8_t acldvppGetStreamDescEos(const acldvppStreamDesc *streamDesc); + +/** + * @ingroup AscendCL + * @brief Create vdec frame config. + * + * @retval null for failed. + * @retval other success. + */ +ACL_FUNC_VISIBILITY aclvdecFrameConfig *aclvdecCreateFrameConfig(); + +/** + * @ingroup AscendCL + * @brief Destroy vdec frame config. + * + * @par Function + * Can only destroy aclvdecFrameConfig type created through + * aclvdecCreateFrameConfig interface + * + * @param vdecFrameConfig [IN] vdec frame config. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclvdecCreateFrameConfig + */ +ACL_FUNC_VISIBILITY aclError aclvdecDestroyFrameConfig(aclvdecFrameConfig *vdecFrameConfig); + +/** + * @ingroup AscendCL + * @brief Get image width and height of jpeg. + * + * @param data [IN] image data in host memory + * @param size [IN] the size of image data + * @param width [OUT] the width of image from image header + * @param height [OUT] the height of image from image header + * @param components [OUT] the components of image from image header + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppJpegGetImageInfo(const void *data, uint32_t size, uint32_t *width, uint32_t *height, + int32_t *components); + +/** + * @ingroup AscendCL + * @brief Get image width and height of jpeg. 
+ * + * @param data [IN] image data in host memory + * @param size [IN] the size of image data + * @param width [OUT] the width of image from image header + * @param height [OUT] the height of image from image header + * @param components [OUT] the components of image from image header + * @param format [OUT] the format of image from image header + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppJpegGetImageInfoV2(const void *data, uint32_t size, uint32_t *width, + uint32_t *height, int32_t *components, + acldvppJpegFormat *format); + +/** + * @ingroup AscendCL + * @brief Predict encode size of jpeg image. + * + * @param inputDesc [IN] dvpp image desc + * @param config [IN] jpeg encode config + * @param size [OUT] the size predicted of image + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppJpegPredictEncSize(const acldvppPicDesc *inputDesc, + const acldvppJpegeConfig *config, uint32_t *size); + +/** + * @ingroup AscendCL + * @brief Predict decode size of jpeg image. + * + * @param data [IN] origin image data in host memory + * @param dataSize [IN] the size of origin image data + * @param outputPixelFormat [IN] the pixel format jpeg decode + * @param decSize [OUT] the size predicted for decode image + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppJpegPredictDecSize(const void *data, uint32_t dataSize, + acldvppPixelFormat outputPixelFormat, uint32_t *decSize); + +/** + * @ingroup AscendCL + * @brief Get image width and height of png. 
+ *
+ * @param data [IN] image data in host memory
+ * @param dataSize [IN] the size of image data
+ * @param width [OUT] the width of image from image header
+ * @param height [OUT] the height of image from image header
+ * @param components [OUT] the components of image from image header
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppPngGetImageInfo(const void *data, uint32_t dataSize, uint32_t *width,
+                                                    uint32_t *height, int32_t *components);
+
+/**
+ * @ingroup AscendCL
+ * @brief Predict decode size of png image.
+ *
+ * @param data [IN] origin image data in host memory
+ * @param dataSize [IN] the size of origin image data
+ * @param outputPixelFormat [IN] the pixel format of png decode
+ * @param decSize [OUT] the size predicted for decode image
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppPngPredictDecSize(const void *data, uint32_t dataSize,
+                                                      acldvppPixelFormat outputPixelFormat, uint32_t *decSize);
+
+/**
+ * @ingroup AscendCL
+ * @brief Create dvpp channel, the same channel can be reused
+ * and is no longer available after destruction.
+ *
+ * @param channelDesc [IN|OUT] the channel destruction
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateChannelDesc
+ */
+ACL_FUNC_VISIBILITY aclError acldvppCreateChannel(acldvppChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy dvpp channel.
+ *
+ * @par Restriction
+ * Can only destroy channel created through the acldvppCreateChannel interface
+ *
+ * @param channelDesc [IN] the channel destruction
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure + * + * @see acldvppCreateChannel + */ +ACL_FUNC_VISIBILITY aclError acldvppDestroyChannel(acldvppChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief dvpp vpc resize. + * + * @par Restriction + * Width alignment requirements: + * @li The minimum stride is 32 and the maximum is 4096 * 4 + * (that is, an image in argb format with a width of 4096); + * @li For 8K scaling, widthStride is required to be aligned to 2; + * @li For non 8K scaling, the calculation formula for widthStride + * is different for different image formats: + * @li yuv400sp, yuv420sp, yuv422sp, yuv444sp: input image width aligned to 16 + * @li yuv422packed: input image width * 2 and then align to 16 + * @li yuv444packed, rgb888: input image width alignment * 3, alignment to 16 + * @li xrgb8888: input image width * 4, align to 16 + * @li HFBC:input image width + * Height alignment requirements: + * @li The height of the input image is aligned to 2. + * High stride minimum 6 and maximum 4096. + * + * @param channelDesc [IN] the channel destruction + * @param inputDesc [IN] resize input picture destruction + * @param outputDesc [IN|OUT] resize output picture destruction + * @param resizeConfig [IN] resize config + * @param stream [IN] resize task stream + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acldvppCreateChannel | acldvppCreatePicDesc + * | acldvppCreateResizeConfig + */ +ACL_FUNC_VISIBILITY aclError acldvppVpcResizeAsync(acldvppChannelDesc *channelDesc, acldvppPicDesc *inputDesc, + acldvppPicDesc *outputDesc, acldvppResizeConfig *resizeConfig, + aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief dvpp vpc crop. 
+ * + * @par Function + * crop the input picture according to the specified area, + * and then store the picture in the output memory as the output picture + * + * @par Restriction + * Width alignment requirements: + * @li The minimum stride is 32 and the maximum is 4096 * 4 + * (that is, an image in argb format with a width of 4096); + * @li For 8K scaling, widthStride is required to be aligned to 2; + * @li For non 8K scaling, the calculation formula for widthStride + * is different for different image formats: + * @li yuv400sp, yuv420sp, yuv422sp, yuv444sp: input image width aligned to 16 + * @li yuv422packed: input image width * 2 and then align to 16 + * @li yuv444packed, rgb888: input image width alignment * 3, alignment to 16 + * @li xrgb8888: input image width * 4, align to 16 + * @li HFBC:input image width + * Height alignment requirements: + * @li The height of the input image is aligned to 2. + * High stride minimum 6 and maximum 4096. + * + * @param channelDesc [IN] the channel destruction + * @param inputDesc [IN] crop input picture destruction + * @param outputDesc [IN|OUT] crop output picture destruction + * @param cropArea [IN] crop area config + * @param stream [IN] crop task stream + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppVpcCropAsync(acldvppChannelDesc *channelDesc, acldvppPicDesc *inputDesc, + acldvppPicDesc *outputDesc, acldvppRoiConfig *cropArea, + aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief dvpp vpc crop and resize config. 
+ * + * @par Function + * crop the input picture with resize config according to the specified area, + * and then store the picture in the output memory as the output picture + * + * @par Restriction + * Width alignment requirements: + * @li The minimum stride is 32 and the maximum is 4096 * 4 + * (that is, an image in argb format with a width of 4096); + * @li For 8K scaling, widthStride is required to be aligned to 2; + * @li For non 8K scaling, the calculation formula for widthStride + * is different for different image formats: + * @li yuv400sp, yuv420sp, yuv422sp, yuv444sp: input image width aligned to 16 + * @li yuv422packed: input image width * 2 and then align to 16 + * @li yuv444packed, rgb888: input image width alignment * 3, alignment to 16 + * @li xrgb8888: input image width * 4, align to 16 + * @li HFBC:input image width + * Height alignment requirements: + * @li The height of the input image is aligned to 2. + * High stride minimum 6 and maximum 4096. + * + * @param channelDesc [IN] the channel destruction + * @param inputDesc [IN] crop input picture destruction + * @param outputDesc [IN|OUT] crop output picture destruction + * @param cropArea [IN] crop area config + * @param resizeConfig [IN] resize config + * @param stream [IN] crop and resize config task stream + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppVpcCropResizeAsync(acldvppChannelDesc *channelDesc, acldvppPicDesc *inputDesc, + acldvppPicDesc *outputDesc, acldvppRoiConfig *cropArea, + acldvppResizeConfig *resizeConfig, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief dvpp vpc batch crop. 
+ * + * @par Function + * crop the input batch picture according to the specified area + * as the output batch pictures + * + * @param channelDesc [IN] the channel destruction + * @param srcBatchPicDescs [IN] crop input batch picture destruction + * @param roiNums [IN] roi config numbers + * @param size [IN] roiNum size + * @param dstBatchPicDescs [IN|OUT] crop output batch picture destruction + * @param cropAreas [IN] crop area configs + * @param stream [IN] crop batch task stream + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acldvppCreateChannel | acldvppCreateBatchPicDesc | acldvppCreateRoiConfig + */ +ACL_FUNC_VISIBILITY aclError acldvppVpcBatchCropAsync(acldvppChannelDesc *channelDesc, + acldvppBatchPicDesc *srcBatchPicDescs, uint32_t *roiNums, + uint32_t size, acldvppBatchPicDesc *dstBatchPicDescs, + acldvppRoiConfig *cropAreas[], aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief dvpp vpc batch crop and resize config. + * + * @par Function + * crop the input batch picture with resize config according to the specified area + * as the output batch pictures + * + * @param channelDesc [IN] the channel destruction + * @param srcBatchPicDescs [IN] crop input batch picture destruction + * @param roiNums [IN] roi config numbers + * @param size [IN] roiNum size + * @param dstBatchPicDescs [IN|OUT] crop output batch picture destruction + * @param cropAreas [IN] crop area configs + * @param resizeConfig [IN] resize config + * @param stream [IN] crop batch and resize config task stream + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateChannel | acldvppCreateBatchPicDesc | acldvppCreateRoiConfig | acldvppCreateDvppConfig
+ */
+ACL_FUNC_VISIBILITY aclError acldvppVpcBatchCropResizeAsync(acldvppChannelDesc *channelDesc,
+                                                            acldvppBatchPicDesc *srcBatchPicDescs, uint32_t *roiNums,
+                                                            uint32_t size, acldvppBatchPicDesc *dstBatchPicDescs,
+                                                            acldvppRoiConfig *cropAreas[],
+                                                            acldvppResizeConfig *resizeConfig, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief dvpp vpc crop and paste.
+ *
+ * @par Function
+ * crop the input picture according to the specified area,
+ * and paste the picture to the specified position of the target picture
+ * as the output picture
+ *
+ * @param channelDesc [IN] the channel destruction
+ * @param inputDesc [IN] crop and paste input picture destruction
+ * @param outputDesc [IN|OUT] crop and paste output picture destruction
+ * @param cropArea [IN] crop area config
+ * @param pasteArea [IN] paste area config
+ * @param stream [IN] crop and paste task stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateChannel | acldvppCreatePicDesc | acldvppCreateRoiConfig
+ */
+ACL_FUNC_VISIBILITY aclError acldvppVpcCropAndPasteAsync(acldvppChannelDesc *channelDesc, acldvppPicDesc *inputDesc,
+                                                         acldvppPicDesc *outputDesc, acldvppRoiConfig *cropArea,
+                                                         acldvppRoiConfig *pasteArea, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief dvpp vpc crop, resize config and paste.
+ *
+ * @par Function
+ * crop the input picture with resize config according to the specified area,
+ * and paste the picture to the specified position of the target picture
+ * as the output picture
+ *
+ * @param channelDesc [IN] the channel destruction
+ * @param inputDesc [IN] crop and paste input picture destruction
+ * @param outputDesc [IN|OUT] crop and paste output picture destruction
+ * @param cropArea [IN] crop area config
+ * @param pasteArea [IN] paste area config
+ * @param resizeConfig [IN] resize config
+ * @param stream [IN] crop, paste and resize task stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateChannel | acldvppCreatePicDesc | acldvppCreateRoiConfig | acldvppCreateResizeConfig
+ */
+ACL_FUNC_VISIBILITY aclError acldvppVpcCropResizePasteAsync(acldvppChannelDesc *channelDesc, acldvppPicDesc *inputDesc,
+                                                            acldvppPicDesc *outputDesc, acldvppRoiConfig *cropArea,
+                                                            acldvppRoiConfig *pasteArea,
+                                                            acldvppResizeConfig *resizeConfig, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief dvpp vpc batch crop and paste.
+ *
+ * @par Function
+ * crop the input batch picture according to the specified area,
+ * and paste the pictures to the specified position of the target pictures
+ * as the output batch pictures
+ *
+ * @param channelDesc [IN] the channel destruction
+ * @param srcBatchPicDescs [IN] crop input batch picture destruction
+ * @param roiNums [IN] roi config numbers
+ * @param size [IN] roiNum size
+ * @param dstBatchPicDescs [IN|OUT] crop output batch picture destruction
+ * @param cropAreas [IN] crop area configs
+ * @param pasteAreas [IN] paste area configs
+ * @param stream [IN] crop batch task stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure + * + * @see acldvppCreateChannel | acldvppCreateBatchPicDesc | acldvppCreateRoiConfig + */ +ACL_FUNC_VISIBILITY aclError acldvppVpcBatchCropAndPasteAsync(acldvppChannelDesc *channelDesc, + acldvppBatchPicDesc *srcBatchPicDescs, uint32_t *roiNums, + uint32_t size, acldvppBatchPicDesc *dstBatchPicDescs, + acldvppRoiConfig *cropAreas[], + acldvppRoiConfig *pasteAreas[], aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief dvpp vpc batch crop, resize config and paste. + * + * @par Function + * crop the input batch picture with resize config according to the specified area, + * and paste the pictures to the specified position of the target pictures + * as the output batch pictures + * + * @param channelDesc [IN] the channel destruction + * @param srcBatchPicDescs [IN] crop input batch picture destruction + * @param roiNums [IN] roi config numbers + * @param size [IN] roiNum size + * @param dstBatchPicDescs [IN|OUT] crop output batch picture destruction + * @param cropAreas [IN] crop area configs + * @param pasteAreas [IN] paste area configs + * @param resizeConfig [IN] resize config + * @param stream [IN] crop batch and resize config task stream + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acldvppCreateChannel | acldvppCreateBatchPicDesc | acldvppCreateRoiConfig | acldvppCreateResizeConfig + */ +ACL_FUNC_VISIBILITY aclError acldvppVpcBatchCropResizePasteAsync( + acldvppChannelDesc *channelDesc, acldvppBatchPicDesc *srcBatchPicDescs, uint32_t *roiNums, uint32_t size, + acldvppBatchPicDesc *dstBatchPicDescs, acldvppRoiConfig *cropAreas[], acldvppRoiConfig *pasteAreas[], + acldvppResizeConfig *resizeConfig, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief dvpp vpc jpeg decode. 
+ * + * @par Function + * For different source picture formats, after decoding, + * output pictures in the following format: + * @li jpeg(444) -> YUV444SP:V is front U is back, + * YUV420 SP V is front U is back, YUV420SP U is front V is back; + * @li jpeg(422) -> YUV422SP:V is in front U is behind, + * YUV420SP V is in front U is behind, YUV420SP U is in front V is behind; + * @li jpeg(420) -> YUV420SP: + * V is front U is back, YUV420SP U is front V is back; + * @li jpeg(400) -> YUV420SP:UV data is filled with 0 x 80. + * + * @param channelDesc [IN] the channel destruction + * @param data [IN] decode input picture destruction's data + * @param size [IN] decode input picture destruction's size + * @param outputDesc [IN|OUT] decode output picture destruction + * @param stream [IN] decode task stream + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acldvppCreateChannel | acldvppCreatePicDesc + */ +ACL_FUNC_VISIBILITY aclError acldvppJpegDecodeAsync(acldvppChannelDesc *channelDesc, const void *data, uint32_t size, + acldvppPicDesc *outputDesc, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief dvpp vpc jpeg encode. + * + * @param channelDesc [IN] the channel destruction + * @param inputDesc [IN] encode input picture destruction + * @param data [OUT] encode output picture destruction's data + * @param size [IN|OUT] encode output picture destruction's size + * @param config [IN] jpeg encode config + * @param stream [IN] encode task stream + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acldvppCreateChannel | acldvppCreateJpegeConfig + */ +ACL_FUNC_VISIBILITY aclError acldvppJpegEncodeAsync(acldvppChannelDesc *channelDesc, acldvppPicDesc *inputDesc, + const void *data, uint32_t *size, acldvppJpegeConfig *config, + aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief dvpp vpc png decode. 
+ * + * @param channelDesc [IN] the channel destruction + * @param data [IN] decode input picture destruction's data + * @param size [IN] decode input picture destruction's size + * @param outputDesc [IN|OUT] decode output picture destruction + * @param stream [IN] decode task stream + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acldvppCreateChannel | acldvppCreatePicDesc + */ +ACL_FUNC_VISIBILITY aclError acldvppPngDecodeAsync(acldvppChannelDesc *channelDesc, const void *data, uint32_t size, + acldvppPicDesc *outputDesc, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief Create vdec channel. + * + * @par Function + * Create a channel for video data processing, + * the same channel can be reused, + * and is no longer available after destruction + * + * @param channelDesc [IN|OUT] the channel destruction + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclvdecCreateChannelDesc + */ +ACL_FUNC_VISIBILITY aclError aclvdecCreateChannel(aclvdecChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief Destroy vdec channel. + * + * @par Function + * Can only destroy channels created by the aclvdecCreateChannel interface + * + * @param channelDesc [IN] the channel destruction + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclvdecCreateChannel + */ +ACL_FUNC_VISIBILITY aclError aclvdecDestroyChannel(aclvdecChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief dvpp vdec send frame. 
+ * + * @par Function + * Pass the input memory to be decoded + * and the decoded output memory to the decoder for decoding + * + * @param channelDesc [IN] vdec channel destruction + * @param input [IN] input stream destruction + * @param output [IN|OUT] output picture destruction + * @param config [IN] vdec frame config + * @param userData [IN] user data for callback function + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclvdecCreateChannel | acldvppCreateStreamDesc | acldvppCreatePicDesc + */ +ACL_FUNC_VISIBILITY aclError aclvdecSendFrame(aclvdecChannelDesc *channelDesc, acldvppStreamDesc *input, + acldvppPicDesc *output, aclvdecFrameConfig *config, void *userData); + +/** + * @ingroup AscendCL + * @brief dvpp vdec send skipped frame. + * + * @par Function + * Pass video frame to decoder + * + * @param channelDesc [IN] vdec channel destruction + * @param input [IN] input stream destruction + * @param config [IN] vdec frame config + * @param userData [IN] user data for callback function + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclvdecCreateChannel | acldvppCreateStreamDesc | acldvppCreatePicDesc | aclvdecSendFrame + */ +ACL_FUNC_VISIBILITY aclError aclvdecSendSkippedFrame(aclvdecChannelDesc *channelDesc, acldvppStreamDesc *input, + aclvdecFrameConfig *config, void *userData); + +/** + * @ingroup AscendCL + * @brief dvpp vpc convert color. + * + * @par Restriction + * @li outputDesc:Width height stride, No changes are allowed. Just configure 0 + * @par Function + * Convert color gamut + * + * @param channelDesc [IN] the channel destruction + * @param inputDesc [IN] convert color input picture destruction + * @param outputDesc [IN|OUT] convert color output picture destruction + * @param stream [IN] convert color task stream + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see acldvppCreateChannel | acldvppCreatePicDesc + */ +ACL_FUNC_VISIBILITY aclError acldvppVpcConvertColorAsync(acldvppChannelDesc *channelDesc, acldvppPicDesc *inputDesc, + acldvppPicDesc *outputDesc, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief dvpp vpc pyramid down. + * + * @par Restriction + * @li outputDesc:format only supported YUV400 + * @par Function + * Image pyramid down + * + * @param channelDesc [IN] the channel destruction + * @param inputDesc [IN] pyr down input picture destruction + * @param outputDesc [IN|OUT] pyr down output picture destruction + * @param reserve [IN] reserved param , must be nullptr + * @param stream [IN] pyr down task stream + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acldvppCreateChannel | acldvppCreatePicDesc + */ +ACL_FUNC_VISIBILITY aclError acldvppVpcPyrDownAsync(acldvppChannelDesc *channelDesc, acldvppPicDesc *inputDesc, + acldvppPicDesc *outputDesc, void *reserve, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief Set dvpp channel mode. + * + * @param channelDesc [OUT] the channel destruction + * @param mode [IN] channel mode + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppSetChannelDescMode(acldvppChannelDesc *channelDesc, uint32_t mode); + +/** + * @ingroup AscendCL + * @brief Set resize config interpolation. + * + * @param resizeConfig [OUT] the resize config + * @param interpolation [IN] interpolation + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppSetResizeConfigInterpolation(acldvppResizeConfig *resizeConfig, + uint32_t interpolation); + +/** + * @ingroup AscendCL + * @brief Get resize config interpolation. 
+ * + * @param resizeConfig [IN] the resize config + * + * @retval Interpolation of resize config. + */ +ACL_FUNC_VISIBILITY uint32_t acldvppGetResizeConfigInterpolation(const acldvppResizeConfig *resizeConfig); + +/** + * @ingroup AscendCL + * @brief Set vdec channel out mode. + * + * @param channelDesc [OUT] the channel destruction + * @param outMode [IN] channel out mode + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclvdecSetChannelDescOutMode(aclvdecChannelDesc *channelDesc, uint32_t outMode); + +/** + * @ingroup AscendCL + * @brief Get vdec channel out mode. + * + * @param channelDesc [IN] the channel destruction + * + * @retval Out mode of channel destruction + * @retval default 0 + */ +ACL_FUNC_VISIBILITY uint32_t aclvdecGetChannelDescOutMode(const aclvdecChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief Create dvpp batch picture description. + * + * @param batchSize [IN] batch size + * + * @retval null for failed. + * @retval OtherValues success. + */ +ACL_FUNC_VISIBILITY acldvppBatchPicDesc *acldvppCreateBatchPicDesc(uint32_t batchSize); + +/** + * @ingroup AscendCL + * @brief Get dvpp picture description. + * + * @param batchPicDesc [IN] dvpp batch picture description. + * @param index [IN] index of batch + * + * @retval null for failed. + * @retval OtherValues Failure + * + * @see acldvppCreateBatchPicDesc + */ +ACL_FUNC_VISIBILITY acldvppPicDesc *acldvppGetPicDesc(acldvppBatchPicDesc *batchPicDesc, uint32_t index); + +/** + * @ingroup AscendCL + * @brief Destroy dvpp batch picture description. + * + * @par Function + * Can only destroy batch picture description information created + * through acldvppCreateBatchPicDesc interface. + * + * @param batchPicDesc [IN] dvpp batch picture description. + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateBatchPicDesc
+ */
+ACL_FUNC_VISIBILITY aclError acldvppDestroyBatchPicDesc(acldvppBatchPicDesc *batchPicDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Create dvpp lut map.
+ *
+ * @retval null for failed.
+ * @retval OtherValues success.
+ */
+ACL_FUNC_VISIBILITY acldvppLutMap *acldvppCreateLutMap();
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy lut map.
+ *
+ * @param lutMap [IN] lut map
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppDestroyLutMap(acldvppLutMap *lutMap);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get lut map dims.
+ *
+ * @param lutMap [IN] lut map
+ *
+ * @retval 0 for failed.
+ * @retval OtherValues success.
+ */
+ACL_FUNC_VISIBILITY uint32_t acldvppGetLutMapDims(const acldvppLutMap *lutMap);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get lut map data.
+ *
+ * @param lutMap [IN] lut map
+ * @param dim [IN] input dim of map
+ * @param data [OUT] address of the lut map's data for the given dim
+ * @param len [OUT] length of the lut map's data for the given dim
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppGetLutMapData(const acldvppLutMap *lutMap, uint32_t dim, uint8_t **data,
+                                                  uint32_t *len);
+/**
+ * @ingroup AscendCL
+ * @brief Vpc equalize hist.
+ *
+ * @param channelDesc [IN] channel desc
+ * @param inputDesc [IN] input desc
+ * @param outputDesc [IN|OUT] output desc
+ * @param lutMap [IN] lut map param
+ * @param stream [IN] runtime stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure + * + * @see acldvppCreateChannel|acldvppCreatePicDesc|acldvppCreateLutMap + */ +ACL_FUNC_VISIBILITY aclError acldvppVpcEqualizeHistAsync(const acldvppChannelDesc *channelDesc, + const acldvppPicDesc *inputDesc, acldvppPicDesc *outputDesc, + const acldvppLutMap *lutMap, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief Create dvpp border config. + * + * @retval null for failed. + * @retval OtherValues success. + */ +ACL_FUNC_VISIBILITY acldvppBorderConfig *acldvppCreateBorderConfig(); + +/** + * @ingroup AscendCL + * @brief Set value of border config. + * + * @param borderConfig [OUT] border config + * @param index [IN] index of value array + * @param value [IN] value + * + * @retval ACL_SUCCESS for success, other for failure + */ +ACL_FUNC_VISIBILITY aclError acldvppSetBorderConfigValue(acldvppBorderConfig *borderConfig, uint32_t index, + double value); + +/** + * @ingroup AscendCL + * @brief Set border type of border config. + * + * @param borderConfig [OUT] border config + * @param borderType [IN] border type + * + * @retval ACL_SUCCESS for success, other for failure + */ +ACL_FUNC_VISIBILITY aclError acldvppSetBorderConfigBorderType(acldvppBorderConfig *borderConfig, + acldvppBorderType borderType); + +/** + * @ingroup AscendCL + * @brief Set top of border config. + * + * @param borderConfig [OUT] border config + * @param top [IN] top of border + * + * @retval ACL_SUCCESS for success, other for failure + */ +ACL_FUNC_VISIBILITY aclError acldvppSetBorderConfigTop(acldvppBorderConfig *borderConfig, uint32_t top); + +/** + * @ingroup AscendCL + * @brief Set bottom of border config. + * + * @param borderConfig [OUT] border config + * @param bottom [IN] bottom of border + * + * @retval ACL_SUCCESS for success, other for failure + */ +ACL_FUNC_VISIBILITY aclError acldvppSetBorderConfigBottom(acldvppBorderConfig *borderConfig, uint32_t bottom); + +/** + * @ingroup AscendCL + * @brief Set left of border config. 
+ *
+ * @param borderConfig [OUT] border config
+ * @param left [IN] left of border
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetBorderConfigLeft(acldvppBorderConfig *borderConfig, uint32_t left);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set right of border config.
+ *
+ * @param borderConfig [OUT] border config
+ * @param right [IN] right of border
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetBorderConfigRight(acldvppBorderConfig *borderConfig, uint32_t right);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get value of border config.
+ *
+ * @param borderConfig [IN] border config
+ * @param index [IN] index of value array
+ *
+ * @retval invalid value is < 0, normal Value is >= 0
+ */
+ACL_FUNC_VISIBILITY double acldvppGetBorderConfigValue(const acldvppBorderConfig *borderConfig, uint32_t index);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get border type of border config.
+ *
+ * @param borderConfig [IN] border config
+ * @retval border type of border config
+ */
+ACL_FUNC_VISIBILITY acldvppBorderType acldvppGetBorderConfigBorderType(const acldvppBorderConfig *borderConfig);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get top of border config.
+ *
+ * @param borderConfig [IN] border config
+ *
+ * @retval default 0, top value of border config
+ */
+ACL_FUNC_VISIBILITY uint32_t acldvppGetBorderConfigTop(const acldvppBorderConfig *borderConfig);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get bottom of border config.
+ *
+ * @param borderConfig [IN] border config
+ *
+ * @retval default 0, bottom value of border config
+ */
+ACL_FUNC_VISIBILITY uint32_t acldvppGetBorderConfigBottom(const acldvppBorderConfig *borderConfig);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get left of border config.
+ *
+ * @param borderConfig [IN] border config
+ *
+ * @retval default 0, left value of border config
+ */
+ACL_FUNC_VISIBILITY uint32_t acldvppGetBorderConfigLeft(const acldvppBorderConfig *borderConfig);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get right of border config.
+ *
+ * @param borderConfig [IN] border config
+ *
+ * @retval default 0, right value of border config
+ */
+ACL_FUNC_VISIBILITY uint32_t acldvppGetBorderConfigRight(const acldvppBorderConfig *borderConfig);
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy border config.
+ *
+ * @param borderConfig [IN] border config
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppDestroyBorderConfig(acldvppBorderConfig *borderConfig);
+
+/**
+ * @ingroup AscendCL
+ * @brief Vpc make border.
+ *
+ * @param channelDesc [IN] channel desc
+ * @param inputDesc [IN] input desc
+ * @param outputDesc [IN|OUT] output desc
+ * @param borderConfig [IN] border config param
+ * @param stream [IN] runtime stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateChannel|acldvppCreatePicDesc|acldvppCreateBorderConfig
+ */
+ACL_FUNC_VISIBILITY aclError acldvppVpcMakeBorderAsync(const acldvppChannelDesc *channelDesc,
+                                                       const acldvppPicDesc *inputDesc, acldvppPicDesc *outputDesc,
+                                                       const acldvppBorderConfig *borderConfig, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief Dvpp vpc calc hist.
+ *
+ * @param channelDesc [IN] the channel destruction
+ * @param srcPicDesc [IN] calc hist input picture destruction
+ * @param hist [IN|OUT] calc hist output hist destruction
+ * @param reserve [IN] reserved param, must be nullptr
+ * @param stream [IN] task stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure + * + * @see acldvppCreateChannel | acldvppCreatePicDesc | acldvppCreateHist + */ +ACL_FUNC_VISIBILITY aclError acldvppVpcCalcHistAsync(acldvppChannelDesc *channelDesc, acldvppPicDesc *srcPicDesc, + acldvppHist *hist, void *reserve, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief Create vpc hist description. + * + * @retval null for failed. + * @retval OtherValues success. + */ +ACL_FUNC_VISIBILITY acldvppHist *acldvppCreateHist(); + +/** + * @ingroup AscendCL + * @brief Destroy vpc hist description. + * + * @par Function + * Can only destroy hist description information created + * through acldvppCreateHist interface. + * + * @param hist [IN] vpc hist description. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acldvppCreateHist + */ +ACL_FUNC_VISIBILITY aclError acldvppDestroyHist(acldvppHist *hist); + +/** + * @ingroup AscendCL + * @brief Get dims of vpc hist description. + * + * @param hist [IN] vpc hist description. + * + * @retval dims of vpc hist description. + * + * @see acldvppCreateHist | acldvppVpcCalcHistAsync + */ +ACL_FUNC_VISIBILITY uint32_t acldvppGetHistDims(acldvppHist *hist); + +/** + * @ingroup AscendCL + * @brief Get data from vpc hist description by dim. + * + * @param hist [IN] vpc hist description. + * @param dim [IN] which dim to get data. + * @param data [OUT] address of output hist data. + * @param len [OUT] len of output hist data. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acldvppCreateHist | acldvppVpcCalcHistAsync + */ +ACL_FUNC_VISIBILITY aclError acldvppGetHistData(acldvppHist *hist, uint32_t dim, uint32_t **data, uint16_t *len); + +/** + * @ingroup AscendCL + * @brief Get dvpp calc hist process return code. + * + * @param hist [IN] vpc hist description. + * + * @retval Dvpp calc hist process return code. 
+ * + * @see acldvppCreateHist | acldvppVpcCalcHistAsync + */ +ACL_FUNC_VISIBILITY uint32_t acldvppGetHistRetCode(acldvppHist *hist); + +/** + * @ingroup AscendCL + * @brief Set vpc hist description to 0. + * + * @par Function + * Can only clear hist description information created + * through acldvppCreateHist interface. + * + * @param hist [IN] vpc hist description. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acldvppCreateHist + */ +ACL_FUNC_VISIBILITY aclError acldvppClearHist(acldvppHist *hist); + +/** + * @ingroup AscendCL + * @brief dvpp vpc batch crop, resize config and make border. + * + * @par Function + * crop the input batch picture with resize config and border configs according to the specified area + * as the output batch pictures + * + * @param channelDesc [IN] the channel destruction + * @param srcBatchPicDescs [IN] crop input batch picture destruction + * @param roiNums [IN] roi config numbers + * @param size [IN] roiNum size + * @param dstBatchPicDescs [IN|OUT] crop output batch picture destruction + * @param cropAreas [IN] crop area configs + * @param borderCfgs [IN] border configs + * @param resizeConfig [IN] resize config + * @param stream [IN] crop batch, resize config and make border task stream + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see acldvppCreateChannel | acldvppCreateBatchPicDesc | acldvppCreateRoiConfig | acldvppCreateResizeConfig + */ +ACL_FUNC_VISIBILITY aclError acldvppVpcBatchCropResizeMakeBorderAsync( + acldvppChannelDesc *channelDesc, acldvppBatchPicDesc *srcBatchPicDescs, uint32_t *roiNums, uint32_t size, + acldvppBatchPicDesc *dstBatchPicDescs, acldvppRoiConfig *cropAreas[], acldvppBorderConfig *borderCfgs[], + acldvppResizeConfig *resizeConfig, aclrtStream stream); + +#ifdef __cplusplus +} +#endif + +#endif // INC_EXTERNAL_ACL_OPS_ACL_DVPP_H_ diff --git a/inc/external/acl/ops/acl_fv.h b/inc/external/acl/ops/acl_fv.h new file mode 100644 index 00000000..4bd392c9 --- /dev/null +++ b/inc/external/acl/ops/acl_fv.h @@ -0,0 +1,348 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef INC_EXTERNAL_ACL_OPS_ACL_RETR_H_ +#define INC_EXTERNAL_ACL_OPS_ACL_RETR_H_ + +#include "acl/acl.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct aclfvInitPara aclfvInitPara; +typedef struct aclfvFeatureInfo aclfvFeatureInfo; +typedef struct aclfvRepoRange aclfvRepoRange; +typedef struct aclfvQueryTable aclfvQueryTable; +typedef struct aclfvSearchInput aclfvSearchInput; +typedef struct aclfvSearchResult aclfvSearchResult; + +// search operation type +enum aclfvSearchType { + SEARCH_1_N, // 1:N operation type + SEARCH_N_M // N:M operation type +}; + +/** + * @ingroup AscendCL + * @brief Create fv init param. + * + * @param fsNum [IN] The feature num + * + * @retval null for failed. + * @retval OtherValues success. + */ +ACL_FUNC_VISIBILITY aclfvInitPara *aclfvCreateInitPara(uint64_t fsNum); + +/** + * @ingroup AscendCL + * @brief Destroy fv init param. + * + * @par Function + * Can only destroy fv init param information created + * through aclfvCreateInitPara interface. + * + * @param initPara [IN] fv init param. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclfvCreateInitPara + */ +ACL_FUNC_VISIBILITY aclError aclfvDestroyInitPara(aclfvInitPara *initPara); + +/** + * @ingroup AscendCL + * @brief set value for maxTopNumFor1N which in fv init param. + * + * @param initPara [IN|OUT] fv init param. + * @param maxTopNumFor1N [IN] maxTopNumFor1N value for init param. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclfvSet1NTopNum(aclfvInitPara *initPara, uint32_t maxTopNumFor1N); + +/** + * @ingroup AscendCL + * @brief set value for maxTopNumForNM which in fv init param. + * + * @param initPara [IN|OUT] fv init param. + * @param maxTopNumForNM [IN] maxTopNumForNM value for init param. + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclfvSetNMTopNum(aclfvInitPara *initPara, uint32_t maxTopNumForNM);
+
+/**
+ * @ingroup AscendCL
+ * @brief Create fv feature info.
+ *
+ * @param id0 [IN] The first level library id0
+ * @param id1 [IN] Secondary library id1
+ * @param offset [IN] The offset of the first feature in the library
+ * @param featureLen [IN] Single feature length
+ * @param featureCount [IN] Single feature count
+ * @param featureData [IN] Feature value list
+ * @param featureDataLen [IN] Feature value list length
+ *
+ * @retval null for failed.
+ * @retval OtherValues success.
+ */
+ACL_FUNC_VISIBILITY aclfvFeatureInfo *aclfvCreateFeatureInfo(uint32_t id0, uint32_t id1, uint32_t offset,
+                                                             uint32_t featureLen, uint32_t featureCount,
+                                                             uint8_t *featureData, uint32_t featureDataLen);
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy fv feature info.
+ *
+ * @par Function
+ * Can only destroy fv feature info information created
+ * through aclfvCreateFeatureInfo interface.
+ *
+ * @param featureInfo [IN] fv feature info.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclfvCreateFeatureInfo
+ */
+ACL_FUNC_VISIBILITY aclError aclfvDestroyFeatureInfo(aclfvFeatureInfo *featureInfo);
+
+/**
+ * @ingroup AscendCL
+ * @brief Create fv repo range.
+ *
+ * @param id0Min [IN] id0 start value
+ * @param id0Max [IN] id0 max value
+ * @param id1Min [IN] id1 start value
+ * @param id1Max [IN] id1 max value
+ *
+ * @retval null for failed. OtherValues success
+ */
+ACL_FUNC_VISIBILITY aclfvRepoRange *aclfvCreateRepoRange(uint32_t id0Min, uint32_t id0Max, uint32_t id1Min,
+                                                         uint32_t id1Max);
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy fv repo range.
+ *
+ * @par Function
+ * Can only destroy fv repo range information created
+ * through aclfvCreateRepoRange interface.
+ *
+ * @param repoRange [IN] fv repo range.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure + * + * @see aclfvCreateRepoRange + */ +ACL_FUNC_VISIBILITY aclError aclfvDestroyRepoRange(aclfvRepoRange *repoRange); + +/** + * @ingroup AscendCL + * @brief Create query table. + * + * @param queryCnt [IN] Number of tables, the maximum number is 6 + * @param tableLen [IN] Single table length, table length is 32KB + * @param tableData [IN] Feature value list + * @param tableDataLen [IN] The length of memory requested by the featureData pointer + * + * @retval null for failed. OtherValues success + */ +ACL_FUNC_VISIBILITY aclfvQueryTable *aclfvCreateQueryTable(uint32_t queryCnt, uint32_t tableLen, uint8_t *tableData, + uint32_t tableDataLen); + +/** + * @ingroup AscendCL + * @brief Destroy query table. + * + * @par Function + * Can only destroy query table information created + * through aclfvCreateQueryTable interface. + * + * @param queryTable [IN] query table. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclfvCreateQueryTable + */ +ACL_FUNC_VISIBILITY aclError aclfvDestroyQueryTable(aclfvQueryTable *queryTable); + +/** + * @ingroup AscendCL + * @brief Create search input. + * + * @param queryTable [IN] query table + * @param repoRange [IN] query repo range + * @param topk [IN] query topk + * + * @retval null for failed. OtherValues success + */ +ACL_FUNC_VISIBILITY aclfvSearchInput *aclfvCreateSearchInput(aclfvQueryTable *queryTable, aclfvRepoRange *repoRange, + uint32_t topk); + +/** + * @ingroup AscendCL + * @brief Destroy search input. + * + * @par Function + * Can only destroy search input information created + * through aclfvCreateSearchInput interface. + * + * @param searchInput [IN] search input. + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see aclfvCreateSearchInput + */ +ACL_FUNC_VISIBILITY aclError aclfvDestroySearchInput(aclfvSearchInput *searchInput); + +/** + * @ingroup AscendCL + * @brief Create search result. + * + * @param queryCnt [IN] Retrieve the number of features + * @param resultNum [IN] The number of search results for each feature, the number is queryCnt + * @param resultNumDataLen [IN] resultNum memory length + * @param id0 [IN] Level 1 library id0 + * @param id1 [IN] Secondary library id1 + * @param resultOffset [IN] The offset of the bottom library corresponding + * to each feature retrieval result, total length topK * queryCnt + * @param resultDistance [IN] Distance, total length topK * queryCnt + * @param dataLen [IN] The memory size requested by + * id0\id1\reslutOffset\resultDistance + * + * @retval null for failed. OtherValues success + */ +ACL_FUNC_VISIBILITY aclfvSearchResult *aclfvCreateSearchResult(uint32_t queryCnt, uint32_t *resultNum, + uint32_t resultNumDataLen, uint32_t *id0, uint32_t *id1, + uint32_t *resultOffset, float *resultDistance, + uint32_t dataLen); + +/** + * @ingroup AscendCL + * @brief Destroy search result. + * + * @par Function + * Can only destroy search result information created + * through aclfvCreateSearchResult interface. + * + * @param searchResult [IN] search result. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclfvCreateSearchResult + */ +ACL_FUNC_VISIBILITY aclError aclfvDestroySearchResult(aclfvSearchResult *searchResult); + +/** + * @ingroup AscendCL + * @brief fv IP initialize. + * + * @param initPara [IN] fv init param. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure. + */ +ACL_FUNC_VISIBILITY aclError aclfvInit(aclfvInitPara *initPara); + +/** + * @ingroup AscendCL + * @brief release fv resources. 
+ * + * @par Function + * Can only release fv resources created + * through aclfvInit interface. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure. + * + * @see aclfvInit + */ +ACL_FUNC_VISIBILITY aclError aclfvRelease(); + +/** + * @ingroup AscendCL + * @brief fv repo add. + * + * @param type [IN] repo add type + * @param featureInfo [IN] add feature information + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure. + */ +ACL_FUNC_VISIBILITY aclError aclfvRepoAdd(aclfvSearchType type, aclfvFeatureInfo *featureInfo); + +/** + * @ingroup AscendCL + * @brief fv repo del. + * + * @param type [IN] repo delete type + * @param repoRange [IN] repo range information + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure. + */ +ACL_FUNC_VISIBILITY aclError aclfvRepoDel(aclfvSearchType type, aclfvRepoRange *repoRange); + +/** + * @ingroup AscendCL + * @brief fv accurate del. + * + * @param featureInfo [IN] accurate delete feature information + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure. + */ +ACL_FUNC_VISIBILITY aclError aclfvDel(aclfvFeatureInfo *featureInfo); + +/** + * @ingroup AscendCL + * @brief fv accurate modify. + * + * @param featureInfo [IN] accurate modify feature information + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure. + */ +ACL_FUNC_VISIBILITY aclError aclfvModify(aclfvFeatureInfo *featureInfo); + +/** + * @ingroup AscendCL + * @brief fv search. + * + * @param type [IN] search type + * @param searchInput [IN] search input + * @param searchRst [OUT] search result + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure. 
+ */
+ACL_FUNC_VISIBILITY aclError aclfvSearch(aclfvSearchType type, aclfvSearchInput *searchInput,
+                                         aclfvSearchResult *searchRst);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // INC_EXTERNAL_ACL_OPS_ACL_RETR_H_
diff --git a/inc/external/hccl/hccl.h b/inc/external/hccl/hccl.h
new file mode 100644
index 00000000..8261adc4
--- /dev/null
+++ b/inc/external/hccl/hccl.h
@@ -0,0 +1,159 @@
+/**
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file hccl.h
+ * @brief HCCL API
+ */
+
+#ifndef HCCL_H_
+#define HCCL_H_
+
+#include <hccl/hccl_types.h>
+#include <acl/acl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif  // __cplusplus
+
+/**
+ * @brief Initialize HCCL.
+ *
+ * @param clusterInfo A string identifying the cluster info file path, include file name.
+ * @param rank A integer identifying the identify for the rank.
+ * @param comm A pointer identifying the initialized communication resource.
+ * @return HcclResult
+ * @see HcclCommDestroy()
+ */
+extern HcclResult HcclCommInitClusterInfo(const char *clusterInfo, uint32_t rank, HcclComm *comm);
+
+/**
+ * @brief Get hccl root info.
+ *
+ * @param rootInfo A pointer identifying the hccl root info.
+ * @return HcclResult
+ */
+extern HcclResult HcclGetRootInfo(HcclRootInfo *rootInfo);
+
+/**
+ * @brief Initialize HCCL with root info.
+ *
+ * @param nRanks A integer identifying the rank size of the cluster.
+ * @param rootInfo A struct identifying the hccl root info.
+ * @param rank A integer identifying the identify for the rank. + * @param comm A pointer identifying the initialized communication resource. + * @return HcclResult + * @see HcclCommDestroy() + */ +extern HcclResult HcclCommInitRootInfo(uint32_t nRanks, const HcclRootInfo *rootInfo, uint32_t rank, HcclComm *comm); + +/** + * @brief AllReduce operator. + * + * @param sendBuf A pointer identifying the input data address of the operator. + * @param recvBuf A pointer identifying the output data address of the operator. + * @param count An integer(u64) identifying the number of the output data. + * @param dataType The data type of the operator, must be one of the following types: int8, int16, int32, float16, + * float32. + * @param op The reduction type of the operator, must be one of the following types: sum, min, max, prod. + * @param comm A pointer identifying the communication resource based on. + * @param stream A pointer identifying the stream information. + * @return HcclResult + */ +extern HcclResult HcclAllReduce(void *sendBuf, void *recvBuf, uint64_t count, HcclDataType dataType, HcclReduceOp op, + HcclComm comm, aclrtStream stream); + +/** + * @brief Broadcast operator. + * + * @param buf A pointer identifying the data address of the operator. + * @param count An integer(u64) identifying the number of the data. + * @param dataType The data type of the operator, must be one of the following types: int8, int32, float16, float32. + * @param root An integer(u32) identifying the the root rank in the operator. + * @param comm A pointer identifying the communication resource based on + * @param stream A pointer identifying the stream information. + * @return HcclResult + */ +extern HcclResult HcclBroadcast(void *buf, uint64_t count, HcclDataType dataType, uint32_t root, HcclComm comm, + aclrtStream stream); + +/** + * @brief ReduceScatter operator. + * + * @param sendBuf A pointer identifying the input data address of the operator. 
+ * @param recvBuf A pointer identifying the output data address of the operator. + * @param recvCount An integer(u64) identifying the number of the output data. + * @param dataType The data type of the operator, must be one of the following types: int8, int32, float16, float32. + * @param op The reduction type of the operator, must be one of the following types: sum, min, max, prod. + * @param comm A pointer identifying the communication resource based on. + * @param stream A pointer identifying the stream information. + * @return HcclResult + */ +extern HcclResult HcclReduceScatter(void *sendBuf, void *recvBuf, uint64_t recvCount, HcclDataType dataType, + HcclReduceOp op, HcclComm comm, aclrtStream stream); + +/** + * @brief AllGather operator. + * + * @param sendBuf A pointer identifying the input data address of the operator. + * @param recvBuf A pointer identifying the output data address of the operator. + * @param sendCount An integer(u64) identifying the number of the input data. + * @param dataType The data type of the operator, must be one of the following types: int8, int32, float16, float32. + * @param comm A pointer identifying the communication resource based on. + * @param stream A pointer identifying the stream information. + * @return HcclResult + */ +extern HcclResult HcclAllGather(void *sendBuf, void *recvBuf, uint64_t sendCount, HcclDataType dataType, HcclComm comm, + aclrtStream stream); +/** + * @brief Get the rank size of this comm. + * + * @param comm A pointer identifying the communication resource based on. + * @param rankSize A pointer identifying the rank size. + * @return HcclResult + */ +extern HcclResult HcclGetRankSize(HcclComm comm, uint32_t *rankSize); + +/** + * @brief Get the rank id of this comm. + * + * @param comm A pointer identifying the communication resource based on. + * @param rankSize A pointer identifying the rank id. 
+ * @return HcclResult + */ +extern HcclResult HcclGetRankId(HcclComm comm, uint32_t *rank); +/** + * @brief Barrier operator. + * + * @param comm A pointer identifying the communication resource based on. + * @param stream A pointer identifying the stream information. + * @return HcclResult + */ +extern HcclResult HcclBarrier(HcclComm comm, aclrtStream stream); + +/** + * @brief Destroy HCCL comm + * + * @param comm A pointer identifying the communication resource targetting + * @return HcclResult + * @see HcclCommInitClusterInfo() + */ +extern HcclResult HcclCommDestroy(HcclComm comm); + +#ifdef __cplusplus +} +#endif // __cplusplus +#endif // HCCL_H_ diff --git a/inc/external/hccl/hccl_types.h b/inc/external/hccl/hccl_types.h new file mode 100644 index 00000000..0e832396 --- /dev/null +++ b/inc/external/hccl/hccl_types.h @@ -0,0 +1,101 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+/**
+ * @file hccl_types.h
+ * @brief HCCL data type definition
+ *
+ */
+
+#ifndef HCCL_TYPES_H_
+#define HCCL_TYPES_H_
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif  // __cplusplus
+
+/**
+ * @brief HCCL functions return value definition
+ */
+typedef enum {
+  HCCL_SUCCESS = 0,               /**< success */
+  HCCL_E_PARA = 1,                /**< parameter error */
+  HCCL_E_PTR = 2,                 /**< empty pointer */
+  HCCL_E_MEMORY = 3,              /**< memory error */
+  HCCL_E_INTERNAL = 4,            /**< internal error */
+  HCCL_E_NOT_SUPPORT = 5,         /**< not support feature */
+  HCCL_E_NOT_FOUND = 6,           /**< not found specific resource */
+  HCCL_E_UNAVAIL = 7,             /**< resource unavailable */
+  HCCL_E_SYSCALL = 8,             /**< call system interface error */
+  HCCL_E_TIMEOUT = 9,             /**< timeout */
+  HCCL_E_OPEN_FILE_FAILURE = 10,  /**< open file fail */
+  HCCL_E_TCP_CONNECT = 11,        /**< tcp connect fail */
+  HCCL_E_ROCE_CONNECT = 12,       /**< roce connect fail */
+  HCCL_E_TCP_TRANSFER = 13,       /**< tcp transfer fail */
+  HCCL_E_ROCE_TRANSFER = 14,      /**< roce transfer fail */
+  HCCL_E_RUNTIME = 15,            /**< call runtime api fail */
+  HCCL_E_DRV = 16,                /**< call driver api fail */
+  HCCL_E_PROFILING = 17,          /**< call profiling api fail */
+  HCCL_E_CCE = 18,                /**< call cce api fail */
+  HCCL_E_NETWORK = 19,            /**< call network api fail */
+  HCCL_E_RESERVED                 /**< reserved */
+} HcclResult;
+
+/**
+ * @brief handle to HCCL communicator
+ */
+typedef void *HcclComm;
+
+/**
+ * @brief HCCL Reduction operation
+ */
+typedef enum {
+  HCCL_REDUCE_SUM = 0,   /**< sum */
+  HCCL_REDUCE_PROD = 1,  /**< prod */
+  HCCL_REDUCE_MAX = 2,   /**< max */
+  HCCL_REDUCE_MIN = 3,   /**< min */
+  HCCL_REDUCE_RESERVED   /**< reserved */
+} HcclReduceOp;
+
+/**
+ * @brief HCCL data type
+ */
+typedef enum {
+  HCCL_DATA_TYPE_INT8 = 0,    /**< int8 */
+  HCCL_DATA_TYPE_INT16 = 1,   /**< int16 */
+  HCCL_DATA_TYPE_INT32 = 2,   /**< int32 */
+  HCCL_DATA_TYPE_FP16 = 3,    /**< fp16 */
+  HCCL_DATA_TYPE_FP32 = 4,    /**< fp32 */
+  HCCL_DATA_TYPE_INT64 = 5,   /**< int64 */
+  HCCL_DATA_TYPE_UINT64 = 6,  /**< 
uint64 */ + HCCL_DATA_TYPE_RESERVED /**< reserved */ +} HcclDataType; + +const uint32_t HCCL_ROOT_INFO_BYTES = 4108; // 4108: root info length + +/** + * @brief HCCL root info + */ +typedef struct HcclRootInfoDef { + char internal[HCCL_ROOT_INFO_BYTES]; +} HcclRootInfo; + +#ifdef __cplusplus +} +#endif // __cplusplus +#endif // HCCL_TYPES_H_ diff --git a/inc/external/runtime/rt_error_codes.h b/inc/external/runtime/rt_error_codes.h new file mode 100644 index 00000000..a1392cc6 --- /dev/null +++ b/inc/external/runtime/rt_error_codes.h @@ -0,0 +1,109 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef __INC_EXTERNEL_RT_ERROR_CODES_H__
+#define __INC_EXTERNEL_RT_ERROR_CODES_H__
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static const int32_t ACL_RT_SUCCESS = 0;  // success
+
+static const int32_t ACL_ERROR_RT_PARAM_INVALID = 107000;             // param invalid
+static const int32_t ACL_ERROR_RT_INVALID_DEVICEID = 107001;          // invalid device id
+static const int32_t ACL_ERROR_RT_CONTEXT_NULL = 107002;              // current context null
+static const int32_t ACL_ERROR_RT_STREAM_CONTEXT = 107003;            // stream not in current context
+static const int32_t ACL_ERROR_RT_MODEL_CONTEXT = 107004;             // model not in current context
+static const int32_t ACL_ERROR_RT_STREAM_MODEL = 107005;              // stream not in model
+static const int32_t ACL_ERROR_RT_EVENT_TIMESTAMP_INVALID = 107006;   // event timestamp invalid
+static const int32_t ACL_ERROR_RT_EVENT_TIMESTAMP_REVERSAL = 107007;  // event timestamp reversal
+static const int32_t ACL_ERROR_RT_ADDR_UNALIGNED = 107008;            // memory address unaligned
+static const int32_t ACL_ERROR_RT_FILE_OPEN = 107009;                 // open file failed
+static const int32_t ACL_ERROR_RT_FILE_WRITE = 107010;                // write file failed
+static const int32_t ACL_ERROR_RT_STREAM_SUBSCRIBE = 107011;          // error subscribe stream
+static const int32_t ACL_ERROR_RT_THREAD_SUBSCRIBE = 107012;          // error subscribe thread
+static const int32_t ACL_ERROR_RT_GROUP_NOT_SET = 107013;             // group not set
+static const int32_t ACL_ERROR_RT_GROUP_NOT_CREATE = 107014;          // group not create
+static const int32_t ACL_ERROR_RT_STREAM_NO_CB_REG = 107015;          // callback not register to stream
+static const int32_t ACL_ERROR_RT_INVALID_MEMORY_TYPE = 107016;       // invalid memory type
+static const int32_t ACL_ERROR_RT_INVALID_HANDLE = 107017;            // invalid handle
+static const int32_t ACL_ERROR_RT_INVALID_MALLOC_TYPE = 107018;       // invalid malloc type
+static const int32_t ACL_ERROR_RT_WAIT_TIMEOUT = 107019;              // wait timeout
+
+static const int32_t ACL_ERROR_RT_FEATURE_NOT_SUPPORT = 207000;  // feature not support
+static const int32_t ACL_ERROR_RT_MEMORY_ALLOCATION = 207001; // memory allocation error +static const int32_t ACL_ERROR_RT_MEMORY_FREE = 207002; // memory free error +static const int32_t ACL_ERROR_RT_AICORE_OVER_FLOW = 207003; // aicore over flow +static const int32_t ACL_ERROR_RT_NO_DEVICE = 207004; // no device +static const int32_t ACL_ERROR_RT_RESOURCE_ALLOC_FAIL = 207005; // resource alloc fail +static const int32_t ACL_ERROR_RT_NO_PERMISSION = 207006; // no permission +static const int32_t ACL_ERROR_RT_NO_EVENT_RESOURCE = 207007; // no event resource +static const int32_t ACL_ERROR_RT_NO_STREAM_RESOURCE = 207008; // no stream resource +static const int32_t ACL_ERROR_RT_NO_NOTIFY_RESOURCE = 207009; // no notify resource +static const int32_t ACL_ERROR_RT_NO_MODEL_RESOURCE = 207010; // no model resource +static const int32_t ACL_ERROR_RT_NO_CDQ_RESOURCE = 207011; // no cdq resource + +static const int32_t ACL_ERROR_RT_INTERNAL_ERROR = 507000; // runtime internal error +static const int32_t ACL_ERROR_RT_TS_ERROR = 507001; // ts internal error +static const int32_t ACL_ERROR_RT_STREAM_TASK_FULL = 507002; // task full in stream +static const int32_t ACL_ERROR_RT_STREAM_TASK_EMPTY = 507003; // task empty in stream +static const int32_t ACL_ERROR_RT_STREAM_NOT_COMPLETE = 507004; // stream not complete +static const int32_t ACL_ERROR_RT_END_OF_SEQUENCE = 507005; // end of sequence +static const int32_t ACL_ERROR_RT_EVENT_NOT_COMPLETE = 507006; // event not complete +static const int32_t ACL_ERROR_RT_CONTEXT_RELEASE_ERROR = 507007; // context release error +static const int32_t ACL_ERROR_RT_SOC_VERSION = 507008; // soc version error +static const int32_t ACL_ERROR_RT_TASK_TYPE_NOT_SUPPORT = 507009; // task type not support +static const int32_t ACL_ERROR_RT_LOST_HEARTBEAT = 507010; // ts lost heartbeat +static const int32_t ACL_ERROR_RT_MODEL_EXECUTE = 507011; // model execute failed +static const int32_t ACL_ERROR_RT_REPORT_TIMEOUT = 507012; // report timeout 
+static const int32_t ACL_ERROR_RT_SYS_DMA = 507013; // sys dma error +static const int32_t ACL_ERROR_RT_AICORE_TIMEOUT = 507014; // aicore timeout +static const int32_t ACL_ERROR_RT_AICORE_EXCEPTION = 507015; // aicore exception +static const int32_t ACL_ERROR_RT_AICORE_TRAP_EXCEPTION = 507016; // aicore trap exception +static const int32_t ACL_ERROR_RT_AICPU_TIMEOUT = 507017; // aicpu timeout +static const int32_t ACL_ERROR_RT_AICPU_EXCEPTION = 507018; // aicpu exception +static const int32_t ACL_ERROR_RT_AICPU_DATADUMP_RSP_ERR = 507019; // aicpu datadump response error +static const int32_t ACL_ERROR_RT_AICPU_MODEL_RSP_ERR = 507020; // aicpu model operate response error +static const int32_t ACL_ERROR_RT_PROFILING_ERROR = 507021; // profiling error +static const int32_t ACL_ERROR_RT_IPC_ERROR = 507022; // ipc error +static const int32_t ACL_ERROR_RT_MODEL_ABORT_NORMAL = 507023; // model abort normal +static const int32_t ACL_ERROR_RT_KERNEL_UNREGISTERING = 507024; // kernel unregistering +static const int32_t ACL_ERROR_RT_RINGBUFFER_NOT_INIT = 507025; // ringbuffer not init +static const int32_t ACL_ERROR_RT_RINGBUFFER_NO_DATA = 507026; // ringbuffer no data +static const int32_t ACL_ERROR_RT_KERNEL_LOOKUP = 507027; // kernel lookup error +static const int32_t ACL_ERROR_RT_KERNEL_DUPLICATE = 507028; // kernel register duplicate +static const int32_t ACL_ERROR_RT_DEBUG_REGISTER_FAIL = 507029; // debug register failed +static const int32_t ACL_ERROR_RT_DEBUG_UNREGISTER_FAIL = 507030; // debug unregister failed +static const int32_t ACL_ERROR_RT_LABEL_CONTEXT = 507031; // label not in current context +static const int32_t ACL_ERROR_RT_PROGRAM_USE_OUT = 507032; // program register num use out +static const int32_t ACL_ERROR_RT_DEV_SETUP_ERROR = 507033; // device setup error +static const int32_t ACL_ERROR_RT_VECTOR_CORE_TIMEOUT = 507034; // vector core timeout +static const int32_t ACL_ERROR_RT_VECTOR_CORE_EXCEPTION = 507035; // vector core exception +static const 
int32_t ACL_ERROR_RT_VECTOR_CORE_TRAP_EXCEPTION = 507036; // vector core trap exception +static const int32_t ACL_ERROR_RT_CDQ_BATCH_ABNORMAL = 507037; // cdq alloc batch abnormal + +static const int32_t ACL_ERROR_RT_DRV_INTERNAL_ERROR = 507899; // drv internal error +static const int32_t ACL_ERROR_RT_AICPU_INTERNAL_ERROR = 507900; // aicpu internal error +static const int32_t ACL_ERROR_RT_SOCKET_CLOSE = 507901; // hdc disconnect + +#ifdef __cplusplus +} +#endif + +#endif // __INC_EXTERNEL_RT_ERROR_CODES_H__ diff --git a/inc/framework/ge_runtime/task_info.h b/inc/framework/ge_runtime/task_info.h index f59c6454..4530bff7 100644 --- a/inc/framework/ge_runtime/task_info.h +++ b/inc/framework/ge_runtime/task_info.h @@ -271,13 +271,14 @@ class FusionEndTaskInfo : public TaskInfo { class HcclTaskInfo : public TaskInfo { public: HcclTaskInfo(const std::string &op_name, uint32_t stream_id, const std::string hccl_type, void *input_data_addr, - void *output_data_addr, int64_t workspace_size, int64_t hccl_stream_num, + void *output_data_addr, void *workspace_addr, int64_t workspace_size, int64_t hccl_stream_num, const std::vector &private_def, void *ops_kernel_store, int32_t count, int64_t root_id, int64_t op_type, int64_t data_type, const std::string &group, bool dump_flag) : TaskInfo(op_name, stream_id, TaskInfoType::HCCL, dump_flag), hccl_type_(hccl_type), input_data_addr_(input_data_addr), output_data_addr_(output_data_addr), + workspace_addr_(workspace_addr), workspace_size_(workspace_size), hccl_stream_num_(hccl_stream_num), private_def_(private_def), @@ -292,6 +293,7 @@ class HcclTaskInfo : public TaskInfo { const std::string &hccl_type() const { return hccl_type_; } void *input_data_addr() const { return input_data_addr_; } void *output_data_addr() const { return output_data_addr_; } + void *workspace_addr() const { return workspace_addr_; } int64_t workspace_size() const { return workspace_size_; } int64_t hccl_stream_num() const { return hccl_stream_num_; } const 
std::vector &private_def() const { return private_def_; } @@ -306,6 +308,7 @@ class HcclTaskInfo : public TaskInfo { std::string hccl_type_; void *input_data_addr_; void *output_data_addr_; + void *workspace_addr_; int64_t workspace_size_; int64_t hccl_stream_num_; std::vector private_def_; diff --git a/metadef b/metadef index a725349b..21178899 160000 --- a/metadef +++ b/metadef @@ -1 +1 @@ -Subproject commit a725349b65aef2940555af2ddb7b9461fbe0d5fd +Subproject commit 211788997dcc9aa63527541a44d511388c06bce5 diff --git a/scripts/format_source_code.sh b/scripts/format_source_code.sh new file mode 100755 index 00000000..1fd0b4f6 --- /dev/null +++ b/scripts/format_source_code.sh @@ -0,0 +1,107 @@ +#!/bin/bash +# Copyright 2019-2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +set -e + +CLANG_FORMAT=$(which clang-format) || (echo "Please install 'clang-format' tool first"; exit 1) + +version=$("${CLANG_FORMAT}" --version | sed -n "s/.*\ \([0-9]*\)\.[0-9]*\.[0-9]*.*/\1/p") +if [[ "${version}" -lt "8" ]]; then + echo "clang-format's version must be at least 8.0.0" + exit 1 +fi + +CURRENT_PATH=$(pwd) +SCRIPTS_PATH=$(dirname "$0") + +echo "CURRENT_PATH=${CURRENT_PATH}" +echo "SCRIPTS_PATH=${SCRIPTS_PATH}" + +# print usage message +function usage() +{ + echo "Format the specified source files to conform the code style." 
+ echo "Usage:" + echo "bash $0 [-a] [-c] [-l] [-h]" + echo "e.g. $0 -c" + echo "" + echo "Options:" + echo " -a format of all files" + echo " -c format of the files changed compared to last commit, default case" + echo " -l format of the files changed in last commit" + echo " -h Print usage" +} + +# check and set options +function checkopts() +{ + # init variable + mode="changed" # default format changed files + + # Process the options + while getopts 'aclh' opt + do + case "${opt}" in + a) + mode="all" + ;; + c) + mode="changed" + ;; + l) + mode="lastcommit" + ;; + h) + usage + exit 0 + ;; + *) + echo "Unknown option ${opt}!" + usage + exit 1 + esac + done +} + +# init variable +# check options +checkopts "$@" + +# switch to project root path, which contains clang-format config file '.clang-format' +cd "${SCRIPTS_PATH}/.." || exit 1 + +FMT_FILE_LIST='__format_files_list__' + +if [[ "X${mode}" == "Xall" ]]; then + find src -type f -name "*" | grep "\.h$\|\.cc$" > "${FMT_FILE_LIST}" || true + find inc -type f -name "*" | grep "\.h$\|\.cc$" >> "${FMT_FILE_LIST}" || true +elif [[ "X${mode}" == "Xchanged" ]]; then + # --diff-filter=ACMRTUXB will ignore deleted files in commit + git diff --diff-filter=ACMRTUXB --name-only | grep "^inc\|^src" | grep "\.h$\|\.cc$" >> "${FMT_FILE_LIST}" || true +else # "X${mode}" == "Xlastcommit" + git diff --diff-filter=ACMRTUXB --name-only HEAD~ HEAD | grep "^inc\|^src" | grep "\.h$\|\.cc$" > "${FMT_FILE_LIST}" || true +fi + +while read line; do + if [ -f "${line}" ]; then + ${CLANG_FORMAT} -i "${line}" + fi +done < "${FMT_FILE_LIST}" + +rm "${FMT_FILE_LIST}" +cd "${CURRENT_PATH}" || exit 1 + +echo "Specified cpp source files have been formatted successfully." 
diff --git a/third_party/fwkacllib/inc/cce/taskdown_common.hpp b/third_party/fwkacllib/inc/cce/taskdown_common.hpp index 3ecea523..7954162e 100644 --- a/third_party/fwkacllib/inc/cce/taskdown_common.hpp +++ b/third_party/fwkacllib/inc/cce/taskdown_common.hpp @@ -27,15 +27,16 @@ namespace cce { #define CC_FUSION_OP_MAX 32 typedef enum tagccKernelType { - CCE_AI_CORE = 0, /* cce aicore */ - CCE_AI_CPU = 1, /* cce aicpu */ - TE = 2, /* te operator*/ - CUSTOMIZED = 3, /* customized operator */ - TE_AI_CORE = 4, /* te aicore operator*/ - TE_AI_CPU = 5, /* te aicpu operator */ - AI_CPU = 6, /* aicpu */ - CUST_AI_CPU = 7, /* custom aicpu*/ - INVALID = 8, /* unknown kernel type */ + CCE_AI_CORE = 0, /* cce aicore */ + CCE_AI_CPU = 1, /* cce aicpu */ + TE = 2, /* te operator*/ + CUSTOMIZED = 3, /* customized operator */ + TE_AI_CORE = 4, /* te aicore operator*/ + TE_AI_CPU = 5, /* te aicpu operator */ + AI_CPU = 6, /* aicpu */ + CUST_AI_CPU = 7, /* custom aicpu*/ + HOST_CPU = 8, /* host cpu */ + INVALID = 10000 /* unknown kernel type */ } ccKernelType; typedef struct tagOpContext { diff --git a/third_party/fwkacllib/inc/external/runtime/rt_error_codes.h b/third_party/fwkacllib/inc/external/runtime/rt_error_codes.h old mode 100755 new mode 100644 diff --git a/third_party/fwkacllib/inc/hccl/base.h b/third_party/fwkacllib/inc/hccl/base.h index e57563b3..ffbf552b 100644 --- a/third_party/fwkacllib/inc/hccl/base.h +++ b/third_party/fwkacllib/inc/hccl/base.h @@ -124,27 +124,27 @@ struct HcomRemoteAccessAddrInfo { }; struct HcomAllToAllVParams { - void *sendbuf; - void *sendcounts; - void *sdispls; - HcclDataType sendtype; - void *recvbuf; - void *recvcounts; - void *rdispls; - HcclDataType recvtype; - const char *group; + void *sendbuf; // device mem + void *sendcounts; // device mem; Type: uint_64 + void *sdispls; // device mem; Type: uint_64 + HcclDataType sendtype; + void *recvbuf; // device mem + void *recvcounts; // device mem; Type: uint_64 + void *rdispls; // device mem; 
Type: uint_64 + HcclDataType recvtype; + const char *group; // not used now }; struct HcomGatherAllToAllVParams { - void *addrInfo; - void *addrInfoCountPerRank; - void *recvbuf; - void *recvcounts; - void *rdispls; - void *gatheredbuf; - s32 addrLength; - HcclDataType recvtype; - const char *group; + void *addrInfo; // device mem; contains host VA[uint_64]: [addr, length, addr, length, addr, length, ...] + void *addrInfoCountPerRank; // device mem; length: ranksize; contains addrInfoCounts for every rank + void *recvbuf; // device mem + void *recvcounts; // device mem; Type: uint_64 + void *rdispls; // device mem; Type: uint_64 + void *gatheredbuf; // device mem + s32 addrLength; + HcclDataType recvtype; + const char *group; // not used now }; #ifdef __cplusplus diff --git a/third_party/fwkacllib/inc/hccl/hccl_types.h b/third_party/fwkacllib/inc/hccl/hccl_types.h deleted file mode 100644 index 50a64795..00000000 --- a/third_party/fwkacllib/inc/hccl/hccl_types.h +++ /dev/null @@ -1,101 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/** - * @file hccl_types.h - * @brief HCCL data type definition - * - */ - -#ifndef HCCL_TYPES_H_ -#define HCCL_TYPES_H_ - -#include - -#ifdef __cplusplus -extern "C" { -#endif // __cplusplus - -/** - * @brief HCCL functions return value definition - */ -typedef enum { - HCCL_SUCCESS = 0, /**< success */ - HCCL_E_PARA = 1, /**< parameter error */ - HCCL_E_PTR = 2, /**< empty pointer */ - HCCL_E_MEMORY = 3, /**< memory error */ - HCCL_E_INTERNAL = 4, /**< internal error */ - HCCL_E_NOT_SUPPORT = 5, /**< not support feature */ - HCCL_E_NOT_FOUND = 6, /**< not found specific resource */ - HCCL_E_UNAVAIL = 7, /**< resource unavailable */ - HCCL_E_SYSCALL = 8, /**< call system interface error */ - HCCL_E_TIMEOUT = 9, /**< timeout */ - HCCL_E_OPEN_FILE_FAILURE = 10, /**< open file fail */ - HCCL_E_TCP_CONNECT = 11, /**< tcp connect fail */ - HCCL_E_ROCE_CONNECT = 12, /**< roce connect fail */ - HCCL_E_TCP_TRANSFER = 13, /**< tcp transfer fail */ - HCCL_E_ROCE_TRANSFER = 14, /**< roce transfer fail */ - HCCL_E_RUNTIME = 15, /**< call runtime api fail */ - HCCL_E_DRV = 16, /**< call driver api fail */ - HCCL_E_PROFILING = 17, /**< call profiling api fail */ - HCCL_E_CCE = 18, /**< call cce api fail */ - HCCL_E_NETWORK = 19, /**< call network api fail */ - HCCL_E_RESERVED /**< reserved */ -} HcclResult; - -/** - * @brief handle to HCCL communicator - */ -typedef void *HcclComm; - -/** - * @brief HCCL Reduction opperation - */ -typedef enum { - HCCL_REDUCE_SUM = 0, /**< sum */ - HCCL_REDUCE_PROD = 1, /**< prod */ - HCCL_REDUCE_MAX = 2, /**< max */ - HCCL_REDUCE_MIN = 3, /**< min */ - HCCL_REDUCE_RESERVED /**< reserved */ -} HcclReduceOp; - -/** - * @brief HCCL data type - */ -typedef enum { - HCCL_DATA_TYPE_INT8 = 0, /**< int8 */ - HCCL_DATA_TYPE_INT16 = 1, /**< int16 */ - HCCL_DATA_TYPE_INT32 = 2, /**< int32 */ - HCCL_DATA_TYPE_FP16 = 3, /**< fp16 */ - HCCL_DATA_TYPE_FP32 = 4, /**< fp32 */ - HCCL_DATA_TYPE_INT64 = 5, /**< int64 */ - HCCL_DATA_TYPE_UINT64 = 6, /**< 
uint64 */ - HCCL_DATA_TYPE_RESERVED /**< reserved */ -} HcclDataType; - -const uint32_t HCCL_ROOT_INFO_BYTES = 4108; // 4108: root info length - -/** - * @brief HCCL root info - */ -typedef struct HcclRootInfoDef { - char internal[HCCL_ROOT_INFO_BYTES]; -} HcclRootInfo; - -#ifdef __cplusplus -} -#endif // __cplusplus -#endif // HCCL_TYPES_H_ diff --git a/third_party/fwkacllib/inc/hccl/hcom.h b/third_party/fwkacllib/inc/hccl/hcom.h index 955764d6..bf1f395b 100644 --- a/third_party/fwkacllib/inc/hccl/hcom.h +++ b/third_party/fwkacllib/inc/hccl/hcom.h @@ -164,8 +164,22 @@ HcclResult HcomExecEnqueueRemoteAccess(const std::string& remoteAccessType, const std::vector& addrInfos, std::function callback); +/** + * @brief Put alltoallv communication operation into hcom executor. + * + * @param params information about alltoallv communication operation. + * @param callback callback after collective communication operation. + * @return HcclResult + */ HcclResult HcomExecEnqueueAllToAllV(HcomAllToAllVParams params, std::function callback); +/** + * @brief Put gather alltoallv communication operation into hcom executor. + * + * @param params information about gather alltoallv communication operation. + * @param callback callback after collective communication operation. 
+ * @return HcclResult + */ HcclResult HcomExecEnqueueGatherAllToAllV(HcomGatherAllToAllVParams params, std::function callback); diff --git a/third_party/fwkacllib/inc/mmpa/mmpa_api.h b/third_party/fwkacllib/inc/mmpa/mmpa_api.h index 38a689ee..f8d5ccf3 100644 --- a/third_party/fwkacllib/inc/mmpa/mmpa_api.h +++ b/third_party/fwkacllib/inc/mmpa/mmpa_api.h @@ -56,6 +56,7 @@ #include #include #include +#include #include #include diff --git a/third_party/fwkacllib/inc/mmpa/sub_inc/mmpa_linux.h b/third_party/fwkacllib/inc/mmpa/sub_inc/mmpa_linux.h index 993f36ba..3d196e41 100644 --- a/third_party/fwkacllib/inc/mmpa/sub_inc/mmpa_linux.h +++ b/third_party/fwkacllib/inc/mmpa/sub_inc/mmpa_linux.h @@ -550,6 +550,10 @@ MMPA_FUNC_VISIBILITY mmFileHandle mmShmOpen(const CHAR *name, INT32 oflag, mmMod MMPA_FUNC_VISIBILITY INT32 mmShmUnlink(const CHAR *name); MMPA_FUNC_VISIBILITY VOID *mmMmap(mmFd_t fd, mmSize_t size, mmOfft_t offset, mmFd_t *extra, INT32 prot, INT32 flags); MMPA_FUNC_VISIBILITY INT32 mmMunMap(VOID *data, mmSize_t size, mmFd_t *extra); + +MMPA_FUNC_VISIBILITY mmSize mmGetPageSize(); +MMPA_FUNC_VISIBILITY VOID *mmAlignMalloc(mmSize mallocSize, mmSize alignSize); +MMPA_FUNC_VISIBILITY VOID mmAlignFree(VOID *addr); #define MMPA_DLL_API #ifdef __cplusplus diff --git a/third_party/fwkacllib/inc/mmpa/sub_inc/mmpa_win.h b/third_party/fwkacllib/inc/mmpa/sub_inc/mmpa_win.h index 49e97a5d..e6b6f71e 100644 --- a/third_party/fwkacllib/inc/mmpa/sub_inc/mmpa_win.h +++ b/third_party/fwkacllib/inc/mmpa/sub_inc/mmpa_win.h @@ -557,6 +557,10 @@ MMPA_FUNC_VISIBILITY mmFileHandle mmShmOpen(const CHAR *name, INT32 oflag, mmMod MMPA_FUNC_VISIBILITY INT32 mmShmUnlink(const CHAR *name); MMPA_FUNC_VISIBILITY VOID *mmMmap(mmFd_t fd, mmSize_t size, mmOfft_t offset, mmFd_t *extra, INT32 prot, INT32 flags); MMPA_FUNC_VISIBILITY INT32 mmMunMap(VOID *data, mmSize_t size, mmFd_t *extra); + +MMPA_FUNC_VISIBILITY mmSize mmGetPageSize(); +MMPA_FUNC_VISIBILITY VOID *mmAlignMalloc(mmSize mallocSize, 
mmSize alignSize); +MMPA_FUNC_VISIBILITY VOID mmAlignFree(VOID *addr); #ifdef __cplusplus #if __cplusplus } diff --git a/third_party/fwkacllib/inc/ops/aipp.h b/third_party/fwkacllib/inc/ops/aipp.h index bed984bd..86805f72 100644 --- a/third_party/fwkacllib/inc/ops/aipp.h +++ b/third_party/fwkacllib/inc/ops/aipp.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -65,6 +65,8 @@ in aipp config file, framework will auto add one input node to graph at last. \n *@par Third-party framework compatibility *Compatible with the TensorFlow operator AippData. +*@par Restrictions: +*Warning: This operator can be integrated only by configuring INSERT_OP_FILE of aclgrphBuildModel. Please do not use it directly. */ REG_OP(AippData) .INPUT(data, TensorType::ALL()) diff --git a/third_party/fwkacllib/inc/ops/all_ops.h b/third_party/fwkacllib/inc/ops/all_ops.h index 1ac83783..cc11f5f9 100644 --- a/third_party/fwkacllib/inc/ops/all_ops.h +++ b/third_party/fwkacllib/inc/ops/all_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -39,6 +39,7 @@ #include "image_ops.h" #include "internal_ops.h" #include "linalg_ops.h" +#include "list_ops.h" #include "logging_ops.h" #include "lookup_ops.h" #include "math_ops.h" diff --git a/third_party/fwkacllib/inc/ops/array_ops.h b/third_party/fwkacllib/inc/ops/array_ops.h index e1f64421..fd35b546 100644 --- a/third_party/fwkacllib/inc/ops/array_ops.h +++ b/third_party/fwkacllib/inc/ops/array_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -626,7 +626,7 @@ REG_OP(StopGradient) *x: A tensor. \n *@par Outputs: -*y: A tensor. \n +*y: A tensor with the same shape and contents as input. \n *@par Third-party framework compatibility *Compatible with the TensorFlow operator Identity. @@ -666,7 +666,7 @@ REG_OP(IdentityN) *@li axis: The dimension index at which to expand. \n *@par Outputs: -*y: A tensor. \n +*y: A tensor with the same data as input, with an additional dimension inserted at the index specified by axis. \n *@par Third-party framework compatibility *Compatible with the TensorFlow operator ExpandDims. @@ -713,7 +713,7 @@ REG_OP(Unsqueeze) *@par Outputs: *y: A tensor. \n -*@par Attention: +*@attention Constraints: *This operator cannot be directly called by the acllopExecute API. \n *@par Third-party framework compatibility @@ -1153,6 +1153,102 @@ REG_OP(EditDistance) .OUTPUT(output, TensorType({DT_FLOAT})) .OP_END_FACTORY_REG(EditDistance) +/** +* @brief sort_v2. + +* @par Inputs: +* @li x: An ND tensor of type float16. + +* @par Attributes: + +* @li axis: An optional int. The dimension to sort along. This value defaults to -1. +* @li descending: An optional bool. Controls the sorting order (ascending or descending). This value defaults to False. + +* @par Outputs: +* @li y: An ND tensor of type float16. 
+ +* @attention Constraints: +* @li Axis should select the last dim. +* @li When the sorting data is less than 150K, it is recommended to use this tbe ops, + and the descending performance is better than the ascending. +* @li The upper limit of data on Ascend910 is 2000K. +*/ +REG_OP(SortV2) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .ATTR(axis, Int, -1) + .ATTR(descending, Bool, false) + .OP_END_FACTORY_REG(SortV2) + +/** +* @brief Expand the input tensor to a compatible shape. \n + +* @par Inputs: +* One inputs, including: +* @li x: A Tensor. Must be one of the following types: +* float16, float32, int32, int8 ,uint8. \n +* @li shape: A Tensor to specify the shape that the input tensor expanded to. \n + +* @par Outputs: +* @li y: A Tensor. Has the same type as "x", and the shape specified by input and attr shape \n + +* @par Third-party framework compatibility +* Compatible with the ONNX operator Expand. +*/ + +REG_OP(Expand) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8})) + .INPUT(shape, TensorType({DT_INT16, DT_INT32, DT_INT64})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8})) + .OP_END_FACTORY_REG(Expand) + +/** +*@Returns a tensor containing the indices of all non-zero elements of input. \n + +*@par Inputs: +*@li x: A Tensor. Must be one of the following types: float16, float32, int32, int64. + +*@par Attributes: +* transpose: the output tensor will be transposed if true. \n + +*@par Outputs: +* y: A Tensor. Has the same type as "x" . \n + +*@par Third-party framework compatibility +*Compatible with the PyTorch operator NonZero. 
+*/ + +REG_OP(NonZero) + .INPUT(x, TensorType({DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT8, DT_UINT8, DT_INT16, \ + DT_UINT16, DT_INT32, DT_UINT32, DT_INT64, DT_UINT64, DT_BOOL})) + .OUTPUT(y, TensorType({DT_INT64})) + .ATTR(transpose, Bool, false) + .OP_END_FACTORY_REG(NonZero) + +/** +* @brief Expand the input tensor to a compatible shape. \n + +* @par Inputs: +* One inputs, including: +* @li x: A Tensor. Must be one of the following types: +* float16, float32, int32, int8 ,uint8. \n + +* @par Attributes: +* @li shape: A required listInt to specify the shape that the input tensor expanded to. \n + + +* @par Outputs: +* @li y: A Tensor. Has the same type as "x", and the shape specified by input and attr shape \n + +* @par Third-party framework compatibility +* Compatible with the ONNX operator Expand. +*/ + +REG_OP(ExpandD) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8})) + .REQUIRED_ATTR(shape, ListInt) + .OP_END_FACTORY_REG(ExpandD) } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_ARRAY_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/audio_ops.h b/third_party/fwkacllib/inc/ops/audio_ops.h index d9883253..f05135d1 100644 --- a/third_party/fwkacllib/inc/ops/audio_ops.h +++ b/third_party/fwkacllib/inc/ops/audio_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/third_party/fwkacllib/inc/ops/avg_pool_1d_ops.h b/third_party/fwkacllib/inc/ops/avg_pool_1d_ops.h new file mode 100644 index 00000000..d0800a08 --- /dev/null +++ b/third_party/fwkacllib/inc/ops/avg_pool_1d_ops.h @@ -0,0 +1,58 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! + * \file avg_pool_1d_ops.h + * \brief + */ +#ifndef OPS_BUILT_IN_OP_PROTO_INC_AVGPOOL1DOPS_H_ +#define OPS_BUILT_IN_OP_PROTO_INC_AVGPOOL1DOPS_H_ +#include "graph/operator_reg.h" + +namespace ge { +/** +*@brief Generate an auxiliary matrix . \n + +*@par Inputs: +* @li x: A tensor. Must be one of the following types:uint8, int8,int16, int32, + int64, float16, float, double.The format must be NHWC NCHW NC1HWC0. + +*@par Attributes: +*@li ksize: Kernel size. Input type is int. +*@li strides: Input type is int. +*@li pads: Input type is listInt . +*@li ceil_mode: Bool, default value is false. +*@li count_include_pad: Bool, default value is false. \n + +*@par Outputs: +*y_tensor: A tensor with the same types as "x" . \n +*@par Third-party framework compatibility + +*Compatible with the TensorFlow operator Unbatch. 
+*/ +REG_OP(AvgPool1DAvgMatrix) + .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT8, + DT_INT32, DT_INT64, DT_DOUBLE})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT8, + DT_INT32, DT_INT64, DT_DOUBLE})) + .REQUIRED_ATTR(ksize, Int) + .REQUIRED_ATTR(strides, Int) + .REQUIRED_ATTR(pads, ListInt) + .ATTR(ceil_mode, Bool, false) + .ATTR(count_include_pad, Bool, false) + .OP_END_FACTORY_REG(AvgPool1DAvgMatrix) +} +#endif \ No newline at end of file diff --git a/third_party/fwkacllib/inc/ops/batch_ops.h b/third_party/fwkacllib/inc/ops/batch_ops.h index 8a1c5a7b..ca4fe1db 100644 --- a/third_party/fwkacllib/inc/ops/batch_ops.h +++ b/third_party/fwkacllib/inc/ops/batch_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -64,10 +64,10 @@ the same types as "x_tensors" . It's a dynamic output. \n REG_OP(Batch) .DYNAMIC_INPUT(x_tensors, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, \ DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE})) - .OUTPUT(y_index, TensorType({ DT_INT64 })) - .OUTPUT(y_id, TensorType({ DT_INT64 })) .DYNAMIC_OUTPUT(y_tensors, TensorType({DT_INT8, DT_UINT8, DT_INT16, \ DT_UINT16, DT_INT32, DT_INT64, DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_BOOL})) + .OUTPUT(y_index, TensorType({ DT_INT64 })) + .OUTPUT(y_id, TensorType({ DT_INT64 })) .REQUIRED_ATTR(num_batch_threads, Int) .REQUIRED_ATTR(max_batch_size, Int) .ATTR(max_enqueued_batches, Int, 10) @@ -107,11 +107,13 @@ across multiple sessions . 
\n REG_OP(Unbatch) .INPUT(x_tensor, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \ - DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE})) + DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE, DT_FLOAT16, \ + DT_COMPLEX64, DT_COMPLEX128})) .INPUT(index, TensorType({DT_INT64})) .INPUT(id, TensorType({DT_INT64})) .OUTPUT(y_tensor, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \ - DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE})) + DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE, DT_FLOAT16, \ + DT_COMPLEX64, DT_COMPLEX128})) .REQUIRED_ATTR(timeout_micros, Int) .ATTR(container, String, "") .ATTR(shared_name, String, "") @@ -146,13 +148,16 @@ across multiple sessions . \n REG_OP(UnbatchGrad) .INPUT(x_input, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \ - DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE})) + DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE, DT_FLOAT16, \ + DT_COMPLEX64, DT_COMPLEX128})) .INPUT(index, TensorType({DT_INT64})) .INPUT(grad, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \ - DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE})) + DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE, DT_FLOAT16, \ + DT_COMPLEX64, DT_COMPLEX128})) .INPUT(id, TensorType({DT_INT64})) .OUTPUT(y_grad, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \ - DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE})) + DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE, DT_FLOAT16, \ + DT_COMPLEX64, DT_COMPLEX128})) .ATTR(container, String, "") .ATTR(shared_name, String, "") .OP_END_FACTORY_REG(UnbatchGrad) diff --git a/third_party/fwkacllib/inc/ops/bitwise_ops.h b/third_party/fwkacllib/inc/ops/bitwise_ops.h index 5c83e161..dac78118 100644 --- a/third_party/fwkacllib/inc/ops/bitwise_ops.h +++ b/third_party/fwkacllib/inc/ops/bitwise_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except 
in compliance with the License. @@ -26,6 +26,35 @@ namespace ge { /** +*@brief Element-wise computes the bitwise left-shift of x and y . \n + +*@par Inputs: +*Input "x" is a k-dimensional tensor. Inputs "num_lower" and "num_upper" +are 0D scalars. +* @li x: A Tensor. Must be one of the following types: int8, int16, int32, +int64, uint8, uint16, uint32, uint64. +* @li y: A Tensor. Has the same type as "x". \n + +*@par Outputs: +* z: A Tensor. Has the same type as "x". \n + +*@attention Constraints: +*LeftShift runs on the Ascend AI CPU, which delivers poor performance. \n + +*@par Third-party framework compatibility +*Compatible with the TensorFlow operator LeftShift. +*/ + +REG_OP(LeftShift) + .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, \ + DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64})) + .INPUT(y, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, \ + DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64})) + .OUTPUT(z, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, \ + DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64})) + .OP_END_FACTORY_REG(LeftShift) + +/** *@brief Element-wise computes the bitwise right-shift of x and y . \n *@par Inputs: diff --git a/third_party/fwkacllib/inc/ops/boosted_trees_ops.h b/third_party/fwkacllib/inc/ops/boosted_trees_ops.h index 550e8b7d..08e54824 100644 --- a/third_party/fwkacllib/inc/ops/boosted_trees_ops.h +++ b/third_party/fwkacllib/inc/ops/boosted_trees_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/third_party/fwkacllib/inc/ops/candidate_sampling_ops.h b/third_party/fwkacllib/inc/ops/candidate_sampling_ops.h index e20607bf..890c52ae 100644 --- a/third_party/fwkacllib/inc/ops/candidate_sampling_ops.h +++ b/third_party/fwkacllib/inc/ops/candidate_sampling_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/condtake_ops.h b/third_party/fwkacllib/inc/ops/condtake_ops.h index 5e91eb07..029cffbf 100644 --- a/third_party/fwkacllib/inc/ops/condtake_ops.h +++ b/third_party/fwkacllib/inc/ops/condtake_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/control_flow_ops.h b/third_party/fwkacllib/inc/ops/control_flow_ops.h index 7196b14f..e5bd3534 100644 --- a/third_party/fwkacllib/inc/ops/control_flow_ops.h +++ b/third_party/fwkacllib/inc/ops/control_flow_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -96,7 +96,7 @@ REG_OP(RefMerge) * Otherwise, the data is forwarded to "output_false" . \n *@par Inputs: - *@li data: The tensor to be forwarded. \ n + *@li data: The tensor to be forwarded. \n * Must be one of the following types: float16, float32, float64, * int8, int16, int32, int64, uint8, uint16, uint32, uint64, bool. *@li pred: A boolean scalar. The output port that will receive data . 
\n @@ -387,12 +387,12 @@ REG_OP(ControlTrigger) *@par Inputs: * Three inputs, including: -*@li x: One dimensional tensore of type int32, specifying queried shape, max size is 8. -*@li data_seq: One dimensional tensore of type int32, specifying the mapped table is queried. -*@li level_index: One dimensional tensore of type int32, specifying secondary index. \n +*@li x: One dimensional tensor of type int32, specifying queried shape, max size is 128. +*@li data_seq: One dimensional tensor of type int32, specifying the mapped table is queried. +*@li level_index: One dimensional tensor of type int32, specifying secondary index. \n *@par Outputs: -*@li y: A Tensor with shape [batch, 8], of type int32, specifying index of shape in the map. +*@li y: A Tensor with shape [8], of type int32, specifying index of shape in the map. *@par Third-party framework compatibility * It is a custom operator. It has no corresponding operator in Caffe. */ diff --git a/third_party/fwkacllib/inc/ops/correlation.h b/third_party/fwkacllib/inc/ops/correlation.h new file mode 100644 index 00000000..caebba50 --- /dev/null +++ b/third_party/fwkacllib/inc/ops/correlation.h @@ -0,0 +1,52 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! 
+ * \file correlation.h + * \brief + */ +#ifndef GE_OP_CORRELATION_OPS_H +#define GE_OP_CORRELATION_OPS_H + +#include "graph/operator_reg.h" + +namespace ge { +/** +*@brief Computes a 2D Correlation given 4D "x" and "filter" tensors. +* +*@par Inputs: +* @li filter: A 4D tensor of filters. +* @li x: A 4D tensor of input images, batch number must equal to batch +* number of "filter", and channel must equal to channel of "filter". +* +*@par Attributes: +* @li groups: set correlation mode, must be 1 or channel. +* +*@par Outputs: +*y: A Tensor. Has the same type as "x". + +*@par Third-party framework compatibility +* Compatible with caffe correlation custom operator. +*/ +REG_OP(Correlation) + .INPUT(filter, TensorType({DT_FLOAT16, DT_INT8})) + .INPUT(x, TensorType({DT_FLOAT16, DT_INT8})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_INT32})) + .ATTR(groups, Int, 1) + .OP_END_FACTORY_REG(Correlation) +} // namespace ge + +#endif // GE_OP_NN_CALCULATION_OPS_H diff --git a/third_party/fwkacllib/inc/ops/ctc_ops.h b/third_party/fwkacllib/inc/ops/ctc_ops.h index 2c75fd09..e907b828 100644 --- a/third_party/fwkacllib/inc/ops/ctc_ops.h +++ b/third_party/fwkacllib/inc/ops/ctc_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -137,6 +137,87 @@ REG_OP(CTCBeamSearchDecoder) .OUTPUT(log_probability, TensorType({DT_FLOAT, DT_DOUBLE})) .OP_END_FACTORY_REG(CTCBeamSearchDecoder) +/** +*@brief The Connectionist Temporal Classification loss. + +*@par Inputs: +*@li log_probs: Tensor of size (T, N, C), where T =input length, N =batch size, + and C = number of classes (including blank). + It represent the logarithmized probabilities of the outputs. +*@li targets: Tensor of size (N, S), where S= max target length. + It represent the target sequences. 
+*@li input_lengths: Tuple or tensor of size (N). It represent the lengths of the inputs. +*@li target_lengths: Tuple or tensor of size (N). It represent lengths of the targets. + +*@par Outputs: +*@li neg_log_likelihood: A loss value which is differentiable with respect to each input node. +*@li log_alpha: The probability of possible trace of input to target. + +*@par Attributes: +*@li blank : Blank label. Default 0. +*@li reduction: Specifies the reduction to apply to the output. Default: 'mean'. +*@li zero_infinity : Whether to zero infinite losses and the associated gradients. + +*@par Third-party framework compatibility +* Compatible with Pytorch CTCLoss operator. + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(CTCLossV2) + .INPUT(log_probs, TensorType({DT_FLOAT, DT_DOUBLE})) + .INPUT(targets, TensorType({DT_INT32, DT_INT64})) + .INPUT(input_lengths, TensorType({DT_INT32, DT_INT64})) + .INPUT(target_lengths, TensorType({DT_INT32, DT_INT64})) + .OUTPUT(neg_log_likelihood, TensorType({DT_FLOAT, DT_DOUBLE})) + .OUTPUT(log_alpha, TensorType({DT_FLOAT, DT_DOUBLE})) + .ATTR(blank, Int, 0) + .ATTR(reduction, String, "mean") + .ATTR(zero_infinity, Bool, false) + .OP_END_FACTORY_REG(CTCLossV2) + +/** +*@brief The Connectionist Temporal Classification loss grad. + +*@par Inputs: +*@li grad_out: Gradient renewal coefficient. Tensor of size (N), where N = batch size. +*@li log_probs: Tensor of size (T, N, C), where T =input length, N =batch size, + and C = number of classes (including blank). + It represent the logarithmized probabilities of the outputs. +*@li targets: Tensor of size (N, S), where S= max target length. + It represent the target sequences. +*@li input_lengths: Tuple or tensor of size (N). It represent the lengths of the inputs. +*@li target_lengths: Tuple or tensor of size (N). It represent lengths of the targets. +*@li neg_log_likelihood: A loss value which is differentiable with respect to each input node. 
+*@li log_alpha: The probability of possible trace of input to target. + +*@par Outputs: +*@li grad: Tensor of size (T, N, C), The grad of Connectionist Temporal Classification loss. + +*@par Attributes: +*@li blank : Blank label. Default 0. +*@li reduction: Specifies the reduction to apply to the output. Default: 'mean'. +*@li zero_infinity : Whether to zero infinite losses and the associated gradients. + +*@par Third-party framework compatibility +* Compatible with Pytorch CTCLoss operator. + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(CTCLossV2Grad) + .INPUT(grad_out, TensorType({DT_FLOAT, DT_DOUBLE})) + .INPUT(log_probs, TensorType({DT_FLOAT, DT_DOUBLE})) + .INPUT(targets, TensorType({DT_INT32, DT_INT64})) + .INPUT(input_lengths, TensorType({DT_INT32, DT_INT64})) + .INPUT(target_lengths, TensorType({DT_INT32, DT_INT64})) + .INPUT(neg_log_likelihood, TensorType({DT_FLOAT, DT_DOUBLE})) + .INPUT(log_alpha, TensorType({DT_FLOAT, DT_DOUBLE})) + .OUTPUT(grad, TensorType({DT_FLOAT, DT_DOUBLE})) + .ATTR(blank, Int, 0) + .ATTR(reduction, String, "mean") + .ATTR(zero_infinity, Bool, false) + .OP_END_FACTORY_REG(CTCLossV2Grad) } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_CTC_OPS_H_ \ No newline at end of file diff --git a/third_party/fwkacllib/inc/ops/data_flow_ops.h b/third_party/fwkacllib/inc/ops/data_flow_ops.h index bb937a75..6021f4e3 100644 --- a/third_party/fwkacllib/inc/ops/data_flow_ops.h +++ b/third_party/fwkacllib/inc/ops/data_flow_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -908,7 +908,7 @@ REG_OP(TensorArray) .OUTPUT(handle, TensorType({DT_RESOURCE})) .OUTPUT(flow, TensorType({DT_FLOAT})) .REQUIRED_ATTR(dtype, Type) - .ATTR(element_shape, ListInt, ge::UNKNOWN_SHAPE) + .ATTR(element_shape, ListInt, ge::UNKNOWN_RANK) .ATTR(dynamic_size, Bool, false) .ATTR(clear_after_read, Bool, true) .ATTR(identical_element_shapes, Bool, false) @@ -963,7 +963,7 @@ REG_OP(TensorArrayConcat) DT_QUINT8, DT_QINT32})) .OUTPUT(lengths, TensorType({DT_INT64})) .REQUIRED_ATTR(dtype, Type) - .ATTR(element_shape_except0, ListInt, ge::UNKNOWN_SHAPE) + .ATTR(element_shape_except0, ListInt, ge::UNKNOWN_RANK) .OP_END_FACTORY_REG(TensorArrayConcat) /** @@ -999,7 +999,7 @@ REG_OP(TensorArrayGather) DT_STRING, DT_COMPLEX64, DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT32})) .REQUIRED_ATTR(dtype, Type) - .ATTR(element_shape, ListInt, ge::UNKNOWN_SHAPE) + .ATTR(element_shape, ListInt, ge::UNKNOWN_RANK) .OP_END_FACTORY_REG(TensorArrayGather) /** @@ -1431,6 +1431,24 @@ REG_OP(OrderedMapClear) .OP_END_FACTORY_REG(OrderedMapClear) /** +*@brief FakeQueue, support tf api FixedLengthRecordReader. \n + +*@par Inputs: +*Including: +* @li resource: A Tensor of type DT_RESOURCE. + +*@par Outputs: +*handle: A Tensor of type DT_STRING ref. \n + +*@par Third-party framework compatibility +*Compatible with the TensorFlow operator FakeQueue. +*/ +REG_OP(FakeQueue) + .INPUT(resource, TensorType({DT_RESOURCE})) + .OUTPUT(handle, TensorType({DT_STRING})) + .OP_END_FACTORY_REG(FakeQueue) + +/** *@brief Returns the number of incomplete elements in the underlying container. 
\n *@par Attributes: @@ -2258,6 +2276,7 @@ REG_OP(LruCache) .ATTR(shared_name, String, "LruCache") .ATTR(cache_size, Int, 100000) .ATTR(load_factor, Float, 1) + .REQUIRED_ATTR(dtype, Type) .OP_END_FACTORY_REG(LruCache) /** @@ -2277,9 +2296,9 @@ REG_OP(CacheAdd) .INPUT(cache, TensorType({DT_RESOURCE})) .INPUT(ids, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32})) .OUTPUT(swap_in_id, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32})) - .OUTPUT(swap_in_idx, TensorType({DT_INT64})) + .OUTPUT(swap_in_idx, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32})) .OUTPUT(swap_out_id, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32})) - .OUTPUT(swap_out_idx, TensorType({DT_INT64})) + .OUTPUT(swap_out_idx, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32})) .OP_END_FACTORY_REG(CacheAdd) /** @@ -2295,9 +2314,65 @@ REG_OP(CacheAdd) REG_OP(CacheRemoteIndexToLocal) .INPUT(cache, TensorType({DT_RESOURCE})) .INPUT(ids, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32})) - .OUTPUT(local_idx, TensorType({DT_INT64})) + .OUTPUT(local_idx, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32})) .OP_END_FACTORY_REG(CacheRemoteIndexToLocal) +/** +*@brief CacheAllToLocalIndex, get id in cache +*@par Inputs: +*cache: resource data +*local_idx: id in cache. +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. 
+*/ +REG_OP(CacheAllIndexToLocal) + .INPUT(cache, TensorType({DT_RESOURCE})) + .OUTPUT(local_idx, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32})) + .REQUIRED_ATTR(dtype, Type) + .OP_END_FACTORY_REG(CacheAllIndexToLocal) + +/** +*@brief DynamicGetNext, dynamic get next data +*@par Inputs: +*x: the iterator, all types are available +*@par Outputs: +*y: the date in iterator, all types are available +*@par Attributes: +*output_types: types of all outputs +*output_shapes: shapes of all outputs +*_dynamic_graph_execute_mode: dynamic graph execution mode, +value is one of lazy_recompile and dynamic_execute +*_getnext_inputs_shape_range: shape ranges of outputs, +it works where _dynamic_graph_execute_mode is dynamic_execute +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(DynamicGetNext) + .INPUT(x, TensorType::ALL()) + .DYNAMIC_OUTPUT(y, TensorType::ALL()) + .ATTR(output_types, ListType, {}) + .ATTR(output_shapes, ListListInt, {{}, {}}) + .ATTR(_dynamic_graph_execute_mode, String, "lazy_recompile") + .ATTR(_getnext_inputs_shape_range, String, "") + .OP_END_FACTORY_REG(DynamicGetNext) + +/** +*@brief AdpGetNext +*@par Outputs: +*y: the data in iterator, all types are available +*@par Attributes: +*output_types: types of all outputs +*output_shapes: shapes of all outputs +*queue_name: cdqm queue name +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. 
+*/ +REG_OP(AdpGetNext) + .DYNAMIC_OUTPUT(y, TensorType::ALL()) + .ATTR(output_types, ListType, {}) + .ATTR(output_shapes, ListListInt, {{}, {}}) + .ATTR(queue_name, String, "") + .OP_END_FACTORY_REG(AdpGetNext) } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_DATA_FLOW_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/elewise_calculation_ops.h b/third_party/fwkacllib/inc/ops/elewise_calculation_ops.h index c64bc138..f61e2939 100644 --- a/third_party/fwkacllib/inc/ops/elewise_calculation_ops.h +++ b/third_party/fwkacllib/inc/ops/elewise_calculation_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -28,10 +28,13 @@ namespace ge { *@par Inputs: *Dynamic inputs, including: -* @li x: A list of Tensor objects, each with same shape and type. The supported types are: +*x: A list of Tensor objects, each with same shape and type. The supported types are: * float16, float32, double, int32, uint8, int16, int8, complex64, int64, * qint8, quint8, qint32, uint16, complex128, uint32, uint64. It's a dynamic input. \n +*@par Attributes: +*N: An required attribute of type int32, means nums of inputs. \n + *@par Outputs: *y: A Tensor. Has the same shape and type as the elements of "x". \n @@ -122,7 +125,8 @@ REG_OP(MinimumGrad) *@par Inputs: *One input: *x:A Tensor. Must be one of the following types: bool, float16, float, int8, int32, uint32, uint8, - int64, uint64, int16, uint16, double, complex64, complex128, qint8, quint8, qint16, quint16, qint32. \n + int64, uint64, int16, uint16, double, complex64, complex128, qint8, quint8, qint16, quint16, qint32. + For float32 type, the actual calculation on the chip is based on float16. \n *@par Attributes: *dst_type: An required attribute of type int32, specifying the dst data type. 
\n @@ -142,6 +146,8 @@ REG_OP(Cast) /** *@brief Returns the truth value of (x1 >= x2) element-wise. \n +*when input is int32 and (x2 - x1) > 2**31 or < -2**31 +*aicore accuracy is not guaranteed \n *@par Inputs: *Two inputs, including: @@ -163,6 +169,8 @@ REG_OP(GreaterEqual) /** *@brief Returns the truth value of (x1 < x2) element-wise. \n +*when input is int32 and (x2 - x1) > 2**31 or < -2**31 +*aicore accuracy is not guaranteed \n *@par Inputs: *Two inputs, including: @@ -322,8 +330,8 @@ REG_OP(Sub) *@brief computes the absolute value of a tensor. \n *@par Inputs: -*One inputs, including: -* @li x: A Tensor. Must be one of the following types: float16, float32, double, int32, int64. \n +*One input, including: \n +*x: A Tensor. Must be one of the following types: float16, float32, double, int32, int64. \n *@par Outputs: *y: A Tensor. Has the same type as "x". \n @@ -563,6 +571,8 @@ REG_OP(InvGrad) /** *@brief: Returns the truth value of (x <= y) element-wise. \n +*when input is int32 and (x2 - x1) > 2**31 or < -2**31 +*aicore accuracy is not guaranteed \n *@par Inputs: * Two inputs, including: @@ -611,6 +621,15 @@ REG_OP(Log1p) *@par Outputs: *y: A Tensor. Has the same type as "x1". + +*@attention Constraints: +*@li x2: The input data does not support 0 +*@li When NUM exceeds 2048 , the accuracy of operator cannot guarantee the +*requirement of double thousandths in the mini form +*@li Due to different architectures, the calculation results of this operator +*on NPU and CPU may be inconsistent +*@li If shape is expressed as (D1,D2... ,Dn), then D1*D2... *DN<=1000000,n<=8 + *@par Third-party framework compatibility *Compatible with the TensorFlow operator Mod. */ @@ -1020,7 +1039,7 @@ REG_OP(BesselI1e) * y = log_base(shift + scale * x), with "base" > 0. \n * @par Inputs: -* @li x: A Tensor of type complex64, complex128, float16, float32 or double. \n +* x: A Tensor of type complex64, complex128, float16, float32 or double. 
\n * @par Attributes: * @li base: An optional float32, specifying the base "e". Defaults to "-1.0" @@ -1065,7 +1084,7 @@ REG_OP(Log) * uint8, int8, uint16, int16, int32, int64, complex64, complex128. \n * @attention Constraints: -* @li "x1" and "x2" have incompatible shapes or types. \n +* "x1" and "x2" have incompatible shapes or types. \n * @par Third-party framework compatibility * Compatible with the TensorFlow operator Multiply. @@ -1451,6 +1470,8 @@ REG_OP(ReciprocalGrad) /** *@brief Returns the truth value of (x1 > x2) element-wise. \n +*when input is int32 and (x2 - x1) > 2**31 or < -2**31 +*aicore accuracy is not guaranteed \n *@par Inputs: *@li x1: A Tensor of type float16, float32, double, int64, int32, int16, int8, @@ -2042,6 +2063,15 @@ REG_OP(FloorDiv) * *@par Outputs: *y: Result remainder. + +*@attention Constraints: +*@li x2: The input data does not support 0 +*@li When NUM exceeds 2048 , the accuracy of operator cannot guarantee the +*requirement of double thousandths in the mini form +*@li Due to different architectures, the calculation results of this operator +*on NPU and CPU may be inconsistent +*@li If shape is expressed as (D1,D2... ,Dn), then D1*D2... *DN<=1000000,n<=8 + *@par Third-party framework compatibility * Compatible with the TensorFlow operator FloorMod. */ @@ -2168,6 +2198,14 @@ REG_OP(Tan) *@par Outputs: *y: A Tensor. Has the same type as "x1". \n +*@attention Constraints: +*@li x2: The input data does not support 0 +*@li When NUM exceeds 2048 , the accuracy of operator cannot guarantee the +*requirement of double thousandths in the mini form +*@li Due to different architectures, the calculation results of this operator +*on NPU and CPU may be inconsistent +*@li If shape is expressed as (D1,D2... ,Dn), then D1*D2... *DN<=1000000,n<=8 + *@par Third-party framework compatibility *@li Compatible with the TensorFlow operator TruncateMod. 
*/ @@ -2425,6 +2463,25 @@ REG_OP(Eltwise) .OP_END_FACTORY_REG(Eltwise) /** + *@brief Computes the inverse error function of each element of input. \n + + *@par Inputs: + *One inputs, including: + * @li input_x: A tensor. Must be one of the following types: + * float16, float32. \n + + *@par Outputs: + *y: A Tensor with the same type and shape of input_x's. \n + + *@par Third-party framework compatibility + *Compatible with the Pytorch operator Erfinv. \n + */ +REG_OP(Erfinv) + .INPUT(input_x, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(output_y, TensorType({DT_FLOAT, DT_FLOAT16})) + .OP_END_FACTORY_REG(Erfinv) + +/** *@brief Computes element-wise population count. \n *@par Inputs: @@ -2829,9 +2886,9 @@ REG_OP(AdamApplyOneAssign) *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. */ REG_OP(LambApplyOptimizerAssign) - .INPUT(input0, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(input1, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(input2, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(grad, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(inputv, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(inputm, TensorType({DT_FLOAT16,DT_FLOAT})) .INPUT(input3, TensorType({DT_FLOAT16,DT_FLOAT})) .INPUT(mul0_x, TensorType({DT_FLOAT16,DT_FLOAT})) .INPUT(mul1_x, TensorType({DT_FLOAT16,DT_FLOAT})) @@ -2842,6 +2899,8 @@ REG_OP(LambApplyOptimizerAssign) .INPUT(do_use_weight, TensorType({DT_FLOAT16,DT_FLOAT})) .INPUT(weight_decay_rate, TensorType({DT_FLOAT16,DT_FLOAT})) .OUTPUT(output0, TensorType({DT_FLOAT16,DT_FLOAT})) + .OUTPUT(inputv, TensorType({DT_FLOAT16,DT_FLOAT})) + .OUTPUT(inputm, TensorType({DT_FLOAT16,DT_FLOAT})) .OP_END_FACTORY_REG(LambApplyOptimizerAssign) /** @@ -2873,7 +2932,8 @@ REG_OP(LambApplyWeightAssign) .INPUT(input1, TensorType({DT_FLOAT16,DT_FLOAT})) .INPUT(input2, TensorType({DT_FLOAT16,DT_FLOAT})) .INPUT(input3, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(input4, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(input_param, TensorType({DT_FLOAT16,DT_FLOAT})) + 
.OUTPUT(input_param, TensorType({DT_FLOAT16,DT_FLOAT})) .OP_END_FACTORY_REG(LambApplyWeightAssign) /** @@ -3183,12 +3243,14 @@ REG_OP(Fills) *@brief Add tensor with scale. \n *@par Inputs: -*Five inputs, including: -* @li x1: A Tensor. Must be one of the following types:int32,int16, float16, float32. -* @li x2: A scale. Must be float. \n +*One input, including: \n +*x: A Tensor. Must be one of the following types:int32,int16, float16, float32. \n + +*@par Attributes: +*value: A scale. Must be float. \n *@par Outputs: -*@li y: A Tensor. Has the same type and shape as "x1". \n +*y: A Tensor. Has the same type and shape as "x1". \n *@par Third-party framework compatibility: * Compatible with the Pytorch operator adds. @@ -3329,8 +3391,441 @@ REG_OP(TensorRedirect) .OUTPUT(output_x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32, DT_UINT8, DT_INT64, DT_INT16, DT_UINT16, DT_UINT64, DT_UINT32})) .OP_END_FACTORY_REG(TensorRedirect) -} // namespace ge +/** +* @brief Performs the element-wise division of tensor x2 by tensor x3, +* multiply the result by the scalar value and add it to tensor x1 + +* @par Inputs: +* Three inputs, including: +* @li input_data: A mutable input Tensor. Must be one of the following types: +* float16, float32. +* @li x1: A mutable input Tensor of the same type as x1. +* @li x2: A mutable input Tensor of the same type as x1. +* @li value: A mutable input Tensor. Must be one of the following types: +* float16, float32, int32. \n + +* @par Outputs: +* @li y: A mutable Tensor. Has the same type as "x1". \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator Addcdiv. 
+*/ +REG_OP(Addcdiv) + .INPUT(input_data, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(x1, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(x2, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(value, TensorType({ DT_FLOAT16, DT_FLOAT, DT_INT32})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .OP_END_FACTORY_REG(Addcdiv) + +/** +* @brief Performs the element-wise multiplication of tensor x2 by tensor x3, +* multiply the result by the scalar value and add it to tensor input_data + + +* @par Inputs: +* Three inputs, including: +* @li input_data: A mutable input Tensor. Must be one of the following types: +* float16, float32, int8, int32, uint8. +* @li x1: A mutable input Tensor of the same type as x1. +* @li x2: A mutable input Tensor of the same type as x1. +* @li value: A tensor which includes only one element of the same type as x1. \n + +* @par Outputs: +* @li y: A mutable output Tensor. Has the same type as "x1". \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator Addcmul. +*/ +REG_OP(Addcmul) + .INPUT(input_data, TensorType({ DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32, DT_UINT8 })) + .INPUT(x1, TensorType({ DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32, DT_UINT8 })) + .INPUT(x2, TensorType({ DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32, DT_UINT8 })) + .INPUT(value, TensorType({ DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32, DT_UINT8 })) + .OUTPUT(y, TensorType({ DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32, DT_UINT8 })) + .OP_END_FACTORY_REG(Addcmul) +/** +* @brief Computes the result of x2 * alpha + x1. + +* @par Inputs: +* @li x1: An ND tensor of type float16, float32, int32. +* @li x2: An ND tensor of type float16, float32, int32. +* @li alpha: A scalar tensor of type float16, float32. \n + +* @par Outputs: +* @li y: An ND tensor tensor with the same shape and type as "x1". \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator Axpy. 
+*/ +REG_OP(AxpyV2) + .INPUT(x1, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) + .INPUT(x2, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) + .INPUT(alpha, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) + .OP_END_FACTORY_REG(AxpyV2) + +/** +* @brief Computes the result of x1 - x2. + +* @par Inputs: +* @li x1: An ND tensor of type float16, float, int32. +* @li x2: An ND tensor of type float16, float, int32. \n + +* @par Outputs: +* @li y: An ND tensor tensor with the same type as "x1". \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator Sub. +*/ +REG_OP(PtSub) + .INPUT(x1, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) + .INPUT(x2, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) + .OP_END_FACTORY_REG(PtSub) + +/** +* @brief Add the partial values of two tensors in format NC1HWC0. + +* @par Inputs: +* @li x1: A Tensor in 5HD, and must be one of the following types: float16, +* float32. \n +* @li x2: A Tensor of the same type as "x1", and the same shape as "x1", +* except for the C1 value. \n + +* @par Attributes: +* @li x1_c1_offset: A required int. Offset value of C1 in "x1". \n +* @li x2_c1_offset: A required int. Offset value of C1 in "x2". \n +* @li c1_len: A required int. C1 len of "y". The value must be less than +* the difference between C1 and offset in "x1" and "x2". \n + +* @par Outputs: +* @li y: A Tensor of the same type as "x1", and the same shape as "x1", +* except for the C1 value. Record the result after adding. 
\n +*/ +REG_OP(StrideAdd) + .INPUT(x1, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .INPUT(x2, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .OUTPUT(y, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .REQUIRED_ATTR(x1_c1_offset, Int) + .REQUIRED_ATTR(x2_c1_offset, Int) + .REQUIRED_ATTR(c1_len, Int) + .OP_END_FACTORY_REG(StrideAdd) + +/** +* @brief Compare two tensors are totally equal or not, only output a bool value" + +* @par Inputs: +* Two inputs, including: +* @li input_x: A Tensor. the first tensor. \n +* @li input_y: A Tensor. the second tensor. \n + +* @par Outputs: +* @li output_z: A Tensor. Bool type, compare result of the two inputs. \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch equal operator. \n +*/ +REG_OP(TensorEqual) + .INPUT(input_x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8})) + .INPUT(input_y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8})) + .OUTPUT(output_z, TensorType({DT_BOOL})) + .OP_END_FACTORY_REG(TensorEqual) + +/** + * @brief Element-wise min of each of the input tensors (with Numpy-style broadcasting support). + * All inputs and outputs must have the same data type. This operator supports multidirectional + * (i.e., Numpy-style) broadcasting + * + * @par inputs + * one input including: + * @li x: dynamic input A Tensor. Must be one of the following types: float32, float16, double, int32, int64 + * + * @par output + * one output including: + * @li y:A Tensor of the same type as x + * + */ +REG_OP(MaxN) + .DYNAMIC_INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_FLOAT64, DT_INT32, DT_INT64})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_FLOAT64, DT_INT32, DT_INT64})) + .OP_END_FACTORY_REG(MaxN) + + +/** + * @brief Calculates x * maske * value. + * + * @par Inputs: + * @li x: An tensor of type float16 or float32, specifying the input to the data layer. + * @li mask: An tensor of type int8 or float16 or float32, be same shape with x. 
\n + * + * @par Attributes: + * value: A optional float. \n + * + * @par Outputs: + * y: The output tensor of type float16 or float32. + @ li y:A Tensor of the same type and shape as x + * + */ +REG_OP(MaskedScale) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT32})) + .INPUT(mask, TensorType({DT_INT8, DT_FLOAT16, DT_FLOAT32})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT32})) + .REQUIRED_ATTR(value, Float) + .OP_END_FACTORY_REG(MaskedScale) + +/** + * @brief Calculate the lerp function. \n + + * @par Inputs: + * Three inputs, including: + * @li start: A tensor. Must be one of the following types: + * float16, float32. \n + * @li end: A tensor. Must be one of the following types: + * float16, float32. \n + * @li weight: A tensor. Must be one of the following types: + * float16, float32. \n + + * @par Outputs: + * y: A Tensor with the same type and shape of input_x's. \n + + * @par Third-party framework compatibility + * Compatible with the Pytorch operator Lerp. \n + */ +REG_OP(Lerp) + .INPUT(start, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(end, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(weight, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .OP_END_FACTORY_REG(Lerp) + +/** +*@brief Returns the num value of abs(x1-x2) > atol+rtol*abs(x2) element-wise. \n + +* +*@par Inputs: +*@li x1: A tensor. Must be one of the following types: float32, int32, uint8, int8, float16 +*@li x2: A tensor of the same type as "x1". +* +*@par Attributes: +* atol: Defaults to "1e-05". +* rtol: Defaults to "1e-03". +* +*@par Outputs: +* num: A tensor of type float32. +* +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. 
+* +*/ +REG_OP(DataCompare) + .INPUT(x1, TensorType({ DT_FLOAT16, DT_FLOAT,DT_INT8, DT_UINT8, DT_INT32 })) + .INPUT(x2, TensorType({ DT_FLOAT16, DT_FLOAT,DT_INT8, DT_UINT8, DT_INT32 })) + .OUTPUT(num, TensorType({DT_FLOAT})) + .ATTR(atol, Float, 1e-5) + .ATTR(rtol, Float, 1e-3) + .OP_END_FACTORY_REG(DataCompare) + +/** +*@brief Hardmax(element in input, axis) = 1 if the element is the first maximum value along the specified axis, 0 +*otherwise The input does not need to explicitly be a 2D vector.The "axis" attribute indicates the dimension along +*which Hardmax will be performed.The output tensor has the same shape and contains the Hardmax values of the +*corresponding input. +* +*@par inputs +*one input including: +*@li x: input A Tensor.Must be one of the following types:float32,float16 +* +*@par Attributes: +*@li axis:A required int attribute that decides which dimension will be used to cal the hard_max +* +*@par output: +*one output including: +*@li y:A Tensor of the same type as x +* +*/ +REG_OP(HardMax) + .INPUT(x, TensorType({ DT_FLOAT16, DT_FLOAT })) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(axis, Int, -1) + .OP_END_FACTORY_REG(HardMax) + +/** +* @brief Computes the dot product (inner product) of two tensors. This function does not broadcast. + +* @par Inputs: +* Two inputs, including: +* @li input_x: A Tensor. the first tensor must be 1d. \n +* @li input_y: A Tensor. the second tensor must be 1d. \n + +* @par Outputs: +* @li output: A Tensor. Result of the two inputs, must be 1d. \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch dot operator. 
\n +*/ +REG_OP(Dot) + .INPUT(input_x, TensorType({DT_FLOAT, DT_FLOAT16, DT_UINT8, DT_INT8, DT_INT32})) + .INPUT(input_y, TensorType({DT_FLOAT, DT_FLOAT16, DT_UINT8, DT_INT8, DT_INT32})) + .OUTPUT(output, TensorType({DT_FLOAT, DT_FLOAT16, DT_UINT8, DT_INT8, DT_INT32})) + .OP_END_FACTORY_REG(Dot) + +/** +*@brief Returns a new tensor with boolean elements representing \n +*if each element of input is “close†to the corresponding element of other \n + +*@par Inputs: +*Two inputs, including: +* @li x1: A tensor. Must be one of the following types: +* float16, float32, int32. \n +* @li x2: A tensor with the same type and shape of x1's. \n + +*@par Attributes: +*@li rtol: An optional float.Defaults to 1e-05. \n +*@li atol: An optional float.Defaults to 1e-08. \n +*@li equal_nan: An optional bool.Defaults to false. \n + +*@par Outputs: +*y: A Tensor bool with the same shape of x1's. \n + +*@par Third-party framework compatibility +*Compatible with the Pytorch operator isclose. \n +*/ +REG_OP(IsClose) + .INPUT(x1, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) + .INPUT(x2, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) + .OUTPUT(y, TensorType({DT_BOOL})) + .ATTR(rtol, Float, 1e-05) + .ATTR(atol, Float, 1e-08) + .ATTR(equal_nan, Bool, false) + .OP_END_FACTORY_REG(IsClose) + +/** +* @brief Returns the reverse tensor of the ArgMax operator of a tensor. \n + +* @par Inputs: +* three input, including: +* var: A Tensor of type float16, float32, int32 or int8. \n +* indices: A Tensor of type int32. \n +* updates: A Tensor of type float16, float32, int32 or int8. \n + +* @par Attributes: +* @li dimension: An integer of type int, specifying the axis information of the index with the maximum value.\n + +* @par Outputs: +* y: A Tensor of type float16, float32, int32 or int8. \n +* +*@attention Constraints: +*@li indices: only support int32,and shape same to "updates" +*@li The value range of "dimension" is [-dims, dims - 1]. "dims" is the dimension length of "x". 
+*@li y:A Tensor, the type and shape is same to "var" \n + +*@par Third-party framework compatibility +* not support all scene like pytorch operator scatter +* exp: +* var.shape=[2,3,4,5], dim=2, the shape of indices and updates should be [2,3,5] +* not support the shape of indices and updates is [2,3,2,5] like pytorch operator scatter. \n +*/ +REG_OP(ArgMaxGrad) + .INPUT(var, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8})) + .INPUT(indices, TensorType({DT_INT32})) + .INPUT(updates, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8})) + .REQUIRED_ATTR(dimension, Int) + .OP_END_FACTORY_REG(ArgMaxGrad) + +/** +* @brief Returns the reverse tensor of the ArgMax operator of a tensor. \n + +* @par Inputs: +* three input, including: +* var: A Tensor of type float16, float32, int32 or int8. \n +* indices: A Tensor of type int32. \n +* updates: A Tensor of type float16, float32, int32 or int8. \n +* assist: A Tensor of int32,also a assist matrix and it's shape must match the shape of var \n + +* @par Attributes: +* @li dimension: An integer of type int, specifying the axis information of the index with the maximum value.\n + +* @par Outputs: +* y: A Tensor of type float16, float32, int32 or int8. \n + +*@attention Constraints: +*@li indices: only support int32,and shape same to "updates" +*@li The value range of "dimension" is [-dims, dims - 1]. "dims" is the dimension length of "x". +*@li y:A Tensor, the type and shape is same to "var" \n + +*@par Third-party framework compatibility +* not support all scene like pytorch operator scatter +* exp: +* var.shape=[2,3,4,5], dim=2, the shape of indices and updates should be [2,3,5] +* not support the shape of indices and updates is [2,3,2,5] like pytorch operator scatter. 
\n +*/ +REG_OP(ArgMaxGradD) + .INPUT(var, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8})) + .INPUT(indices, TensorType({DT_INT32})) + .INPUT(updates, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8})) + .INPUT(assist, TensorType({DT_INT32})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8})) + .REQUIRED_ATTR(dimension, Int) + .OP_END_FACTORY_REG(ArgMaxGradD) + +/** +*@brief Calculates the reversed outputs of the function "AddMatMatElements" +* c = c * beta + alpha * a * b + +*@par Inputs: +*Three inputs, including: +* @li c: A mutable Tensor. Must be one of the following types: +* float16, float32. +* @li a: A mutable Tensor of the same type as "c". +* @li b: A mutable Tensor of the same type as "c". +* @li beta: A mutable scalar of the same type as "c". +* @li alpha: A mutable scalar of the same type as "c". \n + +*@par Outputs: +* @li c: A mutable Tensor. Has the same type as "c". \n + +*@par Third-party framework compatibility +* Compatible with the TensorFlow operator AddMatMatElements. +*/ +REG_OP(AddMatMatElements) + .INPUT(c, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(a, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(b, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(beta, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(alpha, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(c, TensorType({DT_FLOAT, DT_FLOAT16})) + .OP_END_FACTORY_REG(AddMatMatElements) + +/** +*@brief Returns cosine similarity between x1 and x2,computed along dim. \n + +*@par Inputs: +*Two inputs, including: +* @li input_x1: A tensor. Must be the following types: +* float32. \n + +*@par Inputs: +*@li input_x2: A tensor. Must of the following types: +* float32. \n + +*@par Outputs: +*@li output_y: A Tensor with the same type of input_x's. \n + +*@par Third-party framework compatibility +*Compatible with the Pytorch operator CosineSimilarity. \n +*/ +REG_OP(CosineSimilarity) + .INPUT(input_x1, TensorType({DT_FLOAT})) /* "First operand." 
*/ + .INPUT(input_x2, TensorType({DT_FLOAT})) /* "Second operand." */ + .OUTPUT(output_y, TensorType({DT_FLOAT})) /* "Result, has same element type as two inputs" */ + .ATTR(dim, Int, 1) + .ATTR(eps, Float, 1e-8) + .OP_END_FACTORY_REG(CosineSimilarity) + +} // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_ELEWISE_CALCULATION_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/functional_ops.h b/third_party/fwkacllib/inc/ops/functional_ops.h index 598d3ad3..b09ac058 100644 --- a/third_party/fwkacllib/inc/ops/functional_ops.h +++ b/third_party/fwkacllib/inc/ops/functional_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/get_data_ops.h b/third_party/fwkacllib/inc/ops/get_data_ops.h index 33dc4f14..e5518ef8 100644 --- a/third_party/fwkacllib/inc/ops/get_data_ops.h +++ b/third_party/fwkacllib/inc/ops/get_data_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/globalavgpool.h b/third_party/fwkacllib/inc/ops/globalavgpool.h new file mode 100644 index 00000000..06f03d30 --- /dev/null +++ b/third_party/fwkacllib/inc/ops/globalavgpool.h @@ -0,0 +1,49 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! + * \file globalavgpool.h + * \brief + */ +#ifndef OPS_BUILT_IN_OP_PROTO_INC_GLOBALAVERAGEPOOL_H_ +#define OPS_BUILT_IN_OP_PROTO_INC_GLOBALAVERAGEPOOL_H_ + +#include "graph/operator_reg.h" + +namespace ge { +/** +*@brief GlobalAveragePool consumes an input tensor X and applies average pooling across the values in the same channel. +This is equivalent to AveragePool with kernel size equal to the spatial dimension of input tensor \n + +*@par Inputs: +*@li x: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), +where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. +For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. + +*@par Outputs: +*y: Output data tensor from pooling across the input tensor. The output tensor has the same rank as the input. +The first two dimensions of output shape are the same as the input (N x C), while the other dimensions are all 1 + +*@par Restrictions: +*Warning: This operator can be integrated only by configuring INSERT_OP_FILE of aclgrphBuildModel. Please do not use it directly. 
+*/ +REG_OP(GlobalAveragePool) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .OP_END_FACTORY_REG(GlobalAveragePool) +} // namespace ge + +#endif // OPS_BUILT_IN_OP_PROTO_INC_GLOBALAVGPOOL_H_ \ No newline at end of file diff --git a/third_party/fwkacllib/inc/ops/hcom_ops.h b/third_party/fwkacllib/inc/ops/hcom_ops.h index b90b225e..497f6a68 100644 --- a/third_party/fwkacllib/inc/ops/hcom_ops.h +++ b/third_party/fwkacllib/inc/ops/hcom_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -45,8 +45,6 @@ REG_OP(HcomAllGather) .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16, DT_INT64, DT_UINT64})) .REQUIRED_ATTR(rank_size, Int) .REQUIRED_ATTR(group, String) - .ATTR(alpha, Float, 1.0) - .ATTR(beta, Float, 0.0) .OP_END_FACTORY_REG(HcomAllGather) /** @@ -77,8 +75,6 @@ REG_OP(HcomAllReduce) .REQUIRED_ATTR(group, String) .ATTR(fusion, Int, 1) .ATTR(fusion_id, Int, -1) - .ATTR(alpha, Float, 1.0) - .ATTR(beta, Float, 0.0) .OP_END_FACTORY_REG(HcomAllReduce) /** @@ -91,7 +87,7 @@ REG_OP(HcomAllReduce) input of this rank will be broadcast to other ranks. * @li fusion: A required integer identifying if the op need to fusion,the default value is none fusion - * @li fusion: A required integer identifying the fusion id if para fusion + * @li fusion_id: A required integer identifying the fusion id if para fusion is set. * @li group: A required string identifying the group name of ranks participating in the op. 
@@ -109,11 +105,40 @@ REG_OP(HcomBroadcast) .REQUIRED_ATTR(group, String) .ATTR(fusion, Int, 0) .ATTR(fusion_id, Int, -1) - .ATTR(alpha, Float, 1.0) - .ATTR(beta, Float, 0.0) .OP_END_FACTORY_REG(HcomBroadcast) /** + * @brief Performs reduction from other ranks to the root rank + * @par Inputs: +* @li root_rank: A required integer identifying the root rank in the op + the reduction result will be on this root rank + * x: A tensor. Must be one of the following types: int8, int16, int32, float16, + float32. + * @par Attributes: + * @li reduction: A required string identifying the reduction operation to + perform. The supported operations are: "sum", "max", "min", "prod". + * @li group: A required string identifying the group name of ranks + participating in the op. + * @li fusion: An optional integer identifying the fusion flag of the op. + 0: no fusion; 1 (default): fusion; 2: fusion the ops by fusion id. + * @li fusion_id: An optional integer identifying the fusion id of the op. + * The HcomReduce ops with the same fusion id will be fused. + * @par Outputs: + * y: A Tensor. Has the same type as "x". + * @attention Constraints: + *"group" is limited to 128 characters. Use "hccl_world_group" + as the name of a world group. + */ +REG_OP(HcomReduce) + .INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16})) + .REQUIRED_ATTR(root_rank, Int) + .REQUIRED_ATTR(reduction, String) + .REQUIRED_ATTR(group, String) + .ATTR(fusion, Int, 0) + .ATTR(fusion_id, Int, -1) + .OP_END_FACTORY_REG(HcomReduce) +/** + * @brief Performs reduction across all input tensors, scattering in equal blocks among ranks, each rank getting a chunk of data based on its rank index. 
@@ -139,8 +164,6 @@ REG_OP(HcomReduceScatter) .REQUIRED_ATTR(reduction, String) .REQUIRED_ATTR(group, String) .REQUIRED_ATTR(rank_size, Int) - .ATTR(alpha, Float, 1.0) - .ATTR(beta, Float, 0.0) .OP_END_FACTORY_REG(HcomReduceScatter) /** @@ -167,8 +190,6 @@ REG_OP(HcomSend) .REQUIRED_ATTR(group, String) .REQUIRED_ATTR(sr_tag, Int) .REQUIRED_ATTR(dest_rank, Int) - .ATTR(alpha, Float, 1.0) - .ATTR(beta, Float, 0.0) .OP_END_FACTORY_REG(HcomSend) /** @@ -202,8 +223,6 @@ REG_OP(HcomReceive) .REQUIRED_ATTR(src_rank, Int) .REQUIRED_ATTR(shape, ListInt) .REQUIRED_ATTR(dtype, Type) - .ATTR(alpha, Float, 1.0) - .ATTR(beta, Float, 0.0) .OP_END_FACTORY_REG(HcomReceive) /** @@ -219,6 +238,15 @@ REG_OP(HcomRemoteRead) .REQUIRED_ATTR(dtype, Type) .OP_END_FACTORY_REG(HcomRemoteRead) +/** + * @brief Performs Remote Ref Read of input tensors + * @par Inputs: + * remote: A tensor. describing the remote memory address to read: u64 remoteId, u64 addrRemote, u64 length + * cache_var: The local base address + * local_offset: Skip step length + * @par Outputs: + * cache_var: The local base address + */ REG_OP(HcomRemoteRefRead) .INPUT(remote, TensorType({DT_UINT64})) .INPUT(cache_var, TensorType({DT_UINT64})) @@ -239,11 +267,90 @@ REG_OP(HcomRemoteWrite) .INPUT(local, TensorType::ALL()) .OP_END_FACTORY_REG(HcomRemoteWrite) +/** + * @brief Performs Remote Write of input tensors + * @par Inputs: + * remote: A tensor. describing the remote memory address to write: u64 remoteId, u64 addrRemote, u64 length + * @par Inputs: + * local: A Tensor. whose value is length / size_of(Type) + */ REG_OP(HcomRemoteScatterWrite) .INPUT(remote, TensorType({DT_INT64, DT_UINT64})) .INPUT(local, TensorType::ALL()) .OPTIONAL_INPUT(local_offset, TensorType({DT_UINT64})) .OP_END_FACTORY_REG(HcomRemoteScatterWrite) +/** + * @brief All ranks send different amount of data to, and receive different + amount of data from, all ranks. + * @par Inputs: + * Five inputs, including: + * @li send_data: A tensor. 
the memory to send. + * @li send_counts: A list, where entry i specifies the number of elements in + send_data to send to rank i. + * @li send_displacements: A list, where entry i specifies the displacement + (offset from sendbuf) from which to send data to rank i. + * @li recv_counts: A list, where entry i specifies the number of + elements to receive from rank i. + * @li recv_displacements: A list, where entry i specifies the displacement + (offset from recv_data) to which data from rank i should be written. + * @par Outputs: + * recv_data: A Tensor has same element type as send_data. + * @par Attributes: + * @li group: A string identifying the group name of ranks participating in + the op. +* @attention all ranks participating in the op should be full-mesh networking + using the RDMA. + */ +REG_OP(HcomAllToAllV) + .INPUT(send_data, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16, DT_INT64, DT_UINT64})) + .INPUT(send_counts, TensorType({DT_INT64})) + .INPUT(send_displacements, TensorType({DT_INT64})) + .INPUT(recv_counts, TensorType({DT_INT64})) + .INPUT(recv_displacements, TensorType({DT_INT64})) + .OUTPUT(recv_data, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16, DT_INT64, DT_UINT64})) + .REQUIRED_ATTR(group, String) + .OP_END_FACTORY_REG(HcomAllToAllV) + +/** + * @brief All ranks send different amount of data to, and receive different + amount of data from, all ranks. And concat all data described by addrinfo + together into output gathered. + * @par Inputs: + * Four inputs, including: + * @li addrinfo: A tensor, describing the memory info(address, length) to send. + * @li addrinfo_count_per_rank: A list, where entry i specifies the number of + elements in send_data to send to rank i. + * @li recv_counts: A list, where entry i specifies the number of + elements to receive from rank i. 
 + * @li recv_displacements: A list, where entry i specifies the displacement + (offset from recv_data) to which data from rank i should be written. + * @par Outputs: + * Two outputs, including: + * @li recv_data: A Tensor has same element type as dtype. + * @li gathered: A Tensor has same element type as dtype. + * @par Attributes: + * @li group: A string identifying the group name of ranks participating in + the op. + * @li dtype: Datatype of send buffer elements. + * @li addr_length: describing the element memory length in the addrinfo. + -2: all element memory length in the addrinfo is the same, but it is unknown. + -1: all element memory length is unknown. + >0: all element memory length in the addrinfo is the same. the attr value is the memory length. + * @attention all ranks participating in the op should be full-mesh networking + using the RDMA. + */ +REG_OP(HcomGatherAllToAllV) + .INPUT(addrinfo, TensorType({DT_UINT64})) + .INPUT(addrinfo_count_per_rank, TensorType({DT_INT64})) + .INPUT(recv_counts, TensorType({DT_INT64})) + .INPUT(recv_displacements, TensorType({DT_INT64})) + .OUTPUT(recv_data, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16, DT_INT64, DT_UINT64})) + .OUTPUT(gathered, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16, DT_INT64, DT_UINT64})) + .REQUIRED_ATTR(group, String) + .REQUIRED_ATTR(dtype, Type) + .REQUIRED_ATTR(addr_length, Int) + .OP_END_FACTORY_REG(HcomGatherAllToAllV) + } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_HCOM_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/hvd_ops.h b/third_party/fwkacllib/inc/ops/hvd_ops.h index a49ec5ed..00299ef7 100644 --- a/third_party/fwkacllib/inc/ops/hvd_ops.h +++ b/third_party/fwkacllib/inc/ops/hvd_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/third_party/fwkacllib/inc/ops/image_ops.h b/third_party/fwkacllib/inc/ops/image_ops.h index ce3262f9..6909345a 100644 --- a/third_party/fwkacllib/inc/ops/image_ops.h +++ b/third_party/fwkacllib/inc/ops/image_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -24,6 +24,22 @@ #include "graph/operator_reg.h" namespace ge { +/** +*@brief Decode the frame(s) of a GIF-encoded image to a uint8 tensor . \n + +*@par Inputs: +*@li contents:A Tensor of type string. 0-D. The GIF-encoded image. \n + +*@par Outputs: +*image:A Tensor of type uint8. \n + +*@par Third-party framework compatibility +*Compatible with tensorflow DecodeGif operator. +*/ +REG_OP(DecodeGif) + .INPUT(contents, TensorType({DT_STRING})) + .OUTPUT(image, TensorType({DT_UINT8})) + .OP_END_FACTORY_REG(DecodeGif) /** *@brief Adjust the hue of one or more images . \n @@ -31,11 +47,12 @@ namespace ge { *@par Inputs: *Input images is a tensor of at least 3 dimensions. The last dimension is interpretted as channels, and must be three. Inputs include: -*@li images:A Tensor of type float. Images to adjust. At least 3-D. +*@li images:A Tensor of type float. Images to adjust. At least 3-D. The format +must be NHWC. *@li delta:A Tensor of type float. A float delta to add to the hue . \n *@par Outputs: -*y:A Tensor of type float . \n +*y:A Tensor of type float. The format must be NHWC. \n *@attention Constraints: *Input images is a tensor of at least 3 dimensions. The last dimension is @@ -57,11 +74,12 @@ REG_OP(AdjustHue) *@par Inputs: *Input images is a tensor of at least 3 dimensions. The last dimension is interpretted as channels, and must be three. Inputs include: -*@li images:A Tensor of type float. Images to adjust. At least 3-D. +*@li images:A Tensor of type float. Images to adjust. 
At least 3-D. The format +must be NHWC. *@li scale:A Tensor of type float. A float scale to add to the saturation . \n *@par Outputs: -*y:A Tensor of type float . \n +*y:A Tensor of type float. The format must be NHWC. \n *@attention Constraints: *Input images is a tensor of at least 3 dimensions. The last dimension is @@ -83,11 +101,12 @@ REG_OP(AdjustSaturation) *@par Inputs: *Input images is a tensor of at least 3 dimensions. The last 3 dimensions are interpreted as '[height, width, channels]'. Inputs include: -*@li images:A Tensor of type float. Images to adjust. At least 3-D. +*@li images:A Tensor of type float. Images to adjust. At least 3-D. The format +must be NHWC. *@li scale:A Tensor of type float. A float multiplier for adjusting contrast . \n *@par Outputs: -*y:A Tensor of type float . \n +*y:A Tensor of type float. The format must be NHWC. \n *@attention Constraints: *Input images is a tensor of at least 3 dimensions. The last dimension is @@ -112,7 +131,7 @@ nearest neighbor sampling to a common output size specified by crop_size . \n *Input images must be a 4-D tensor. Inputs include: *@li images:A Tensor. Must be one of the following types:uint8, uint16, int8, int16, int32, int64, float16, float, double. A 4-D tensor of shape -[batch, image_height, image_width, depth]. +[batch, image_height, image_width, depth]. The format must be NHWC. *@li boxes: A Tensor of type float. A 2-D tensor of shape [num_boxes, 4]. *@li box_index: A Tensor of type int32. A 1-D tensor of shape [num_boxes] with int32 values in [0, batch). @@ -127,7 +146,7 @@ extrapolation, when applicable. NearestNeighbor . \n *@par Outputs: -*y:A Tensor of type float . \n +*y:A Tensor of type float. The format must be NHWC. \n *@attention Constraints: *Input images must be a 4-D tensor . \n @@ -193,7 +212,9 @@ boxes tensor . \n *@par Inputs: *Input images and grads must be a 4-D tensor. Inputs include: *@li grads: A 4-D tensor of shape [num_boxes, crop_height, crop_width, depth]. 
+The format must be NHWC. *@li images: A 4-D tensor of shape [batch, image_height, image_width, depth]. +The format must be NHWC. Both image_height and image_width need to be positive. *@li boxes: A 2-D tensor of shape [num_boxes, 4]. The i-th row of the tensor specifies the coordinates of a box in the box_ind[i] image and is specified in @@ -233,6 +254,7 @@ images tensor . \n *@par Inputs: *Input grads must be a 4-D tensor. Inputs include: *@li grads: A 4-D tensor of shape [num_boxes, crop_height, crop_width, depth]. +The format must be NHWC. *@li boxes: A 2-D tensor of shape [num_boxes, 4]. The i-th row of the tensor specifies the coordinates of a box in the box_ind[i] image and is specified in normalized coordinates [y1, x1, y2, x2]. @@ -248,7 +270,8 @@ method: A string specifying the interpolation method. Only 'bilinear' is supported for now . \n *@par Outputs: -*y:A 4-D tensor of shape [batch, image_height, image_width, depth] . \n +*y:A 4-D tensor of shape [batch, image_height, image_width, depth]. The format +must be NHWC. \n *@attention Constraints: *Input grads must be a 4-D tensor . \n @@ -273,6 +296,7 @@ REG_OP(CropAndResizeGradImage) *@par Inputs: *Input x must be a 4-D tensor. Inputs include: *@li x: A 4-D float tensor of shape [batch_size, height, width, channels]. +The format must be NHWC. *@li size: A 1-D tensor of 2 elements containing the size of the glimpses to extract. The glimpse height must be specified first, following by the glimpse width. @@ -293,7 +317,7 @@ uniform_noise . \n *@par Outputs: *y:A tensor representing the glimpses [batch_size, glimpse_height, -glimpse_width, channels] . \n +glimpse_width, channels]. The format must be NHWC. \n *@attention Constraints: *Input x must be a 4-D tensor . \n @@ -340,7 +364,8 @@ REG_OP(HSVToRGB) *@par Inputs: *Input images must be a 4-D tensor. Inputs include: -*@li images: 4-D with shape [batch, height, width, channels]. +*@li images: 4-D with shape [batch, height, width, channels]. 
The format must +be NHWC. *@li size: A 1-D int32 Tensor of 2 elements: new_height, new_width. The new size for the images. *@li min: A Tensor of type float. @@ -354,6 +379,7 @@ the values at the corner pixels. Defaults to false. *@par Outputs: *@li resized_images: 4-D with shape [batch, new_height, new_width, channels]. +The format must be NHWC. *@li y_min: A Tensor of type float. *@li y_max: A Tensor of type float . \n @@ -381,7 +407,8 @@ REG_OP(QuantizedResizeBilinear) *@par Inputs: *Input images must be a 4-D tensor. Inputs include: -*@li images: 4-D with shape [batch, height, width, channels]. +*@li images: 4-D with shape [batch, height, width, channels]. The format must +be NHWC. *@li size: A 1-D int32 Tensor of 2 elements: new_height, new_width. The new size for the images . \n @@ -391,7 +418,8 @@ output tensors are aligned, preserving the values at the corner pixels. Defaults to false . \n *@par Outputs: -*y: 4-D with shape [batch, new_height, new_width, channels] . \n +*y: 4-D with shape [batch, new_height, new_width, channels]. The format must +be NHWC. \n *@attention Constraints: *Input images can be of different types but output images are always float . \n @@ -414,10 +442,10 @@ REG_OP(ResizeArea) *@par Inputs: *Input grads must be a 4-D tensor. Inputs include: *@li grads: A Tensor of type float. 4-D with shape [batch, height, width, -channels]. +channels]. The format must be NHWC. *@li original_image: A Tensor. Must be one of the following types: float, double. 4-D with shape [batch, orig_height, orig_width, channels], The image -tensor that was resized . \n +tensor that was resized. The format must be NHWC. \n *@par Attributes: *@li align_corners: An optional bool. Defaults to False. If true, the centers @@ -426,10 +454,10 @@ false. *@li half_pixel_centers: An optional bool. Defaults to False . \n *@par Outputs: -*y: A Tensor. Has the same type as original_image . \n +*y: A Tensor. Has the same type as original_image. The format must be NHWC. 
\n *@attention Constraints: -*Input images can be of different types but output images are always float . \n +*Input images can be of different types but output images are always float . *@par Third-party framework compatibility *Compatible with tensorflow ResizeBicubicGrad operator. @@ -448,7 +476,8 @@ REG_OP(ResizeBicubicGrad) *@par Inputs: *Input images must be a 4-D tensor. Inputs include: -*@li images: 4-D with shape [batch, height, width, channels]. +*@li images: 4-D with shape [batch, height, width, channels]. The format +must be NHWC. *@li size: A 1-D int32 Tensor of 2 elements: new_height, new_width. The new size for the images . \n @@ -459,10 +488,11 @@ Defaults to false. *@li half_pixel_centers: An optional bool. Defaults to False . \n *@par Outputs: -*y: 4-D with shape [batch, new_height, new_width, channels] . \n +*y: 4-D with shape [batch, new_height, new_width, channels]. The format +must be NHWC. \n *@attention Constraints: -*Input images can be of different types but output images are always float . \n +*Input images can be of different types but output images are always float . *@par Third-party framework compatibility *Compatible with tensorflow ResizeBicubic operator. @@ -483,7 +513,7 @@ REG_OP(ResizeBicubic) *@par Inputs: *Input grads must be a 4-D tensor. Inputs include: *@li grads: A Tensor. Must be one of the following types: uint8, int8, int32, -float16, float, double. 4-D with shape [batch, height, width, channels]. +float16, float, double. Must set the format, supported format list ["NCHW, NHWC"] *@li size: A 1-D int32 Tensor of 2 elements: orig_height, orig_width. The original input size . \n @@ -550,9 +580,8 @@ REG_OP(ResizeNearestNeighborV2GradD) *@par Inputs: *Input grads must be a 4-D tensor. Inputs include: -*@li grads: A Tensor of type float32. 4-D with shape [batch, height, width, -channels]. -*@li original_image: A Tensor. 4-D with shape [batch, orig_height, orig_width, +*@li grads: A Tensor of type float32. 
Must set the format, supported format list ["NCHW, NHWC"] +*@li original_image: A Tensor. 4-D shape. Must set the format, supported format list ["NCHW, NHWC"] channels], The image tensor that was resized . \n *@par Attributes: @@ -583,7 +612,7 @@ REG_OP(ResizeBilinearV2Grad) *@par Inputs: *Input images must be a 4-D tensor. Inputs include: -*@li x: 4-D with shape [batch, height, width, channels]. +*@li x: 4-D tensor. Must set the format, supported format list ["NCHW, NHWC"] *@li size: A 1-D int32 Tensor of 2 elements: new_height, new_width. The new size for the images . \n @@ -643,6 +672,62 @@ REG_OP(RGBToHSV) *Input images must be a 4-D tensor. Inputs include: *@li image_size: 1-D, containing [height, width, channels]. *@li bounding_boxes: 3-D with shape [batch, N, 4] describing the N bounding +boxes associated with the image. \n + +*@par Attributes: +*@li seed: If either seed or seed2 are set to non-zero, the random number +generator is seeded by the given seed. Otherwise, it is seeded by a random seed. +*@li seed2: A second seed to avoid seed collision. +*@li min_object_covered: The cropped area of the image must contain at least +this fraction of any bounding box supplied. The value of this parameter should +be non-negative. In the case of 0, the cropped area does not need to overlap +any of the bounding boxes supplied . +*@li aspect_ratio_range: The cropped area of the image must have an aspect +ratio = width / height within this range. +*@li max_attempts: Number of attempts at generating a cropped region of the +image of the specified constraints. After max_attempts failures, return the +entire image. +*@li use_image_if_no_bounding_boxes: Controls behavior if no bounding boxes +supplied. If true, assume an implicit bounding box covering the whole input. +If false, raise an error . \n + +*@par Outputs: +*@li begin: 1-D, containing [offset_height, offset_width, 0]. +*@li size: 1-D, containing [target_height, target_width, -1]. 
+*@li bboxes: 3-D with shape [1, 1, 4] containing the distorted bounding box . \n + +*@attention Constraints: +*Input images can be of different types but output images are always float . \n + +*@par Third-party framework compatibility +*Compatible with tensorflow SampleDistortedBoundingBox operator. +*/ + +REG_OP(SampleDistortedBoundingBox) + .INPUT(image_size, TensorType({ DT_UINT8, DT_INT8, DT_INT16, \ + DT_INT32, DT_INT64 })) + .INPUT(bounding_boxes, TensorType({ DT_FLOAT })) + .OUTPUT(begin, TensorType({ DT_UINT8, DT_INT8, DT_INT16, \ + DT_INT32, DT_INT64 })) + .OUTPUT(size, TensorType({ DT_UINT8, DT_INT8, DT_INT16, \ + DT_INT32, DT_INT64 })) + .OUTPUT(bboxes, TensorType({ DT_FLOAT })) + .ATTR(seed, Int, 0) + .ATTR(seed2, Int, 0) + .ATTR(min_object_covered, Float, 0.1f) + .ATTR(aspect_ratio_range, ListFloat, { 0.75f, 1.33f }) + .ATTR(area_range, ListFloat, { 0.05f, 1.0f }) + .ATTR(max_attempts, Int, 100) + .ATTR(use_image_if_no_bounding_boxes, Bool, false) + .OP_END_FACTORY_REG(SampleDistortedBoundingBox) + +/** +*@brief Generate a single randomly distorted bounding box for an image . \n + +*@par Inputs: +*Input images must be a 4-D tensor. Inputs include: +*@li image_size: 1-D, containing [height, width, channels]. +*@li bounding_boxes: 3-D with shape [batch, N, 4] describing the N bounding boxes associated with the image. *@li min_object_covered: The cropped area of the image must contain at least this fraction of any bounding box supplied. The value of this parameter should @@ -697,7 +782,7 @@ REG_OP(SampleDistortedBoundingBoxExt2) *@par Inputs: *Input x must be a 4-D tensor. Inputs include: -*@li x: 4-D with shape [batch, height, width, channels]. +*@li x: 4-D tensor. Must set the format, supported format list ["NCHW, NHWC"]. *@li size: A 1-D int32 Tensor of 2 elements: new_height, new_width. The new size for the images . \n @@ -729,12 +814,12 @@ REG_OP(ResizeNearestNeighborV2) *@par Inputs: *Input images must be a 4-D tensor. 
Inputs include: *@li images: A Tensor. Must be one of the following types: float. 4-D with -shape [batch, height, width, depth]. A batch of images. +shape [batch, height, width, depth]. A batch of images. The format must be NHWC. *@li boxes: A Tensor of type float32. 3-D with shape [batch, num_bounding_boxes, 4] containing bounding boxes . \n *@par Outputs: -*A Tensor. Has the same type as images . \n +*A Tensor. Has the same type as images. The format must be NHWC. \n *@attention Constraints: *Input images must be a 4-D tensor . \n @@ -1002,6 +1087,88 @@ REG_OP(EncodePng) .ATTR(compression, Int, -1) .OP_END_FACTORY_REG(EncodePng) + +/** +*@brief PNG-decode an image. +*@par Inputs: +*contents: 0-D. PNG-decoded image . + +*@par Attributes: +*channels: graph channels \n +*dtype: type of image + +*@par Outputs: +*image: is a 3-D uint8 or uint16 Tensor of shape [height, width, channels] +where channels is: 1: for grayscale; 2: for grayscale + alpha; 3: for RGB; +4: for RGBA . \n + +*@par Third-party framework compatibility +*Compatible with tensorflow DecodePng operator. +*/ +REG_OP(DecodePng) + .INPUT(contents, TensorType({DT_STRING})) + .OUTPUT(image, TensorType({DT_UINT8, DT_UINT16})) + .ATTR(dtype, Type, DT_UINT8) + .ATTR(channels, Int, 0) + .OP_END_FACTORY_REG(DecodePng) + +/** +*@brief Bmp-decode an image. \n + +*@par Inputs: +*@li contents: A Tensor of type string. 0-D. The BMP-encoded image. \n + +*@par Attributes: +*@li channels: Decode the desired number of color channels of the image. \n + +*@par Outputs: +*image: A Tensor dtype of uint8. + +* @par Third-party framework compatibility +* Compatible with tensorflow DecodeBmp operator. +*/ + +REG_OP(DecodeBmp) + .INPUT(contents, TensorType({DT_STRING})) + .OUTPUT(image, TensorType({DT_UINT8})) + .ATTR(channels, Int, 0) + .OP_END_FACTORY_REG(DecodeBmp) + +/** +*@brief Function parse image from string to int. \n + +*@par Inputs: +*@li contents: A Tensor of type string. 0-D. The JPEG-encoded image. 
\n +*@li crop_window: 1-D. The crop window: [crop_y, crop_x, crop_height, crop_width]. \n + +*@par Attributes: +*@li channels: An optional int. Defaults to 0. Number of color channels for the +*decoded image. +*@li ratio: An optional int. Defaults to 1. Downscaling ratio. +*@li fancy_upscaling: An optional bool. Defaults to True. If true use a slower +*but nicer upscaling of the chroma planes +*@li try_recover_truncated: An optional bool. Defaults to False. If true try to +*recover an image from truncated input. +*@li acceptable_fraction: An optional float. Defaults to 1. The minimum required +fraction of lines before a truncated input is accepted. +*@li dct_method: An optional string. Defaults to "". string specifying a hint +*about the algorithm used for decompression. \n + +*@par Outputs: +*image: A Tensor dtype of uint8. +*/ +REG_OP(DecodeAndCropJpeg) + .INPUT(contents, TensorType({DT_STRING})) + .INPUT(crop_window, TensorType({DT_INT32})) + .OUTPUT(image, TensorType({DT_UINT8})) + .ATTR(channels, Int, 0) + .ATTR(ratio, Int, 1) + .ATTR(fancy_upscaling, Bool, true) + .ATTR(try_recover_truncated, Bool, false) + .ATTR(acceptable_fraction, Float, 1.0) + .ATTR(dct_method, String, "") + .OP_END_FACTORY_REG(DecodeAndCropJpeg) + /** *@brief Resizes "images" to "size" using bilinear interpolation . \n @@ -1317,6 +1484,55 @@ REG_OP(CombinedNonMaxSuppression) .OP_END_FACTORY_REG(CombinedNonMaxSuppression) /** +*@brief Resizes "images" with "offset" using bilinear interpolation. \n + +*@par Inputs: +*@li img: input image, A 4-D tensor of shape `[n, h, w, c]`. +*@li warp_offset: the resize offset A 4-D float tensor of shape `[n, h, w, 2]`, 2 means (x, y) for offset point. + +*@par Outputs: +*warp_img: A Tensor after resize. 
\n +*/ +REG_OP(IMGWarp) + .INPUT(img, TensorType({DT_UINT8, DT_FLOAT16, DT_FLOAT32})) + .INPUT(warp_offset, TensorType({DT_FLOAT32})) + .OUTPUT(warp_img, TensorType({DT_UINT8, DT_FLOAT16, DT_FLOAT32})) + .OP_END_FACTORY_REG(IMGWarp) + +/** +*@brief Resizes "images" with "offset" using bilinear interpolation. \n + +*@par Inputs: +*@li img: input image, A 4-D tensor of shape `[n, h, w, c]`. +*@li map_offset: the resize offset A 4-D float tensor of shape `[n, h, w, 2]`, 2 means (x, y) for resize point. + +*@par Outputs: +*map_img: A Tensor after resize. \n +*/ +REG_OP(Remap) + .INPUT(img, TensorType({DT_UINT8, DT_FLOAT16, DT_FLOAT32})) + .INPUT(map_offset, TensorType({DT_FLOAT32})) + .OUTPUT(map_img, TensorType({DT_UINT8, DT_FLOAT16, DT_FLOAT32})) + .OP_END_FACTORY_REG(Remap) + +/** +*@brief Resizes "images" with "offset" using bilinear interpolation. \n + +*@par Inputs: +*@li img: input image, A 5-D tensor of shape `[n, 4, c, h, w]`, +and 4 mean input[(h_top, w_left), (h_top, w_right), (h_bottom, w_left), (h_bottom, w_right)]. +*@li warp_index: the resize offset A 4-D float tensor of shape `[n, 2, h, w]`, 2 means (x, y) for resize point. + +*@par Outputs: +*remap_img: A Tensor after ResizeBilinear, A 4-D tensor of shape `[n, c, h, w]`. \n +*/ +REG_OP(IMGWarpResize) + .INPUT(img, TensorType({DT_FLOAT32})) + .INPUT(warp_index, TensorType({DT_FLOAT32})) + .OUTPUT(warp_img, TensorType({DT_FLOAT32})) + .OP_END_FACTORY_REG(IMGWarpResize) + +/** *@brief Function spatial transformer . \n *@par Inputs: @@ -1342,6 +1558,383 @@ REG_OP(SpatialTransformerD) .ATTR(use_default_theta, ListBool, {}) .OP_END_FACTORY_REG(SpatialTransformerD) -} // namespace ge +/** +* @brief Resize the input tensor. \n +currently, only support resize image tensor using nearest neighbor and linear interpolation. + +* @par Inputs: +* Input x must be a 4-D tensor. Inputs include: \n +* @li x: A Tensor. Must be one of the following types: uint8, int8, int16, \n +int32, int64, float16, float, double. 
4-D with shape [batch, height, width, channels] \n +or shape [batch, channels, height, width]. +* @li roi: A 1-D float Tensor. only takes effect when attr coordinate_transformation_mode \n +is "tf_crop_and_resize" +* @li scales: A 1-D float Tensor, the scale array along each dimension, Only one of \n +'scales' and 'sizes' can be specified. +* @li sizes: A 1-D int64 Tensor, The size of the output tensor. nly one of \n +'scales' and 'sizes' can be specified. If 'size' is specified, then set scales \n +to empty data (zero shape) in this operator's input list. + +* @par Attributes: +* @li coordinate_transformation_mode: String. Defaults to half_pixel. how to transform \n +the coordinate in the resized tensor to the coordinate in the original tensor. \n +other optional: pytorch_half_pixel, align_corners, asymmetric, tf_half_pixel_for_nn, \n +tf_crop_and_resize. +* @li cubic_coeff_a: Float. Defaults to -0.75, only used in cubic interpolation. \n +other optional: -0.5 +* @li exclude_outside: Int. Defaults to 0, If set to 1, the weight of sampling \n +locations outside the tensor will be set to 0 and the weight will be renormalized \n +so that their sum is 1.0. +* @li extrapolation_value: Float. Defaults to 0.0f. When coordinate_transformation_mode \n +is "tf_crop_and_resize" and x_original is outside the range [0, length_original - 1], \n +this value is used as the corresponding output value. +* @li mode: String. Defaults to nearest. Three interpolation modes: nearest (default), \n +linear and cubic. +* @li nearest_mode: String. Defaults to round_prefer_floor. Four modes: round_prefer_floor, \n +round_prefer_ceil, floor, ceil. Only used by nearest interpolation. + +* @par Outputs: +* y: A Tensor. Has the same type as x. + +* @attention Constraints: \n +* Input x must be a 4-D tensor. + +* @par Third-party framework compatibility +* Compatible with tensorflow ResizeNearestNeighborV2 operator. 
+*/ + +REG_OP(Resize) + .INPUT(x, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, + DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .INPUT(roi, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .INPUT(scales, TensorType({DT_FLOAT})) + .OPTIONAL_INPUT(sizes, TensorType({DT_INT64})) + .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, + DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .ATTR(coordinate_transformation_mode, String, "half_pixel") + .ATTR(cubic_coeff_a, Float, -0.75) + .ATTR(exclude_outside, Int, 0) + .ATTR(extrapolation_value, Float, 0) + .ATTR(mode, String, "nearest") + .ATTR(nearest_mode, String, "round_prefer_floor") + .OP_END_FACTORY_REG(Resize) + +/** +*@brief Function parse image from string to int. \n + +*@par Inputs: +*@li contents: A Tensor of type string. 0-D. The JPEG-encoded image. \n + +*@par Attributes: +*@li channels: An optional int. Defaults to 0. Number of color channels for the decoded image. +*@li ratio: An optional int. Defaults to 1. Downscaling ratio. +*@li fancy_upscaling: An optional bool. Defaults to True. If true use a slower but nicer upscaling of the chroma planes +*@li try_recover_truncated: An optional bool. Defaults to False. If true try to recover an image from truncated input. +*@li acceptable_fraction: An optional float. Defaults to 1. The minimum required fraction of lines before a truncated input is accepted. +*@li dct_method: An optional string. Defaults to "". string specifying a hint about the algorithm used for decompression. \n + +*@par Outputs: +*image: A Tensor dtype of uint8. +*/ +REG_OP(DecodeJpeg) + .INPUT(contents, TensorType({DT_STRING})) + .OUTPUT(image, TensorType({DT_UINT8})) + .ATTR(channels, Int, 0) + .ATTR(ratio, Int, 1) + .ATTR(fancy_upscaling, Bool, true) + .ATTR(try_recover_truncated, Bool, false) + .ATTR(acceptable_fraction, Float, 1.0) + .ATTR(dct_method, String, "") + .OP_END_FACTORY_REG(DecodeJpeg) + +/** +*@brief Image warping using per-pixel flow vectors. 
\n + +*@par Inputs: +*@li image: 4-D Tensor with shape `[batch, height, width, channels]`. +*@li flow: 4-D Tensor with shape `[batch, height, width, 2]`. \n + +*@par Outputs: +*y: Returns 4-D with the same shape and dtype as `image`. \n +*/ +REG_OP(DenseImageWarp) + .INPUT(image, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(flow, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16})) + .OP_END_FACTORY_REG(DenseImageWarp) + +/** +*@brief Calculate the resize_d function. \n + +*@par Inputs: +*One inputs, including: +* @li x: A tensor. Must be one of the following types: +* float16, float32. \n + +*@par Attributes: +*@li sizes: An optional listInt. \n +*@li scales: An optional listFloat. + Defaults to none. \n +*@li roi: An optional listInt. + Defaults to none. \n +*@li coordinate_transformation_mode: An optional String. + Defaults to "half_pixel". \n +*@li cubic_coeff_a: An optional float. + Defaults to -0.75. \n +*@li exclude_outside: An optional int. + Defaults to 0. \n +*@li extrapolation_value: An optional float. + Defaults to 0.0. \n +*@li mode: An optional String. + Defaults to "nearest". \n +*@li nearest_mode: An optional String. + Defaults to "round_prefer_floor". \n + +*@par Outputs: +*y: A Tensor with the same type of x's, + shape depends on x and sizes. \n +*/ +REG_OP(ResizeD) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .REQUIRED_ATTR(sizes, ListInt) + .ATTR(scales, ListFloat, {}) + .ATTR(roi, ListInt, {}) + .ATTR(coordinate_transformation_mode, String, "half_pixel") + .ATTR(cubic_coeff_a, Float, -0.75) + .ATTR(exclude_outside, Int, 0) + .ATTR(extrapolation_value, Float, 0.0) + .ATTR(mode, String, "nearest") + .ATTR(nearest_mode, String, "round_prefer_floor") + .OP_END_FACTORY_REG(ResizeD) + +/** +*@brief Calculate the resize_grad_d function. \n + +*@par Inputs: +*One inputs, including: +* @li grads: A tensor. Must be one of the following types: +* float16, float32. 
\n + +*@par Attributes: +*@li original_size: An optional listInt. \n +*@li roi: An optional listInt. + Defaults to none. \n +*@li scales: An optional listFloat. + Defaults to none. \n +*@li coordinate_transformation_mode: An optional String. + Defaults to "half_pixel". \n +*@li cubic_coeff_a: An optional float. + Defaults to -0.75. \n +*@li exclude_outside: An optional int. + Defaults to 0. \n +*@li extrapolation_value: An optional float. + Defaults to 0.0. \n +*@li mode: An optional String. + Defaults to "nearest". \n +*@li nearest_mode: An optional String. + Defaults to "round_prefer_floor". \n + +*@par Outputs: +*y: A Tensor with the same type of x's, + shape depends on x and sizes. \n +*/ +REG_OP(ResizeGradD) + .INPUT(grads, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .REQUIRED_ATTR(original_size, ListInt) + .ATTR(roi, ListInt, {}) + .ATTR(scales, ListFloat, {}) + .ATTR(coordinate_transformation_mode, String, "half_pixel") + .ATTR(cubic_coeff_a, Float, -0.75) + .ATTR(exclude_outside, Int, 0) + .ATTR(extrapolation_value, Float, 0.0) + .ATTR(mode, String, "nearest") + .ATTR(nearest_mode, String, "round_prefer_floor") + .OP_END_FACTORY_REG(ResizeGradD) + +/** +*@brief Computes the gradients of DenseImageWarp with respect to image and flow. \n + +*@par Inputs: +*@li grad: gradients with respect to DenseImageWarp output. +*@li image: 4-D Tensor with shape `[batch, height, width, channels]`. +*@li flow: 4-D Tensor with shape `[batch, height, width, 2]`. \n + +*@par Outputs: +*grad_image: Returns 4-D with the same shape and dtype as `image`. +*grad_flow: Returns 4-D with the same shape and dtype as `flow`. 
\n +*/ +REG_OP(DenseImageWarpGrad) + .INPUT(grad, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(image, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(flow, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(grad_image, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(grad_flow, TensorType({DT_FLOAT, DT_FLOAT16})) + .OP_END_FACTORY_REG(DenseImageWarpGrad) + +/** +*@brief This operation samples input X by using interpolation based on flow field grid, + which is usually gennerated by affine_grid. The grid of shape [N, H, W, 2] is the concatenation of + (x, y) coordinates with shape [N, H, W] each, where x is indexing the 4th dimension (in width dimension) of + input data x and y is indexng the 3rd dimention (in height dimension), finally results is + the interpolation value of 4 nearest corner points. The output tensor shape will be [N, C, H, W]. + +*@par Inputs: +*@li x: 4-D Tensor with shape `[batch, channels, height, width]`. +*@li grid: flow field grid, 4-D Tensor with shape `[batch, height, width, 2]`. + +*@par Attributes: +*@li interpolation_mode: An optional string specifying the interpolation method. Only 'bilinear' is + supported for now . +*@li padding_mode: An optional string specifying the pad method. Only 'zeros' is supported for now . +*@li align_corners: An optional bool. If "true", the centers of the corner + pixels of the input and output tensors are aligned. Defaults to "false" . + +*@par Outputs: +*y: Returns 4-D Tensor with the same dtype as `X`. + +*@par Third-party framework compatibility +*Compatible with pytorch GridSampler2D operator. + +*@par Restrictions: +*Warning:THIS FUNCTION IS EXPERIMENTAL. Please do not use. 
+*/ +REG_OP(GridSampler2D) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(grid, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(interpolation_mode, String, "bilinear") + .ATTR(padding_mode, String, "zeros") + .ATTR(align_corners, Bool, false) + .OP_END_FACTORY_REG(GridSampler2D) + +/** +*@brief This operation unnormalize input Grid, which is usually gennerated by affine_grid. + +*@par Inputs: +*@li grid: flow field grid, 4-D Tensor with shape `[batch, height, width, 2]`. +*@li assist: Assist matrix, a 4-D tensor of type float16. + +*@par Attributes: +*@li align_corners: An optional bool. If "true", the centers of the corner + pixels of the input and output tensors are aligned. Defaults to "false" . + +*@par Outputs: +*diff: Returns 4-D Tensor with the same shape and dtype as `grid`. +*position: Returns 4-D Tensor with the same shape as `grid`. +*/ +REG_OP(GridUnnormal) + .INPUT(grid, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(assist, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(diff, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(position, TensorType({DT_INT32})) + .ATTR(align_corners, Bool, false) + .OP_END_FACTORY_REG(GridUnnormal) + +/** +*@brief This operation unfold input X based on unnormalized grid, which is gennerated by GridUnnormal. + +*@par Inputs: +*@li x: 4-D Tensor with shape `[batch, channels, height, width]`. +*@li position: 4-D Tensor with shape `[batch, output_height, output_width, 2]`. + +*@par Attributes: +*@li padding_mode: An optional string specifying the pad method. Only 'zeros' is supported for now . + +*@par Outputs: +*y: Returns 4-D Tensor with the same dtype as `x`. +*/ +REG_OP(ImageUnfold) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(position, TensorType({DT_INT32})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(padding_mode, String, "zeros") + .OP_END_FACTORY_REG(ImageUnfold) + +/** +*@brief This operation select images to warp_images according to offsets. 
+ +*@par Inputs: +*@li images: 4-D Tensor with shape `[batch, height, width, 3]`. +*@li offsets: 4-D Tensor with shape `[batch, 4, new_height, new_width]`. + +*@par Outputs: +*warp_images: Returns 5-D Tensor with shape +`[batch, 4, new_height, new_width, 3]` and the same dtype as `images`. +*/ +REG_OP(IMGWarpOffsets) + .INPUT(images, TensorType({DT_UINT8, DT_FLOAT16, DT_FLOAT})) + .INPUT(offsets, TensorType({DT_FLOAT, DT_INT32})) + .OUTPUT(warp_images, TensorType({DT_UINT8, DT_FLOAT16, DT_FLOAT})) + .OP_END_FACTORY_REG(IMGWarpOffsets) + +/** +*@brief This operation samples 3d input x by using interpolation based on flow field grid, + which is usually gennerated by affine_grid. + +*@par Inputs: +*@li x: 5-D Tensor with shape `[batch, channels, depth, height, width]`. +*@li grid: flow field grid, 5-D Tensor with shape `[batch, depth, height, width, 2]`. + +*@par Attributes: +*@li interpolation_mode: An optional string specifying the interpolation method. +*@li padding_mode: An optional string specifying the pad method. +*@li align_corners: An optional bool. If "true", the centers of the corner + pixels of the input and output tensors are aligned. Defaults to "false" . + +*@par Outputs: +*y: Returns 5-D Tensor with the same dtype as `x`. + +*@par Third-party framework compatibility +*Compatible with pytorch GridSampler3D operator. + +*@par Restrictions: +*Warning:THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(GridSampler3D) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .INPUT(grid, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .ATTR(interpolation_mode, String, "bilinear") + .ATTR(padding_mode, String, "zeros") + .ATTR(align_corners, Bool, false) + .OP_END_FACTORY_REG(GridSampler3D) +/** +*@brief Computes the gradients of GridSampler3D. + +*@par Inputs: +*@li grad: 5-D Tensor with shape `[batch, channels, depth, height, width]`. 
+*@li x: 5-D Tensor with shape `[batch, channels, depth, height, width]`. +*@li grid: flow field grid, 5-D Tensor with shape `[batch, depth, height, width, 2]`. + +*@par Attributes: +*@li interpolation_mode: An optional string specifying the interpolation method. +*@li padding_mode: An optional string specifying the pad method. +*@li align_corners: An optional bool. If "true", the centers of the corner + pixels of the input and output tensors are aligned. Defaults to "false" . + +*@par Outputs: +*dx: Returns 5-D Tensor with the same dtype and shape as `x`. +*dgrid: Returns 5-D Tensor with the same dtype and shape as `grid`. + +*@par Third-party framework compatibility +*Compatible with pytorch GridSampler3DGrad operator. + +*@par Restrictions: +*Warning:THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(GridSampler3DGrad) + .INPUT(grad, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .INPUT(grid, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .OUTPUT(dx, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .OUTPUT(dgrid, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .ATTR(interpolation_mode, String, "bilinear") + .ATTR(padding_mode, String, "zeros") + .ATTR(align_corners, Bool, false) + .OP_END_FACTORY_REG(GridSampler3DGrad) + +} // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_IMAGE_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/internal_ops.h b/third_party/fwkacllib/inc/ops/internal_ops.h index 9dde14a5..bcc3f1c3 100644 --- a/third_party/fwkacllib/inc/ops/internal_ops.h +++ b/third_party/fwkacllib/inc/ops/internal_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/third_party/fwkacllib/inc/ops/linalg_ops.h b/third_party/fwkacllib/inc/ops/linalg_ops.h index 7a6fbc59..69c77bf6 100644 --- a/third_party/fwkacllib/inc/ops/linalg_ops.h +++ b/third_party/fwkacllib/inc/ops/linalg_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -61,8 +61,8 @@ REG_OP(CholeskyGrad) *@par Inputs: *The input x has to be symmetric and positive definite.Inputs include: -*x:A Tensor. Must be one of the following types: double, float32. Shape -is [..., M, M] . \n +*x:A Tensor. Must be one of the following types: double, float32, float16, +complex64, complex128. Shape is [..., M, M] . \n *@par Outputs: *y:A Tensor. Has the same type as x . \n @@ -76,19 +76,40 @@ form square matrices. */ REG_OP(Cholesky) - .INPUT(x, TensorType({DT_FLOAT, DT_DOUBLE})) - .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE})) + .INPUT(x, TensorType({DT_FLOAT, DT_DOUBLE, \ + DT_FLOAT16, DT_COMPLEX64, DT_COMPLEX128})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, \ + DT_FLOAT16, DT_COMPLEX64, DT_COMPLEX128})) .OP_END_FACTORY_REG(Cholesky) /** +*@brief Computes the outer product of two 1D vectors . \n + +*@par Inputs: +*The input x1 and x2 has to be a 1D vector.Inputs include: +*@li x1:A Tensor. Must be one of the following types: float16, float32. +Shape is [N] . \n +*@li x2:A Tensor. Must have the same type as x. Shape is [M] . \n + +*@par Outputs: +*y:A Tensor. Has the same type as x . \n +*/ + +REG_OP(Ger) + .INPUT(x1, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(x2, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .OP_END_FACTORY_REG(Ger) + +/** *@brief Computes the sign and the log of the absolute value of the determinant of one or more square matrices . 
\n *@par Inputs: *The input x is a tensor of shape [N, M, M] whose inner-most 2 dimensions form square matrices. Inputs include: -*x:A Tensor. Must be one of the following types: double, float32. Shape is -[..., M, M] . \n +*x:A Tensor. Must be one of the following types: double, float32, +complex64, complex128. Shape is [..., M, M] . \n *@par Outputs: *@li y:A Tensor. Has the same type as x. @@ -103,9 +124,9 @@ form square matrices. \n */ REG_OP(LogMatrixDeterminant) - .INPUT(x, TensorType({DT_FLOAT, DT_DOUBLE})) - .OUTPUT(sign, TensorType({DT_FLOAT, DT_DOUBLE})) - .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE})) + .INPUT(x, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) + .OUTPUT(sign, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) .OP_END_FACTORY_REG(LogMatrixDeterminant) /** @@ -114,8 +135,8 @@ REG_OP(LogMatrixDeterminant) *@par Inputs: *The input x is a tensor of shape [N, M, M] whose inner-most 2 dimensions form square matrices. Inputs include: -*x:A Tensor. Must be one of the following types: double, float32. Shape is -[..., M, M] . \n +*x:A Tensor. Must be one of the following types: double, float32, complex64, +complex128. Shape is [..., M, M] . \n *@par Outputs: *y:A Tensor. Has the same type as x . \n @@ -129,8 +150,8 @@ form square matrices. */ REG_OP(MatrixDeterminant) - .INPUT(x, TensorType({DT_FLOAT, DT_DOUBLE})) - .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE})) + .INPUT(x, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) .OP_END_FACTORY_REG(MatrixDeterminant) /** @@ -140,8 +161,7 @@ their adjoints (conjugate transposes) . \n *@par Inputs: *The input x is a tensor of shape [..., M, M] whose inner-most 2 dimensions form square matrices. Inputs include: -*x:A Tensor. Must be one of the following types: double, float. Shape is -[..., M, M] . 
\n +*x:A Tensor of input. Shape is [..., M, M] . \n *@par Attributes: *adjoint:An optional bool. Defaults to False.Boolean indicating whether to @@ -159,8 +179,8 @@ form square matrices. \n */ REG_OP(MatrixInverse) - .INPUT(x, TensorType({DT_FLOAT, DT_DOUBLE})) - .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE})) + .INPUT(x, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) .ATTR(adjoint, Bool, false) .OP_END_FACTORY_REG(MatrixInverse) @@ -169,8 +189,7 @@ REG_OP(MatrixInverse) *@par Inputs: *The input rhs must have the same type as matrix. Inputs include: -*@li matrix:A Tensor. Must be one of the following types: double, float. -Shape is [..., M, M]. +*@li matrix:A Tensor of input. Shape is [..., M, M]. *@li rhs:A Tensor. Must have the same type as matrix. Shape is [..., M, K] . \n *@par Attributes: @@ -189,9 +208,9 @@ dimensions form square matrices. \n */ REG_OP(MatrixSolve) - .INPUT(matrix, TensorType({DT_FLOAT, DT_DOUBLE})) - .INPUT(rhs, TensorType({DT_FLOAT, DT_DOUBLE})) - .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE})) + .INPUT(matrix, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) + .INPUT(rhs, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) .ATTR(adjoint, Bool, false) .OP_END_FACTORY_REG(MatrixSolve) @@ -221,8 +240,8 @@ dimensions form square matrices. \n */ REG_OP(MatrixSolveLs) - .INPUT(matrix, TensorType({DT_FLOAT, DT_DOUBLE})) - .INPUT(rhs, TensorType({DT_FLOAT, DT_DOUBLE})) + .INPUT(matrix, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) + .INPUT(rhs, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) .INPUT(l2, TensorType({DT_DOUBLE})) .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE})) .ATTR(fast, Bool, true) @@ -234,8 +253,7 @@ matrices by backsubstitution . \n *@par Inputs: *The input rhs must have the same type as matrix. 
Inputs include: -*@li matrix: A Tensor. Must be one of the following types: double, float. -Shape is [..., M, M]. +*@li matrix: A Tensor. Shape is [..., M, M]. *@li rhs:A Tensor. Must have the same type as matrix. Shape is [..., M, K] . \n *@par Attributes: @@ -256,9 +274,9 @@ dimensions form square matrices. \n */ REG_OP(MatrixTriangularSolve) - .INPUT(matrix, TensorType({DT_FLOAT, DT_DOUBLE})) - .INPUT(rhs, TensorType({DT_FLOAT, DT_DOUBLE})) - .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE})) + .INPUT(matrix, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) + .INPUT(rhs, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) .ATTR(lower, Bool, true) .ATTR(adjoint, Bool, false) .OP_END_FACTORY_REG(MatrixTriangularSolve) @@ -268,8 +286,7 @@ REG_OP(MatrixTriangularSolve) *@par Inputs: *The input shape of x must be [..., M, N]. Inputs include: -*x:A Tensor whose shape is [..., M, N]. Must be one of the following types: -double, float . \n +*x:A Tensor whose shape is [..., M, N]. \n *@par Attributes: *full_matrices: An optional bool. Defaults to False. If true, compute @@ -289,9 +306,12 @@ dimensions form matrices of size [M, N]. \n */ REG_OP(Qr) - .INPUT(x, TensorType({ DT_FLOAT16, DT_FLOAT, DT_DOUBLE })) - .OUTPUT(q, TensorType({ DT_FLOAT16, DT_FLOAT, DT_DOUBLE })) - .OUTPUT(r, TensorType({ DT_FLOAT16, DT_FLOAT, DT_DOUBLE })) + .INPUT(x, TensorType({ DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \ + DT_COMPLEX64, DT_COMPLEX128 })) + .OUTPUT(q, TensorType({ DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \ + DT_COMPLEX64, DT_COMPLEX128 })) + .OUTPUT(r, TensorType({ DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \ + DT_COMPLEX64, DT_COMPLEX128 })) .ATTR(full_matrices, Bool, false) .OP_END_FACTORY_REG(Qr) @@ -320,13 +340,41 @@ form square matrices. 
\n */ REG_OP(SelfAdjointEig) - .INPUT(x, TensorType({ DT_DOUBLE, DT_FLOAT })) - .OUTPUT(eigen_value, TensorType({ DT_DOUBLE, DT_FLOAT })) - .OUTPUT(eigen_vector, TensorType({ DT_DOUBLE, DT_FLOAT })) + .INPUT(x, TensorType({ DT_DOUBLE, DT_FLOAT, DT_COMPLEX64, DT_COMPLEX128 })) + .OUTPUT(eigen_value, TensorType({ DT_DOUBLE, DT_FLOAT, DT_COMPLEX64, DT_COMPLEX128 })) + .OUTPUT(eigen_vector, TensorType({ DT_DOUBLE, DT_FLOAT, DT_COMPLEX64, DT_COMPLEX128 })) .ATTR(compute_v, Bool, true) .OP_END_FACTORY_REG(SelfAdjointEig) /** +*@brief Computes the sign and the log of the absolute value of the determinant +of one or more square matrices . \n + +*@par Inputs: +*The input x is a tensor of shape [N, M, M] whose inner-most 2 dimensions +form square matrices. Inputs include: +*x:A Tensor. Must be one of the following types: double, float32, float16 +Shape is [..., M, M] . \n + +*@par Outputs: +*@li y:A Tensor. Has the same type as x. +*@li sign:A Tensor. Has the same type as x . \n + +*@attention Constraints: +*The input x is a tensor of shape [N, M, M] whose inner-most 2 dimensions +form square matrices. \n + +*@par Third-party framework compatibility +*Compatible with tensorflow LogMatrixDeterminant operator. +*/ + +REG_OP(Slogdet) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .OUTPUT(sign, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .OP_END_FACTORY_REG(Slogdet) + +/** *@brief Computes the singular value decompositions of one or more matrices . \n *@par Inputs: @@ -384,8 +432,8 @@ of the rows encoded as a list of indices in `0..M-1`. Shape is `[..., M]` . 
\n */ REG_OP(Lu) - .INPUT(input, TensorType({DT_FLOAT, DT_DOUBLE})) - .OUTPUT(lu, TensorType({DT_FLOAT, DT_DOUBLE})) + .INPUT(input, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) + .OUTPUT(lu, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) .OUTPUT(p, TensorType({DT_INT32, DT_INT64})) .REQUIRED_ATTR(output_idx_type, Type) .OP_END_FACTORY_REG(Lu) @@ -404,8 +452,8 @@ y: Shape is `[..., M, M]` . \n */ REG_OP(MatrixSquareRoot) - .INPUT(input, TensorType({DT_FLOAT, DT_DOUBLE})) - .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE})) + .INPUT(input, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) .OP_END_FACTORY_REG(MatrixSquareRoot) /** @@ -424,9 +472,9 @@ y: Tensor of shape `[..., M, K]` containing the solutions \n */ REG_OP(TridiagonalSolve) - .INPUT(diagonals, TensorType({DT_FLOAT, DT_DOUBLE})) - .INPUT(rhs, TensorType({DT_FLOAT, DT_DOUBLE})) - .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE})) + .INPUT(diagonals, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) + .INPUT(rhs, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) .ATTR(partial_pivoting, Bool, true) .OP_END_FACTORY_REG(TridiagonalSolve) diff --git a/third_party/fwkacllib/inc/ops/list_ops.h b/third_party/fwkacllib/inc/ops/list_ops.h new file mode 100644 index 00000000..a1b622e9 --- /dev/null +++ b/third_party/fwkacllib/inc/ops/list_ops.h @@ -0,0 +1,504 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! + * \file list_ops.h + * \brief + */ +#ifndef OPS_BUILT_IN_OP_PROTO_INC_LIST_OPS_H_ +#define OPS_BUILT_IN_OP_PROTO_INC_LIST_OPS_H_ + +#include +#include "graph/operator_reg.h" +#include "graph/operator.h" + +namespace ge { + +/** +*@brief Creates and returns an empty tensor list. \n + +*@par Inputs: +*@li element_shape: A shape compatible with that of elements in the list. +*@li max_num_elements: The maximum number of elements. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li handle: An empty tensor list . \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow EmptyTensorList operator. +*/ +REG_OP(EmptyTensorList) + .INPUT(element_shape, TensorType({DT_INT32,DT_INT64})) + .INPUT(max_num_elements, TensorType({DT_INT32})) + .OUTPUT(handle, TensorType({DT_VARIANT})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(EmptyTensorList) + +/** +*@brief Returns a list which has the passed-in `Tensor` as last element +and the other elements of the given list in `input_handle`. \n + +*@par Inputs: +*@li input_handle: The old list. +*@li tensor: The tensor to put on the list. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li output_handle:A list with the elements of old list followed by tensor. \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListPushBack operator. 
+*/ +REG_OP(TensorListPushBack) + .INPUT(input_handle, TensorType({DT_VARIANT})) + .INPUT(tensor, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,DT_INT8, + DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_QINT8,DT_QUINT8, + DT_QINT16,DT_QUINT16,DT_QINT32,DT_BOOL,DT_RESOURCE, + DT_STRING,DT_COMPLEX64,DT_COMPLEX128})) + .OUTPUT(output_handle, TensorType({DT_VARIANT})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListPushBack) + +/** +*@brief The last element of the input list as well as a +list with all but that element. \n + +*@par Inputs: +*@li input_handle: The input list. +*@li element_shape: A shape compatible with that of elements in the list. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li output_handle:A list with the elements of the old list followed by tensor. +*@li tensor:The withdrawn last element of the list. \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListPopBack operator. +*/ +REG_OP(TensorListPopBack) + .INPUT(input_handle, TensorType({DT_VARIANT})) + .INPUT(element_shape, TensorType({DT_INT32})) + .OUTPUT(output_handle, TensorType({DT_VARIANT})) + .OUTPUT(tensor, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,DT_INT8, + DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_QINT8,DT_QUINT8, + DT_QINT16,DT_QUINT16,DT_QINT32,DT_BOOL,DT_RESOURCE, + DT_STRING,DT_COMPLEX64,DT_COMPLEX128})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListPopBack) + +/** +*@brief The number of tensors in the input tensor list. \n + +*@par Inputs: +*@li input_handle: The input list. \n + +*@par Outputs: +*@li length:The number of tensors in the list. \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListLength operator. 
+*/ +REG_OP(TensorListLength) + .INPUT(input_handle, TensorType({DT_VARIANT})) + .OUTPUT(length, TensorType({DT_INT32})) + .OP_END_FACTORY_REG(TensorListLength) + +/** +*@brief The shape of elements in the input tensor list. \n + +*@par Inputs: +*@li input_handle: The input list. \n + +*@par Attributes: +*@li shape_type: The type of shape in the list. \n + +*@par Outputs: +*@li element_shape:A shape compatible with that of elements in the list. \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListElementShape operator. +*/ +REG_OP(TensorListElementShape) + .INPUT(input_handle, TensorType({DT_VARIANT})) + .OUTPUT(element_shape, TensorType({DT_INT32,DT_INT64})) + .ATTR(shape_type, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListElementShape) + +/** +*@brief List of the given size with empty elements. \n + +*@par Inputs: +*@li element_shape: A shape compatible with that of elements in the list. +*@li num_elements: The number of elements to reserve. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. +*@li shape_type: The type of shape in the list. \n + +*@par Outputs: +*@li handle: An output tensor list . \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListReserve operator. +*/ +REG_OP(TensorListReserve) + .INPUT(element_shape, TensorType({DT_INT32,DT_INT64})) + .INPUT(num_elements, TensorType({DT_INT32})) + .OUTPUT(handle, TensorType({DT_VARIANT})) + .ATTR(element_dtype, Type, DT_INT32) + .ATTR(shape_type, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListReserve) + +/** +*@brief Get input tensor list elements of index position. \n + +*@par Inputs: +*@li input_handle: The input list. +*@li index: A tensor of position. +*@li element_shape: A shape compatible with that of elements in the list. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li item: An output tensor value of index position . 
\n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListGetItem operator. +*/ +REG_OP(TensorListGetItem) + .INPUT(input_handle, TensorType({DT_VARIANT})) + .INPUT(index, TensorType({DT_INT32})) + .INPUT(element_shape, TensorType({DT_INT32})) + .OUTPUT(item, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,DT_INT8, + DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_QINT8,DT_QUINT8, + DT_QINT16,DT_QUINT16,DT_QINT32,DT_BOOL, + DT_STRING,DT_COMPLEX64,DT_COMPLEX128})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListGetItem) + +/** +*@brief Sets the index-th position of the list to contain the given tensor. \n + +*@par Inputs: +*@li input_handle: The input list. +*@li index: The position in the list to which the tensor will be assigned. +*@li item: The element to be assigned to that position. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li output_handle: An output tensor list . \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListSetItem operator. +*/ +REG_OP(TensorListSetItem) + .INPUT(input_handle, TensorType({DT_VARIANT})) + .INPUT(index, TensorType({DT_INT32})) + .INPUT(item, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,DT_INT8, + DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_QINT8,DT_QUINT8, + DT_QINT16,DT_QUINT16,DT_QINT32,DT_BOOL,DT_RESOURCE, + DT_STRING,DT_COMPLEX64,DT_COMPLEX128})) + .OUTPUT(output_handle, TensorType({DT_VARIANT})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListSetItem) + +/** +*@brief Push tensor to list. \n + +*@par Inputs: +*@li input_handles: The input tensor lists. +*@li tensor: The tensor push into tensor list. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li output_handles: The output tensor lists. \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListPushBackBatch operator. 
+*/ +REG_OP(TensorListPushBackBatch) + .INPUT(input_handles, TensorType({DT_VARIANT})) + .INPUT(tensor, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,DT_INT8, + DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_QINT8,DT_QUINT8, + DT_QINT16,DT_QUINT16,DT_QINT32,DT_BOOL, + DT_STRING,DT_COMPLEX64,DT_COMPLEX128})) + .OUTPUT(output_handles, TensorType({DT_VARIANT})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListPushBackBatch) + +/** +*@brief Stacks all tensors in the list. \n + +*@par Inputs: +*@li input_handle: The input tensor list. +*@li element_shape: A shape compatible with that of elements in the tensor. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. +*@li num_elements: The number of elements in the list. \n + +*@par Outputs: +*@li tensor: The tensor of list. \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListStack operator. +*/ +REG_OP(TensorListStack) + .INPUT(input_handle, TensorType({DT_VARIANT})) + .INPUT(element_shape, TensorType({DT_INT32})) + .OUTPUT(tensor, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,DT_INT8, + DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_QINT8,DT_QUINT8, + DT_QINT16,DT_QUINT16,DT_QINT32,DT_BOOL, + DT_STRING,DT_COMPLEX64,DT_COMPLEX128})) + .ATTR(element_dtype, Type, DT_INT32) + .ATTR(num_elements, Int, -1) + .OP_END_FACTORY_REG(TensorListStack) + +/** +*@brief Concats all tensors in the list along the 0th dimension. +Requires that all tensors have the same shape except the first dimension. \n + +*@par Inputs: +*@li input_handle: The input list. +*@li element_shape: The shape of the uninitialized elements in the list. +If the first dimension is not -1, it is assumed that all list elements have +the same leading dim. +*@li leading_dims: The list of leading dims of uninitialized list elements. Used if +the leading dim of input_handle.element_shape or the element_shape input arg +is not already set. 
\n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li tensor: The concated result. +*@li lengths: Output tensor containing sizes of the 0th dimension of tensors +in the list, used for computing the gradient. \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListConcatV2 operator. +*/ +REG_OP(TensorListConcatV2) + .INPUT(input_handle, TensorType({DT_VARIANT})) + .INPUT(element_shape, TensorType({DT_INT32,DT_INT64})) + .INPUT(leading_dims, TensorType({DT_INT64})) + .OUTPUT(tensor, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,DT_INT8, + DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_QINT8,DT_QUINT8, + DT_QINT16,DT_QUINT16,DT_QINT32,DT_BOOL, + DT_STRING,DT_COMPLEX64,DT_COMPLEX128})) + .OUTPUT(lengths, TensorType({DT_INT64})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListConcatV2) + +/** +*@brief Splits a tensor into a list. \n + +*@par Inputs: +*@li tensor: The input tensor. +*@li element_shape: A shape compatible with that of elements in the tensor. +*@li lengths: Vector of sizes of the 0th dimension of tensors in the list. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li output_handle: The list. \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListSplit operator. +*/ +REG_OP(TensorListSplit) + .INPUT(tensor, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,DT_INT8, + DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_QINT8,DT_QUINT8, + DT_QINT16,DT_QUINT16,DT_QINT32,DT_BOOL, + DT_STRING,DT_COMPLEX64,DT_COMPLEX128})) + .INPUT(element_shape, TensorType({DT_INT32,DT_INT64})) + .INPUT(lengths, TensorType({DT_INT64})) + .OUTPUT(output_handle, TensorType({DT_VARIANT})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListSplit) + +/** +*@brief Creates a TensorList which, when stacked, has the value of `tensor`. \n + +*@par Inputs: +*@li tensor: The input tensor. 
+*@li element_shape: The shape of elements in the list. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li output_handle: An output tensor list . \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListFromTensor operator. +*/ +REG_OP(TensorListFromTensor) + .INPUT(tensor, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,DT_INT8, + DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_QINT8,DT_QUINT8, + DT_QINT16,DT_QUINT16,DT_QINT32,DT_BOOL, + DT_STRING,DT_COMPLEX64,DT_COMPLEX128})) + .INPUT(element_shape, TensorType({DT_INT32,DT_INT64})) + .OUTPUT(output_handle, TensorType({DT_VARIANT})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListFromTensor) + +/** +*@brief Resizes the list. \n + +*@par Inputs: +*@li input_handle: The input tensor list. +*@li size: size of the output list. \n + +*@par Outputs: +*@li output_handle: The output tensor list. \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListResize operator. +*/ +REG_OP(TensorListResize) + .INPUT(input_handle, TensorType({DT_VARIANT})) + .INPUT(size, TensorType({DT_INT32})) + .OUTPUT(output_handle, TensorType({DT_VARIANT})) + .OP_END_FACTORY_REG(TensorListResize) + +/** +*@brief Creates a Tensor by indexing into the TensorList. \n + +*@par Inputs: +*@li input_handle: The input tensor list. +*@li indices: The indices used to index into the list. +*@li element_shape: The shape of elements in the list. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li values: The tensor. \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListGather operator. 
+*/ +REG_OP(TensorListGather) + .INPUT(input_handle, TensorType({DT_VARIANT})) + .INPUT(indices, TensorType({DT_INT32})) + .INPUT(element_shape, TensorType({DT_INT32})) + .OUTPUT(values, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,DT_INT8, + DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_QINT8,DT_QUINT8, + DT_QINT16,DT_QUINT16,DT_QINT32,DT_BOOL, + DT_STRING,DT_COMPLEX64,DT_COMPLEX128})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListGather) + +/** +*@brief Creates a TensorList by indexing into a Tensor. \n + +*@par Inputs: +*@li tensor: The input tensor. +*@li indices: The indices used to index into the list. +*@li element_shape: The shape of the elements in the list (can be less specified than +the shape of the tensor). +*@li num_elements: The size of the output list. Must be large enough to accommodate +the largest index in indices. If -1, the list is just large enough to include +the largest index in indices. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li output_handle: The TensorList. \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListScatterV2 operator. +*/ +REG_OP(TensorListScatterV2) + .INPUT(tensor, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,DT_INT8, + DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_QINT8,DT_QUINT8, + DT_QINT16,DT_QUINT16,DT_QINT32,DT_BOOL, + DT_STRING,DT_COMPLEX64,DT_COMPLEX128})) + .INPUT(indices, TensorType({DT_INT32})) + .INPUT(element_shape, TensorType({DT_INT32,DT_INT64})) + .INPUT(num_elements, TensorType({DT_INT32})) + .OUTPUT(output_handle, TensorType({DT_VARIANT})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListScatterV2) + +/** +*@brief Scatters tensor at indices in an input list. \n + +*@par Inputs: +*@li input_handle: The input tensor list. +*@li tensor: The input tensor. +*@li indices: The indices used to index into the list. 
\n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li output_handle: The TensorList. \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListScatterIntoExistingList operator. +*/ +REG_OP(TensorListScatterIntoExistingList) + .INPUT(input_handle, TensorType({DT_VARIANT})) + .INPUT(tensor, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,DT_INT8, + DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_QINT8,DT_QUINT8, + DT_QINT16,DT_QUINT16,DT_QINT32,DT_BOOL, + DT_STRING,DT_COMPLEX64,DT_COMPLEX128})) + .INPUT(indices, TensorType({DT_INT32})) + .OUTPUT(output_handle, TensorType({DT_VARIANT})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListScatterIntoExistingList) + +/** +*@brief Concat two tensor lists to a new tensor list. \n + +*@par Inputs: +*@li input_a: The input tensor list A. +*@li input_b: The input tensor list B. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li output: The output list. \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListConcatLists operator. +*/ +REG_OP(TensorListConcatLists) + .INPUT(input_a, TensorType({DT_VARIANT})) + .INPUT(input_b, TensorType({DT_VARIANT})) + .OUTPUT(output, TensorType({DT_VARIANT})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListConcatLists) +} // namespace ge + +#endif // OPS_BUILT_IN_OP_PROTO_INC_LIST_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/logging_ops.h b/third_party/fwkacllib/inc/ops/logging_ops.h index bc8ae2b8..03be7757 100644 --- a/third_party/fwkacllib/inc/ops/logging_ops.h +++ b/third_party/fwkacllib/inc/ops/logging_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/third_party/fwkacllib/inc/ops/lookup_ops.h b/third_party/fwkacllib/inc/ops/lookup_ops.h index b37ab048..5d928e5a 100644 --- a/third_party/fwkacllib/inc/ops/lookup_ops.h +++ b/third_party/fwkacllib/inc/ops/lookup_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/math_ops.h b/third_party/fwkacllib/inc/ops/math_ops.h index 149e0e37..319bcf70 100644 --- a/third_party/fwkacllib/inc/ops/math_ops.h +++ b/third_party/fwkacllib/inc/ops/math_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -223,6 +223,24 @@ REG_OP(Bucketize) .OP_END_FACTORY_REG(Bucketize) /** +*@brief Returns a new tensor with the truncated integer values of the elements of input. \n + +*@par Inputs: +*One inputs, including: +* @li input_x: A tensor. Must be one of the following types: float16, float32, int8, uint8, int32. \n + +*@par Outputs: +*y: A tensor with the same type and shape of input_x \n + +*@par Third-party framework compatibility +*Compatible with the Pytorch operator Trunc. \n +*/ +REG_OP(Trunc) + .INPUT(input_x, TensorType({DT_FLOAT16,DT_FLOAT, DT_INT8, DT_INT32, DT_UINT8})) + .OUTPUT(output_y, TensorType({DT_FLOAT16,DT_FLOAT, DT_INT8, DT_INT32, DT_UINT8})) + .OP_END_FACTORY_REG(Trunc) + +/** *@brief Computes the sum along sparse segments of a tensor . \n *@par Inputs: @@ -366,6 +384,27 @@ REG_OP(GetNext) .OP_END_FACTORY_REG(GetNext) /** +*@brief Get dynamic dims after GetNext. \n + +*@par Inputs: +*input: A nested structure of Tensor objects, from GetNext's output. 
\n + +*@par Attributes: +*@li shape_info: GE shape_info for each inputs, -1 means unknow dim. +*@li N: Inputs number. \n + +*@par Outputs: +*dims: GE unknow dims, a vector of int64. \n +*/ + +REG_OP(GetDynamicDims) + .DYNAMIC_INPUT(input, TensorType({DT_INT32, DT_INT64})) + .OUTPUT(dims, TensorType({DT_INT32, DT_INT64})) + .REQUIRED_ATTR(shape_info, ListInt) + .REQUIRED_ATTR(N, Int) + .OP_END_FACTORY_REG(GetDynamicDims) + +/** *@brief End of sequence . \n *@par Inputs: @@ -495,6 +534,29 @@ REG_OP(NextAfter) .OP_END_FACTORY_REG(NextAfter) /** +*@brief Calculate the P-norm distance between vectors function. \n + +*@par Inputs: +*One inputs, including: +* @li input_x: A tensor. Must be one of the following types: +* float16, float32. \n + +*@par Attributes: +*@li p: An optional float.Defaults to 2. \n + +*@par Outputs: +*y: A Tensor with the same type and shape of input_x's. \n + +*@par Third-party framework compatibility +*Compatible with the Pytorch operator Pdist. \n +*/ +REG_OP(Pdist) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(p, Float, 2.0) + .OP_END_FACTORY_REG(Pdist) + +/** *@brief Compute element-wise finiteness, return a boolean tensor. *@par Inputs: @@ -624,6 +686,7 @@ REG_OP(NLLLoss) .OUTPUT(y, TensorType({DT_FLOAT})) .OUTPUT(total_weight, TensorType({DT_FLOAT})) .ATTR(reduction, String, "mean") + .ATTR(ignore_index, Int, -100) .OP_END_FACTORY_REG(NLLLoss) /** @@ -653,6 +716,7 @@ REG_OP(NLLLossGrad) .INPUT(total_weight, TensorType({DT_FLOAT})) .OUTPUT(x_grad, TensorType({DT_FLOAT})) .ATTR(reduction, String, "mean") + .ATTR(ignore_index, Int, -100) .OP_END_FACTORY_REG(NLLLossGrad) /** @@ -710,6 +774,9 @@ REG_OP(IFMR) *@par Third-party framework compatibility *Compatible with mindspore + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. 
*/ REG_OP(WtsARQ) @@ -741,6 +808,9 @@ REG_OP(WtsARQ) *@par Third-party framework compatibility *Compatible with mindspore + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. */ REG_OP(ActsULQ) @@ -748,8 +818,8 @@ REG_OP(ActsULQ) .INPUT(clamp_min, TensorType({DT_FLOAT16, DT_FLOAT})) .INPUT(clamp_max, TensorType({DT_FLOAT16, DT_FLOAT})) .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) - .OUTPUT(clamp_min_mask, TensorType({DT_BOOL})) - .OUTPUT(clamp_max_mask, TensorType({DT_BOOL})) + .OUTPUT(clamp_min_mask, TensorType({DT_BOOL, DT_FLOAT16, DT_FLOAT})) + .OUTPUT(clamp_max_mask, TensorType({DT_BOOL, DT_FLOAT16, DT_FLOAT})) .OUTPUT(x_clamped_loss, TensorType({DT_FLOAT16, DT_FLOAT})) .ATTR(fixed_min, Bool, false) .ATTR(num_bits, Int, 8) @@ -768,12 +838,15 @@ REG_OP(ActsULQ) *@par Third-party framework compatibility *Compatible with mindspore + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. */ REG_OP(ActsULQInputGrad) .INPUT(y_grad, TensorType({DT_FLOAT16, DT_FLOAT})) - .INPUT(clamp_min_mask, TensorType({DT_BOOL})) - .INPUT(clamp_max_mask, TensorType({DT_BOOL})) + .INPUT(clamp_min_mask, TensorType({DT_BOOL, DT_FLOAT16, DT_FLOAT})) + .INPUT(clamp_max_mask, TensorType({DT_BOOL, DT_FLOAT16, DT_FLOAT})) .OUTPUT(x_grad, TensorType({DT_FLOAT16, DT_FLOAT})) .OP_END_FACTORY_REG(ActsULQInputGrad) @@ -790,11 +863,14 @@ REG_OP(ActsULQInputGrad) *@par Third-party framework compatibility *Compatible with mindspore + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. 
*/ REG_OP(ActULQClampMaxGrad) .INPUT(y_grad, TensorType({DT_FLOAT16, DT_FLOAT})) - .INPUT(clamp_max_mask, TensorType({DT_BOOL})) + .INPUT(clamp_max_mask, TensorType({DT_BOOL, DT_FLOAT16, DT_FLOAT})) .INPUT(x_clamped_loss, TensorType({DT_FLOAT16, DT_FLOAT})) .OUTPUT(clamp_max_grad, TensorType({DT_FLOAT16, DT_FLOAT})) .OP_END_FACTORY_REG(ActULQClampMaxGrad) @@ -812,15 +888,208 @@ REG_OP(ActULQClampMaxGrad) *@par Third-party framework compatibility *Compatible with mindspore + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. */ REG_OP(ActULQClampMinGrad) .INPUT(y_grad, TensorType({DT_FLOAT16, DT_FLOAT})) - .INPUT(clamp_min_mask, TensorType({DT_BOOL})) + .INPUT(clamp_min_mask, TensorType({DT_BOOL, DT_FLOAT16, DT_FLOAT})) .INPUT(x_clamped_loss, TensorType({DT_FLOAT16, DT_FLOAT})) .OUTPUT(clamp_min_grad, TensorType({DT_FLOAT16, DT_FLOAT})) .OP_END_FACTORY_REG(ActULQClampMinGrad) +/** +* @brief Computes Lp norm. + +* @par Inputs: +* @li x: An ND tensor of type float16, float32. \n +* +* @par Attributes: +* @li p: Int, "inf" or "-inf", default value is 2. +* @li axes: ListInt, {} means all axes will be computed. +* @li keepdim: Bool, default is false. +* @li epsilon: Float, default is 1e-12. \n + +* @par Outputs: +* @li y: An ND tensor of type float16, float32. The shape of y is depending +* on axes and keepdim. \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator LpNorm. +*/ +REG_OP(LpNorm) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(p, Int, 2) + .ATTR(axes, ListInt, {}) + .ATTR(keepdim, Bool, false) + .ATTR(epsilon, Float, 1e-12) + .OP_END_FACTORY_REG(LpNorm) + +/** +* @brief get complex. + +* @par Inputs: +* @li real: An ND tensor of type float32. double +* @li imag: An ND tensor of type float32. 
double \n +* +* @par Outputs: +* @li out: An ND tensor of type complex64, complex128 \n +*/ +REG_OP(Complex) + .INPUT(real, TensorType({DT_FLOAT, DT_DOUBLE})) + .INPUT(imag, TensorType({DT_FLOAT, DT_DOUBLE})) + .OUTPUT(out, TensorType({DT_COMPLEX64, DT_COMPLEX128})) + .ATTR(Tout, Type, DT_COMPLEX64) + .OP_END_FACTORY_REG(Complex) + +/** +* @brief Returns the imaginary part of a complex tensor. + +* @par Inputs: +* @li input: An ND tensor of type complex64, complex128 \n +* +* @par Outputs: +* @li output: An ND tensor of type float32 or double \n +*/ +REG_OP(Imag) + .INPUT(input, TensorType({DT_COMPLEX64, DT_COMPLEX128})) + .OUTPUT(output, TensorType({DT_FLOAT, DT_DOUBLE})) + .ATTR(Tout, Type, DT_FLOAT) + .OP_END_FACTORY_REG(Imag) + +/** +* @brief Returns the element-wise argument (angle) of a complex tensor. + +* @par Inputs: +* @li input: An ND tensor of type complex64, complex128 \n +* +* @par Outputs: +* @li output: An ND tensor of type float32 or double \n +*/ +REG_OP(Angle) + .INPUT(input, TensorType({DT_COMPLEX64, DT_COMPLEX128})) + .OUTPUT(output, TensorType({DT_FLOAT, DT_DOUBLE})) + .ATTR(Tout, Type, DT_FLOAT) + .OP_END_FACTORY_REG(Angle) + +/** +*@brief Computes the gradient of SoftMarginLoss. \n + +*@par Inputs: +*Three inputs, including: +* @li predict: A tensor. Must be one of the following types: +* float16, float32. \n +* @li label: A tensor with the same shape as predict. Must be one of the following types: +* float16, float32. \n +* @li dout: A tensor with the same shape as predict. Must be one of the following types: +* float16, float32. \n + +*@par Attributes: +* @li reduction: Specifies the reduction to apply to the output: +* 'none' | 'mean' | 'sum'. Default: 'mean'. \n + +*@par Outputs: +* gradient: A Tensor with the same type as predict. \n + +*@par Third-party framework compatibility +*Compatible with the Pytorch operator SoftMarginLoss Backward. 
\n +*/ +REG_OP(SoftMarginLossGrad) + .INPUT(predict, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(label, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(dout, TensorType({DT_FLOAT16,DT_FLOAT})) + .OUTPUT(gradient, TensorType({DT_FLOAT16,DT_FLOAT})) + .ATTR(reduction, String, "mean") + .OP_END_FACTORY_REG(SoftMarginLossGrad) + +/** +*@brief Calculate the cross product of two tensors. \n + +*@par Inputs: +*Two inputs, including: +* @li x1: A tensor. Must be one of the following types: +* float16, float32, int32, int8, uint8, int16. \n +* @li x2: A tensor. Must be one of the following types: +* float16, float32, int32, int8, uint8, int16. \n + +*@par Attributes: +*@li dim: the dimension along which to compute the cross product. Defaults to -65530. \n + +*@par Outputs: +*y: A Tensor with the same type and shape as x1. \n + +*@par Third-party framework compatibility +*Compatible with the Pytorch operator cross. \n +*/ +REG_OP(Cross) + .INPUT(x1, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8, DT_INT16})) + .INPUT(x2, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8, DT_INT16})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8, DT_INT16})) + .ATTR(dim, Int, -65530) + .OP_END_FACTORY_REG(Cross) + +/** + *@brief Computes the batched p-norm distance between each pair of + *the two collections of row vectors. \n + + *@par Inputs: + *Two inputs, including: + * @li x1: A tensor with shape: BxPxM. Must be one of the following types: + * float16, float32. \n + * @li x2: A tensor with shape: BxRxM. Must be one of the following types: + * float16, float32. \n + + *@par Attributes: + * @li p: An optional float >= 0 or inf. Defaults to 2.0. \n + + *@par Outputs: + * y: A Tensor with the same type as x1, with shape BxPxR. \n + + *@par Third-party framework compatibility + *Compatible with the Pytorch operator Cdist. 
\n + */ +REG_OP(Cdist) + .INPUT(x1, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(x2, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(p, Float, 2.0) + .OP_END_FACTORY_REG(Cdist) + +/** +*@brief Computes the gradient of x1 in cdist. \n + +*@par Inputs: +*Four inputs, including: + * @li grad: Grad with shape BxPxR. Must be one of the following types: +* float16, float32. \n +* @li x1: A tensor with shape: BxPxM. Must be one of the following types: +* float16, float32. \n +* @li x2: A tensor with shape: BxRxM. Must be one of the following types: +* float16, float32. \n +* @li cdist: Output tensor of cdist forward with shape: BxPxR. +* Must be one of the following types: float16, float32. \n + +*@par Attributes: +* @li p: An optional float >= 0 or inf. Defaults to 2.0. \n + +*@par Outputs: +* y: A Tensor with the same type and shape as x1. \n + +*@par Third-party framework compatibility +*Compatible with the Pytorch operator Cdist Backward. \n +*/ +REG_OP(CdistGrad) + .INPUT(grad, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(x1, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(x2, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(cdist, TensorType({DT_FLOAT16,DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT})) + .ATTR(p, Float, 2.0) + .OP_END_FACTORY_REG(CdistGrad) + +} // namespace ge + +#endif // OPS_BUILT_IN_OP_PROTO_INC_MATH_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/matrix_calculation_ops.h b/third_party/fwkacllib/inc/ops/matrix_calculation_ops.h index ed23d3f6..b317be37 100644 --- a/third_party/fwkacllib/inc/ops/matrix_calculation_ops.h +++ b/third_party/fwkacllib/inc/ops/matrix_calculation_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -38,8 +38,8 @@ namespace ge { * float32, int32. Has format [ND, NHWC] . 
\n *@par Attributes: -*@li transpose_a: A bool. If True, changes the shape of "x1" from [M, K] to [K, M]. -*@li transpose_b: A bool. If True, changes the shape of "x2" from [M, K] to [K, M] . \n +*@li transpose_x1: A bool. If True, changes the shape of "x1" from [M, K] to [K, M]. +*@li transpose_x2: A bool. If True, changes the shape of "x2" from [M, K] to [K, M] . \n *@par Outputs: *y: The result matrix Tensor. 2D. Must be one of the following types: float16, @@ -70,8 +70,8 @@ REG_OP(MatMul) * float32, int32. Has format [ND, NHWC] . \n *@par Attributes: -*@li transpose_a: A bool. If True, changes the shape of "x1" from [M, K] to [K, M]. -*@li transpose_b: A bool. If True, changes the shape of "x2" from [M, K] to [K, M] . \n +*@li transpose_x1: A bool. If True, changes the shape of "x1" from [M, K] to [K, M]. +*@li transpose_x2: A bool. If True, changes the shape of "x2" from [M, K] to [K, M] . \n *@par Outputs: *y: The result matrix Tensor. 2D. Must be one of the following types: float16, @@ -91,6 +91,36 @@ REG_OP(MatMulV2) .ATTR(offset_x, Int, 0) .OP_END_FACTORY_REG(MatMulV2) +/** +*@brief Multiplies matrix "a" by matrix "b", producing "a * b" . \n + +*@par Inputs: +*Two inputs, including: +* @li x1: A matrix Tensor. 2D. Must be one of the following types: int8. +* @li x2: A matrix Tensor. 2D. Must be one of the following types: int8. +* @li compress_index: A compress index matrix of type int8. +* @li bias: A 1D Tensor. Must be one of the following types: int32, float16. + +*@par Attributes: +*@li transpose_x1: A bool. If True, changes the shape of "x1" from [M, K] to [K, M]. +*@li transpose_x2: A bool. If True, changes the shape of "x2" from [M, K] to [K, M] . \n + +*@par Outputs: +*y: The result matrix Tensor. 2D. Must be one of the following types: float16, +* int32. 
\n + +*/ +REG_OP(MatMulV2Compress) + .INPUT(x1, TensorType({DT_INT8})) + .INPUT(x2, TensorType({DT_INT8})) + .INPUT(compress_index, TensorType({DT_INT8})) + .OPTIONAL_INPUT(bias, TensorType({DT_INT32, DT_FLOAT16})) + .OUTPUT(y, TensorType({DT_INT32, DT_FLOAT16})) + .OPTIONAL_INPUT(offset_w, TensorType({DT_INT8})) + .ATTR(transpose_x1, Bool, false) + .ATTR(transpose_x2, Bool, false) + .ATTR(offset_x, Int, 0) + .OP_END_FACTORY_REG(MatMulV2Compress) /** *@brief Performs Matrix-to-matrix Multiply, producing c=alpha[0]*a*b+beta[0]*c . \n @@ -149,15 +179,15 @@ REG_OP(GEMM) *@brief Multiplies matrix "a" by matrix "b", producing "a * b" . \n *@par Inputs: -*Three inputs, including: +*Two inputs, including: * @li x1: A matrix Tensor. Must be one of the following types: float16, * float32, int32. 2D or higher. Has format [ND, NHWC, FRACTAL_NZ]. * @li x2: A matrix Tensor. Must be one of the following types: float16, * float32, int32. 2D or higher. Has format [ND, NHWC, FRACTAL_NZ] . \n *@par Attributes: -*@li adj_x: A bool. If True, changes the shape of "x1" from [B, M, K] to [B, K, M]. -*@li adj_y: A bool. If True, changes the shape of "x2" from [B, M, K] to [B, K, M] . \n +*@li adj_x1: A bool. If True, changes the shape of "x1" from [B, M, K] to [B, K, M]. +*@li adj_x2: A bool. If True, changes the shape of "x2" from [B, M, K] to [B, K, M] . \n *@par Outputs: *y: The result matrix Tensor. 2D or higher. Must be one of the following types: float16, @@ -175,6 +205,42 @@ REG_OP(BatchMatMul) .ATTR(adj_x2, Bool, false) .OP_END_FACTORY_REG(BatchMatMul) + +/** +* @brief Multiplies matrix "a" by matrix "b", producing "a * b" . \n + +* @par Inputs: +* Three inputs, including: +* @li x1: A matrix Tensor. Must be one of the following types: float16, +* float32, int32. 2D or higher. Has format [ND, NHWC, FRACTAL_NZ]. +* @li x2: A matrix Tensor. Must be one of the following types: float16, +* float32, int32. 2D or higher. Has format [ND, NHWC, FRACTAL_NZ] . 
\n +* @li bias: A matrix Tensor. Must be one of the following types: float16, +* float32, int32. 2D or higher. Has format [ND, NHWC, FRACTAL_NZ] . \n + +* @par Attributes: +* @li adj_x1: A bool. If True, changes the shape of "x1" from [B, M, K] to [B, K, M]. +* @li adj_x2: A bool. If True, changes the shape of "x2" from [B, M, K] to [B, K, M] . \n + +* @par Outputs: +* y: The result matrix Tensor. 2D or higher. Must be one of the following types: float16, +* float32, int32. 2D or higher. Has format [ND, NHWC, FRACTAL_NZ]. Has the same shape length as "x1" and "x2" . \n + +* @par Third-party framework compatibility +* Compatible with the TensorFlow operator BatchMatmul. +*/ + +REG_OP(BatchMatMulV2) + .INPUT(x1, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8})) + .INPUT(x2, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8})) + .OPTIONAL_INPUT(bias, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32})) + .OPTIONAL_INPUT(offset_w, TensorType({DT_INT8})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32})) + .ATTR(adj_x1, Bool, false) + .ATTR(adj_x2, Bool, false) + .ATTR(offset_x, Int, 0) + .OP_END_FACTORY_REG(BatchMatMulV2) + /** *@brief Computes half the L2 norm of a tensor without the sqrt . \n @@ -334,7 +400,7 @@ REG_OP(MatrixSetDiagD) * int64, complex64, qint8, quint8, qint32, uint16, complex128, half, uint32, * uint64 *@li indices: An ND Tensor. -*Must be one of the following types: int32, int64 +*Must be one of the following types: int32 or int64 *@li updates: An ND Tensor. *Must be one of the following types: float16, float32, int8, uint8, double, * int64, complex64, qint8, quint8, qint32, uint16, complex128, half, uint32, @@ -378,6 +444,9 @@ REG_OP(ScatterNdUpdate) *@par Third-party framework compatibility * Compatible with the TensorFlow operator TensorScatterUpdate. + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. 
*/ REG_OP(TensorScatterUpdate) .INPUT(x, TensorType::BasicType()) @@ -387,6 +456,34 @@ REG_OP(TensorScatterUpdate) .OP_END_FACTORY_REG(TensorScatterUpdate) /** +*@brief Uses "updates" to update tensor "data" by "indices". \n + +*@par Inputs: +* Three inputs, including: +*@li data: An ND Tensor . \n +*Must be one of the following types: float16, float32, int32, int8, uint8 +*@li indices: An ND Tensor of type int32 or int64 +*@li updates: An Tensor. Same shape as indices. format:NCHW, NHWC . \n +*Must be one of the following types: float16, float32, int32, int8, uint8 + +*@par Attributes: +*@li axis: An optional attribute. Defaults to 0. + +*@par Outputs: +*y: A Tensor. Has the same type and format as input "data" . \n + +*@par Third-party framework compatibility +* Compatible with the ONNX operator ScatterElements. +*/ +REG_OP(ScatterElements) + .INPUT(data, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(indices, TensorType::IndexNumberType()) + .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .ATTR(axis, Int, 0) + .OP_END_FACTORY_REG(ScatterElements) + +/** *@brief Adds sparse "updates" to a variable reference . \n *@par Inputs: @@ -394,7 +491,7 @@ REG_OP(TensorScatterUpdate) *@li var: An ND Tensor . \n *Must be one of the following types: float16, float32, int32, int8, uint8 -*@li indices: An ND Tensor of type int32 or int64. +*@li indices: An ND Tensor of type int32 or int64 *@li updates: An Tensor. format:NCHW, NHWC . \n @@ -412,10 +509,10 @@ REG_OP(TensorScatterUpdate) * Compatible with the TensorFlow operator ScatterAdd. 
*/ REG_OP(ScatterAdd) - .INPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .INPUT(indices, TensorType::IndexNumberType()) - .INPUT(updates, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) - .OUTPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .ATTR(use_locking, Bool, false) .OP_END_FACTORY_REG(ScatterAdd) @@ -428,7 +525,7 @@ REG_OP(ScatterAdd) *Must be one of the following types: float16, float, int32, int8, uint8 *@li indices: An ND Tensor. -*Must be one of the following types: int32 +*Must be one of the following types: int32 or int64 *@li updates: An ND Tensor. *Must be one of the following types: float16, float, int32, int8, uint8 @@ -443,10 +540,10 @@ REG_OP(ScatterAdd) * Compatible with the TensorFlow operator ScatterDiv. */ REG_OP(ScatterDiv) - .INPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) - .INPUT(indices, TensorType({DT_INT32})) - .INPUT(updates, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) - .OUTPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(indices, TensorType::IndexNumberType()) + .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .ATTR(use_locking, Bool, false) .OP_END_FACTORY_REG(ScatterDiv) @@ -458,7 +555,7 @@ REG_OP(ScatterDiv) *@li var: An ND Tensor. *Must be one of the following types: float16, float, int32, int8, uint8 *@li indices: An ND Tensor. -*Must be one of the following types: int32 +*Must be one of the following types: int32 or int64 *@li updates: An ND Tensor. 
*Must be one of the following types: float16, float, int32, int8, uint8 *@par Attributes: @@ -472,10 +569,10 @@ REG_OP(ScatterDiv) * Compatible with the TensorFlow operator ScatterNdAdd. */ REG_OP(ScatterNdAdd) - .INPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .INPUT(indices, TensorType::IndexNumberType()) - .INPUT(updates, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) - .OUTPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .ATTR(use_locking, Bool, false) .OP_END_FACTORY_REG(ScatterNdAdd) @@ -499,6 +596,9 @@ REG_OP(ScatterNdAdd) *@par Third-party framework compatibility * Compatible with the TensorFlow operator TensorScatterAdd. + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. */ REG_OP(TensorScatterAdd) .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) @@ -515,7 +615,7 @@ REG_OP(TensorScatterAdd) *@li var: An ND Tensor. *Must be one of the following types: float16, float, int32, int8, uint8 *@li indices: An ND Tensor. -*Must be one of the following types: int32, int64 +*Must be one of the following types: int32 or int64 *@li updates: An ND Tensor. *Must be one of the following types: float16, float, int32, int8, uint8 @@ -530,10 +630,10 @@ REG_OP(TensorScatterAdd) * Compatible with the TensorFlow operator ScatterNdSub. 
*/ REG_OP(ScatterNdSub) - .INPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .INPUT(indices, TensorType::IndexNumberType()) - .INPUT(updates, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) - .OUTPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .ATTR(use_locking, Bool, false) .OP_END_FACTORY_REG(ScatterNdSub) @@ -557,6 +657,9 @@ REG_OP(ScatterNdSub) *@par Third-party framework compatibility * Compatible with the TensorFlow operator TensorScatterSub. + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. */ REG_OP(TensorScatterSub) .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) @@ -573,7 +676,7 @@ REG_OP(TensorScatterSub) *@li var: An ND Tensor. *Must be one of the following types: float16, float, int32, int8, uint8 *@li indices: An ND Tensor. -*Must be one of the following types: int32, int64 +*Must be one of the following types: int32 or int64 *@li updates: An ND Tensor. *Must be one of the following types: float16, float, int32, int8, uint8 *@par Attributes: @@ -587,10 +690,10 @@ REG_OP(TensorScatterSub) * Compatible with the TensorFlow operator ScatterSub. 
*/ REG_OP(ScatterSub) - .INPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .INPUT(indices, TensorType::IndexNumberType()) - .INPUT(updates, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) - .OUTPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .ATTR(use_locking, Bool, false) .OP_END_FACTORY_REG(ScatterSub) @@ -761,7 +864,7 @@ REG_OP(ConfusionMatrix) *@li var: An ND Tensor. *Must be one of the following types: float16, float, int32, int8, uint8 *@li indices: An ND Tensor. -*Must be one of the following types: int32 +*Must be one of the following types: int32 or int64 *@li updates: An ND Tensor . \n *Must be one of the following types: float16, float, int32, int8, uint8 @@ -778,7 +881,7 @@ REG_OP(ConfusionMatrix) */ REG_OP(ScatterMul) .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) - .INPUT(indices, TensorType({DT_INT32})) + .INPUT(indices, TensorType::IndexNumberType()) .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .ATTR(use_locking, Bool, false) @@ -791,13 +894,13 @@ REG_OP(ScatterMul) *@par Inputs: * Three inputs, including: *@li var: An ND Tensor. -*Must be one of the following types: float16, float, int32 +*Must be one of the following types: float16, float, int32, int8, uint8 *@li indices: An ND Tensor. -*Must be one of the following types: int32 +*Must be one of the following types: int32 or int64 *@li updates: An ND Tensor. -*Must be one of the following types: float16, float, int32 +*Must be one of the following types: float16, float, int32, int8, uint8 *@par Attributes: *use_locking: An optional bool. Defaults to "False". 
If "True", the operation @@ -810,10 +913,10 @@ REG_OP(ScatterMul) * Compatible with the TensorFlow operator ScatterMin. */ REG_OP(ScatterMin) - .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32})) - .INPUT(indices, TensorType({DT_INT32})) - .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32})) - .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32})) + .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(indices, TensorType::IndexNumberType()) + .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .ATTR(use_locking, Bool, false) .OP_END_FACTORY_REG(ScatterMin) @@ -824,13 +927,13 @@ REG_OP(ScatterMin) * Three inputs, including: *@li var: An ND Tensor . \n -*Must be one of the following types: float16, float, int32 +*Must be one of the following types: float16, float, int32, int8, uint8 *@li indices: An NCHW, NHWC, or ND Tensor . \n -*Must be one of the following types: int32 +*Must be one of the following types: int32 or int64 *@li updates: An NCHW, NHWC, or ND Tensor . \n -*Must be one of the following types: float16, float, int32 +*Must be one of the following types: float16, float, int32, int8, uint8 *@par Attributes: *use_locking: An optional bool. Defaults to "False". @@ -843,10 +946,10 @@ REG_OP(ScatterMin) * Compatible with the TensorFlow operator ScatterMax. 
*/ REG_OP(ScatterMax) - .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32})) - .INPUT(indices, TensorType({DT_INT32})) - .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32})) - .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32})) + .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(indices, TensorType::IndexNumberType()) + .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .ATTR(use_locking, Bool, false) .OP_END_FACTORY_REG(ScatterMax) @@ -860,7 +963,7 @@ REG_OP(ScatterMax) *Must be one of the following types: float16, float, int32, int8, uint8 *@li indices: An ND Tensor . \n -*Must be one of the following types: int32 +*Must be one of the following types: int32 or int64 *@li updates: An ND Tensor . \n *Must be one of the following types: float16, float, int32, int8, uint8 @@ -876,10 +979,10 @@ REG_OP(ScatterMax) * Compatible with the TensorFlow operator ScatterUpdate. */ REG_OP(ScatterUpdate) - .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT8,DT_UINT8})) - .INPUT(indices, TensorType({DT_INT32})) - .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT8,DT_UINT8})) - .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT8,DT_UINT8})) + .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(indices, TensorType::IndexNumberType()) + .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .ATTR(use_locking, Bool, false) .OP_END_FACTORY_REG(ScatterUpdate) @@ -979,6 +1082,137 @@ REG_OP(MatrixDiagV2) .OUTPUT(output, TensorType::BasicType()) .OP_END_FACTORY_REG(MatrixDiagV2) +/** +* @brief Add updates to var_out according to axis and indices. + +* @par Inputs: +* Three inputs, including: +* @li var: A Tensor. Must be one of the following types: +* float16, float32, int32, int8, uint8. 
+* @li indices: A Tensor of the indices, type should be int32. +* @li updates: A Tensor of the same type as "var". + +* @par Attributes: +* @li axis: An required int to specify the axis to perform indices add. + +* @par Outputs: +* @li var_out: A Tensor. Same as input "var". + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator index_add. + +* @par Restrictions: +* Warning:THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(IndexAdd) + .INPUT(var, TensorType({DT_INT32, DT_INT8, DT_UINT8, DT_FLOAT32, DT_FLOAT16})) + .INPUT(indices, TensorType({DT_INT32})) + .INPUT(updates, TensorType({DT_INT32, DT_INT8, DT_UINT8, DT_FLOAT32, DT_FLOAT16})) + .OUTPUT(var_out, TensorType({DT_INT32, DT_INT8, DT_UINT8, DT_FLOAT32, DT_FLOAT16})) + .ATTR(axis, Int, 0) + .OP_END_FACTORY_REG(IndexAdd) + +/** +*@brief: Returns the upper triangular part of a matrix (2-D tensor) or batch of matrices input \n + +*@par Inputs: +* Two inputs, including: +*@li x: A Tensor. Must be one of the following types: +* float16, float32, double, int32, uint8, int16, int8, complex64, int64, +* qint8, quint8, qint32, uint16, complex128, uint32, uint64. +*@li diagonal:(int, optional) – the diagonal to consider。\n + +*@par Outputs: +*y: A Tensor. Has the same type as "x" . \n + +*@par Third-party framework compatibility +* Compatible with the Pytorch operator Triu. +*/ +REG_OP(Triu) + .INPUT(x, TensorType::BasicType()) + .ATTR(diagonal, Int, 0) + .OUTPUT(y, TensorType::BasicType()) + .OP_END_FACTORY_REG(Triu) + +/** +*@brief: Returns the upper triangular part of a matrix (2-D tensor) or batch of matrices input \n + +*@par Inputs: +* Two inputs, including: +*@li x: A Tensor. Must be one of the following types: +* float16, float32, double, int32, uint8, int16, int8, complex64, int64, +* qint8, quint8, qint32, uint16, complex128, uint32, uint64. +*@li diagonal:(int, optional) – the diagonal to consider。\n + +*@par Outputs: +*y: A Tensor. Has the same type as "x" . 
\n + +*@par Third-party framework compatibility +* Compatible with the Pytorch operator Tril. +*/ +REG_OP(Tril) + .INPUT(x, TensorType::BasicType()) + .ATTR(diagonal, Int, 0) + .OUTPUT(y, TensorType::BasicType()) + .OP_END_FACTORY_REG(Tril) +/** +*@brief Concatenates a list of N tensors along the first dimension. +*@par Inputs: +* Two inputs, including: +* @li values: A list of Tensors. Must be one of the following types: int32, float16, float32. +* Tensors to be concatenated. All must have size 1 in the first dimension and same shape. +* It's a dynamic input. +* @li shape: A Tensor of the same type as "x". +* The final shape of the result. Should be equal to the shapes of any input +* but with the number of input values in the first dimension . \n + +*@par Attributes: +*equation: The subscripts for the Einstein summation. \n +*N: tensor size of input \n + +*@par Outputs: +*@li y: Sums the product of the elements of the input operands along dimensions specified + using a notation based on the Einstein summation convention. \n + +*@attention Constraints: +*Input N must be Int. \n + +*@par Third-party framework compatibility +*Compatible with Pytorch einsum operator. +*/ +REG_OP(Einsum) + .DYNAMIC_INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) + .REQUIRED_ATTR(equation, String) + .REQUIRED_ATTR(N, Int) + .OP_END_FACTORY_REG(Einsum) + +/** +*@brief Returns a 2-D tensor with ones on the diagonal and zeros elsewhere. \n + +*@par Inputs: +*No inputs + +*@par Attributes: +*@li num_rows: An required int. \n +*@li num_columns: An optional int.Defaults to 0. \n +*@li batch_shape: An optional ListInt.Defaults to []. \n +*@li dtype: An optional int.Defaults to 0. \n + +*@par Outputs: +*y: A Tensor with targeted type and shape. \n + +*@par Third-party framework compatibility +*Compatible with the Pytorch operator Eye. 
\n +*/ +REG_OP(Eye) + .OUTPUT(y, TensorType::BasicType()) /* "Result, has targeted element type" */ + .REQUIRED_ATTR(num_rows, Int) + .ATTR(num_columns, Int, 0) + .ATTR(batch_shape, ListInt, {}) + .ATTR(dtype, Int, 0) + .OP_END_FACTORY_REG(Eye) + } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_MATRIX_CALCULATION_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/nn_batch_norm_ops.h b/third_party/fwkacllib/inc/ops/nn_batch_norm_ops.h index 0c6a5dff..9629976e 100644 --- a/third_party/fwkacllib/inc/ops/nn_batch_norm_ops.h +++ b/third_party/fwkacllib/inc/ops/nn_batch_norm_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -145,6 +145,64 @@ REG_OP(BatchNorm) *@brief Performs batch normalization . \n *@par Inputs: +* Five inputs, including: (NHWC, NCHW, or NC1HWC0 supported) +*@li x: A 3D or 6D Tensor of type float16 or float32, with format NDHWC or NCDHW for 4D or NDC1HWC0 for 6D. +*@li scale: A Tensor of type float32. Must be 1D if input "x" is with format NDHWC or NCDHW. Must be 6D +if input "x" is with format NDC1HWC0. Specifies the scaling factor. +*@li offset: A Tensor of type float32. Must be 3D if input "x" is with format NDHWC or NCDHW. Must be 6D +if input "x" is with format NC1HWC0. Specifies the offset. +*@li mean: A Tensor of type float32. Must be 3D if input "x" is with format NDHWC or NCDHW. Must be 6D +if input "x" is with format NC1HWC0. Specifies the mean used for inference. Must be "None" if the +operation is used for training. +*@li variance: A Tensor of type float32. Must be 3D if input "x" is with format NHWC or NCHW. Must be +5D if input "x" is with format NC1HWC0. Specifies the variance used for inference. Must be "None" +if the operation is used for training . 
\n + +*@par Attributes: +*@li epsilon: An optional float32, specifying the small value added to variance to avoid dividing by zero. Defaults to "0.0001". +*@li data_format: An optional string, specifying the format of "x". Defaults to "NHWC". +*@li is_training: An optional bool, specifying if the operation is used for training or inference. Defaults to "True" . \n + +*@par Outputs: +* Five outputs, including: (NHWC, NCHW, or NC1HWC0 supported) +*@li y: A 3D or 6D Tensor of type float16 or float32 for the normalized "x", with format NDHWC or NCDHW for 4D or NDC1HWC0 for 6D. +*@li batch_mean: A Tensor of type float32. Must be 3D if input "x" is with format NDHWC or NCDHW. Must be 6D +if input "x" is with format NDC1HWC0. Specifies the mean of "x". +*@li batch_variance: A Tensor of type float32. Must be 1D if input "x" is with format NDHWC or NCDHW. +Must be 6D if input "x" is with format NDC1HWC0. Specifies the variance of "x". +*@li reserve_space_1: An optional Tensor of type float32. Must be 1D if input "x" is with format NDHWC or NCDHW. +Must be 6D if input "x" is with format NDC1HWC0. Specifies the mean of "x" for gradient computation. Pass "None" to skip this output. +*@li reserve_space_2: An optional Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. +Must be 6D if input "x" is with format NDC1HWC0. Specifies the variance of "x" for gradient computation. Pass "None" to skip this output . \n + +*@attention Constraints: +*@li If the operation is used for inference and outputs "reserve_space_1" and "reserve_space_2" are available, +then "reserve_space_1" has the same value as "mean" and "reserve_space_2" has the same value as "variance". +*@li For Ascend 310, the result accuracy fails to reach 1‰ due to the square root instruction . \n + +*@par Third-party framework compatibility +*@li Compatible with the TensorFlow operator fused_batch_norm. +*@li Compatible with the TensorFlow operator fused_batch_norm_v2. 
+*/ +REG_OP(BatchNorm3D) + .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(scale, TensorType({DT_FLOAT})) + .INPUT(offset, TensorType({DT_FLOAT})) + .OPTIONAL_INPUT(mean, TensorType({DT_FLOAT})) + .OPTIONAL_INPUT(variance, TensorType({DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT})) + .OUTPUT(batch_mean, TensorType({DT_FLOAT})) + .OUTPUT(batch_variance, TensorType({DT_FLOAT})) + .OUTPUT(reserve_space_1, TensorType({DT_FLOAT})) + .OUTPUT(reserve_space_2, TensorType({DT_FLOAT})) + .ATTR(epsilon, Float, 0.0001) + .ATTR(data_format, String, "NCDHW") + .ATTR(is_training, Bool, true) + .OP_END_FACTORY_REG(BatchNorm3D) +/** +*@brief Performs batch normalization . \n + +*@par Inputs: * Five inputs, including: (NHWC or NCHW supported) *@li x: A 4D Tensor of type float16 or float32. *@li scale: A 1D Tensor of type float32, for the scaling factor. @@ -244,6 +302,52 @@ REG_OP(BatchNormGrad) *@par Inputs: * Five inputs, including: +*@li y_backprop: A 3D or 6D Tensor of type float16 or float32, with format NDHWC, NCDHW, or NDC1HWC0, for the gradient. +*@li x: A 3D or 6D Tensor of type float16 or float32, with format NDHWC, NCDHW, or NDC1HWC0. +*@li scale: A 3D or 6D Tensor of type float32, with format NDHWC, NCDHW, or NDC1HWC0. +*@li reserve_space_1: A 3D or 6D Tensor of type float32, with format NDHWC, NCDHW, or NC1HWC0. It is an output of BatchNorm. +*@li reserve_space_2: A 3D or 6D Tensor of type float32, with format NDHWC, NCDHW, or NC1HWC0. It is an output of BatchNorm . \n + +*@par Attributes: +*@li epsilon: An optional float32. Defaults to "0.0001". A small float number added to the variance of "x". +*@li data_format: An optional string. Defaults to "NCDHW". +*@li is_training: An optional bool. Defaults to "true". Specifies the operation is for training (default) or inference . \n + +*@par Outputs: +*@li x_backprop: A Tensor of type float16 or float32, with format NHWC, NCHW, or NC1HWC0, for the offset of "x". 
+*@li scale_backprop: A Tensor of type float32, with format NDHWC, NCDHW, or NDC1HWC0, for the offset of "scale". +*@li *offset_backprop: A Tensor of type float32, with format NDHWC, NCDHW, or NDC1HWC0, for the offset of "offset". +*@li *reserve_space_4: A Tensor of type float32, with shape NDHWC, NCDHW, or NDC1HWC0. Pass "None" to skip this output. +*@li *reserve_space_5: A Tensor of type float32, with shape NDHWC, NCDHW, or NDC1HWC0. Pass "None" to skip this output . \n + +*@attention Constraints: +* The preceding layer of this operator must be operator BatchNorm . \n + +*@see BatchNorm +*@par Third-party framework compatibility +* Compatible with the TensorFlow operators FusedBatchNormGradV2 and FusedBatchNorm3DGrad. +*/ +REG_OP(BatchNorm3DGrad) + .INPUT(y_backprop, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(scale, TensorType({DT_FLOAT})) + .INPUT(reserve_space_1, TensorType({DT_FLOAT})) + .INPUT(reserve_space_2, TensorType({DT_FLOAT})) + .OUTPUT(x_backprop, TensorType({DT_FLOAT16,DT_FLOAT})) + .OUTPUT(scale_backprop, TensorType({DT_FLOAT})) + .OUTPUT(offset_backprop, TensorType({DT_FLOAT})) + .OUTPUT(reserve_space_4, TensorType({DT_FLOAT})) + .OUTPUT(reserve_space_5, TensorType({DT_FLOAT})) + .ATTR(epsilon, Float, 0.0001) + .ATTR(data_format, String, "NCDHW") + .ATTR(is_training, Bool, true) + .OP_END_FACTORY_REG(BatchNorm3DGrad) + +/** +*@brief Performs the backpropagation of BatchNorm . \n + +*@par Inputs: +* Five inputs, including: *@li y_backprop: A 4D Tensor of type float16 or float32, with format NHWC or NCHW, for the gradient. *@li x: A 4D Tensor of type float16 or float32, with format NHWC or NCHW. *@li scale: A 4D Tensor of type float32, with format NHWC or NCHW. @@ -315,35 +419,7 @@ REG_OP(BNInference) .ATTR(use_global_stats, Bool,true) .ATTR(mode, Int,1) .OP_END_FACTORY_REG(BNInference) -/** -*@brief aicpu batch normalization host . \n -*@par Inputs: - -*@li mean: A Tensor of type float32 or float16. 
Must be 1D if input "x" Specifies the mean used for inference. -*@li variance: A Tensor of type float32 or float16 . Must be 1D if input "x" Specifies the variance used for inference. -*@li momentum: An optional float, mean and variance's Scale factor -*@par Attributes: -*@li epsilon: An optional float32, specifying the small value added to variance to avoid dividing by zero. Defaults to "0.00001". -*@li use_global_stats: mean inference mode , only can be "True". -*@li mode: An optional attr, not use -*@par Outputs: -*@li alpha: A Tensor of type float16 or float32 for the cpu calculate mean -*@li beta: A Tensor of type float16 or float32 for the cpu calculate variance -*/ -REG_OP(BnHost) - .INPUT(mean, TensorType({DT_FLOAT, DT_FLOAT16})) - .INPUT(variance, TensorType({DT_FLOAT, DT_FLOAT16})) - .INPUT(momentum, TensorType({DT_FLOAT16,DT_FLOAT})) - .OPTIONAL_INPUT(scale, TensorType({DT_FLOAT16,DT_FLOAT})) - .OPTIONAL_INPUT(offset, TensorType({DT_FLOAT16,DT_FLOAT})) - .ATTR(epsilon, Float, 0.00001) - .ATTR(mode, Int, 1) - .ATTR(use_global_stats, Bool, true) - .OUTPUT(alpha, TensorType({DT_FLOAT, DT_FLOAT16})) - .OUTPUT(beta, TensorType({DT_FLOAT, DT_FLOAT16})) - .OUTPUT(mu, TensorType({DT_FLOAT16,DT_FLOAT})) - .OP_END_FACTORY_REG(BnHost) /** *@brief Performs batch normalization . \n diff --git a/third_party/fwkacllib/inc/ops/nn_calculation_ops.h b/third_party/fwkacllib/inc/ops/nn_calculation_ops.h index 35296870..98473c65 100644 --- a/third_party/fwkacllib/inc/ops/nn_calculation_ops.h +++ b/third_party/fwkacllib/inc/ops/nn_calculation_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -365,6 +365,25 @@ REG_OP(BiasAddGrad) * 4-D with shape [batch, out_height, out_width, out_channels] * or [batch, out_channels, out_height, out_width]. 
* Gradients with respect to the output of the convolution. + *\n + *\n + * The following are the supported data types and data formats: +*@verbatim + | Tensor | out_bckprop | filter | y + ------------|-------------|---------|-------- + | Data Type | float16 | float16 | float16 + | |-------------|---------|-------- + | | float32 | float32 | float32 + | |-------------|---------|-------- + | | float64 | float64 | float64 + ------------|-------------|---------|-------- + | Format | NCHW | NCHW | NCHW + | | NHWC | HWCN | NHWC +@endverbatim + * For float32 and float64 type, the actual calculation on the chip is based on + * float16. + *\n + * *@par Attributes: * Five attributes: * @li strides: A tuple/list of 4 integers. The stride of the sliding window @@ -377,8 +396,53 @@ REG_OP(BiasAddGrad) * channels. * @li data_format: An optional string from: "NHWC", "NCHW". Defaults to * "NHWC". Specify the data format of the input and output data. + *\n + *\n + * The following value range restrictions must be met: +*@verbatim + | Name | Field | Scope + -------------------|----------|-------------- + | input_size | H | [1, 4096] + | | W | [1, 4096] + -------------------|----------|-------------- + | Filter | H | [1, 255] + | | W | [1, 255] + -------------------|----------|-------------- + | out_backprop | H*strideH| [1, 4096] + | | W*strideW| [1, 4096] + -------------------|----------|-------------- + | y(fmap) | H | [1, 4096] + | | W | [1, 4096] + -------------------|----------|-------------- + | Stride | H | [1, 63] + | | W | [1, 63] + -------------------|----------|-------------- + | Padding | Top | [0, 255] + | | Bottom | [0, 255] + | | Left | [0, 255] + | | Right | [0, 255] + -------------------|----------|-------------- + | Dilation | H | [1, 255] + | | W | [1, 255] + +@endverbatim + * In Ascend910, fmap or out_backprop's H and W not support 1 when + * fmap_h + pad_top + pad_bottom != (filter_height - 1) * dilation_h + 1 + * If filter_h = 1 and filter_w = 1, out_backprop_w * 
stride_h * stride_w < 4096 + *\n + * *@par Outputs: * y: A Tensor. Has the same type as filter,and has same format as input_size. + *\n + * out_backprop_height = (fmap_height + pad_top + pad_bottom - + * (dilation_h * (filter_height - 1) + 1)) + * / stride_h + 1 + *\n + * out_backprop_width = (fmap_width + pad_left + pad_right - + * (dilation_w * (filter_width - 1) + 1)) + * / stride_w + 1 + *\n + * *@par Third-party framework compatibility * Compatible with Tensorflow's conv2d_backprop_input */ @@ -454,6 +518,21 @@ REG_OP(Conv2DBackpropInputD) * @li bias: An optional tensor. Must have the same type as "y". * @li offset_w: An optional 1D tensor for quantized deconvolution. * Type is int8. Reserved.\n + *\n + *\n + * The following are the supported data types and data formats: +*@verbatim + | Tensor | x | filter | bias | y + ------------|---------|---------|---------|-------- + | Data Type | float16 | float16 | float16 | float16 + | |---------|---------|---------|-------- + | | int8 | int8 | int32 | int32 + ------------|---------|---------|---------|-------- + | Format | NCHW | NCHW | ND | NCHW +@endverbatim + * For int8, a dequant or requant operator must be followed. + *\n + * *@par Attributes: * Six attributes: * @li strides: A tuple or list of 2 integers. The stride of the sliding window @@ -467,9 +546,54 @@ REG_OP(Conv2DBackpropInputD) * @li data_format: An optional string from: "NCHW". Defaults to "NCHW". \n Specify the data format of the input and output data. * @li offset_x: An optional integer for quantized deconvolution. - * Defaults to "0". + * The negative offset added to the input image for int8 type. Ensure offset_x + * within the effective range of int8 [-128, 127]. Defaults to "0". 
+ *\n + *\n + * The following value range restrictions must be met: +*@verbatim + | Name | Field | Scope + -------------------|----------|-------------- + | x (out_backprop) | H*strideH| [1, 4096] + | | W*strideW| [1, 4096] + -------------------|----------|-------------- + | Filter | H | [1, 255] + | | W | [1, 255] + -------------------|----------|-------------- + | y (fmap) | H | [1, 4096] + | | W | [1, 4096] + -------------------|----------|-------------- + | Stride | H | [1, 63] + | | W | [1, 63] + -------------------|----------|-------------- + | Padding | Top | [0, 255] + | | Bottom | [0, 255] + | | Left | [0, 255] + | | Right | [0, 255] + -------------------|----------|-------------- + | Dilation | H | [1, 255] + | | W | [1, 255] + -------------------|----------|-------------- + | Offset_x | | [-128, 127] + +@endverbatim + * In Ascend910, fmap or out_backprop's H and W not support 1 when + * fmap_h + pad_top + pad_bottom != (filter_height - 1) * dilation_h + 1 + * If filter_h = 1 and filter_w = 1, out_backprop_w * stride_h * stride_w < 4096 + *\n + * *@par Outputs: * y: A Tensor. 4D tensor with shape [batch, channels, height, width]. + *\n + * out_backprop_height = (fmap_height + pad_top + pad_bottom - + * (dilation_h * (filter_height - 1) + 1)) + * / stride_h + 1 + *\n + * out_backprop_width = (fmap_width + pad_left + pad_right - + * (dilation_w * (filter_width - 1) + 1)) + * / stride_w + 1 + *\n + * * When type of x is float16, the type of y must be float16. * When type of x is int8, the type of y must be int32. */ @@ -502,6 +626,25 @@ REG_OP(Deconvolution) * [batch, out_height, out_width, out_channels] or [batch, out_channels, * out_height, out_width]. Gradients with respect to the output of the * convolution. 
+ *\n + *\n + * The following are the supported data types and data formats: +*@verbatim + | Tensor | x | out_backprop | y + ------------|---------|--------------|--------- + | Data Type | float16 | float16 | float16 + | |---------|--------------|--------- + | | float32 | float32 | float32 + | |---------|--------------|--------- + | | float64 | float64 | float64 + |-----------|---------|--------------|--------- + | Format | NCHW | NCHW | NCHW + | | NHWC | NHWC | HWCN +@endverbatim + * For float32 and float64 type of x and outbackprop, the actual calculation on the chip + * is based on float16. + *\n + * *@par Attributes: * Five attributes: * @li strides: A tuple/list of 4 integers. The stride of the sliding window @@ -514,8 +657,52 @@ REG_OP(Deconvolution) * channels. * @li data_format: An optional string from: "NHWC", "NCHW". Defaults to * "NHWC". Specify the data format of the input and output data. + *\n +*\n +* The following value range restrictions must be met: +*@verbatim + | Name | Field | Scope + -------------------|----------|-------------- + | x(fmap) | H | [1, 4096] + | | W | [1, 4096] + -------------------|----------|-------------- + | Filter Size | H | [1, 255] + | | W | [1, 255] + -------------------|----------|-------------- + | out_backprop | H | [1, 4096] + | | W | [1, 4096] + -------------------|----------|-------------- + | y | H | [1, 4096] + | | W | [1, 4096] + -------------------|----------|-------------- + | Stride | H | [1, 63] + | | W | [1, 63] + -------------------|----------|-------------- + | Padding | Top | [0, 255] + | | Bottom | [0, 255] + | | Left | [0, 255] + | | Right | [0, 255] + -------------------|----------|-------------- + | Dilation | H | [1, 255] + | | W | [1, 255] + +@endverbatim + * In Ascend910, out_backprop's H and W not support 1 when + * fmap_h + pad_top + pad_bottom != (filter_height - 1) * dilation_h + 1 + *\n + * *@par Outputs: * y: A Tensor. Has the same type as x, has the same format as filter_size. 
+ *\n + * out_backprop_height = (in_height + pad_top + pad_bottom - + * (dilation_h * (filter_height - 1) + 1)) + * / stride_h + 1 + *\n + * out_backprop_width = (in_width + pad_left + pad_right - + * (dilation_w * (filter_width - 1) + 1)) + * / stride_w + 1 + *\n + * *@par Third-party framework compatibility * Compatible with Tensorflow's conv2d_backprop_filter */ @@ -597,16 +784,14 @@ REG_OP(Conv2DBackpropFilterD) | Tensor | x | filter | bias | y ------------|---------|---------|---------|-------- | Data Type | float16 | float16 | float16 | float16 - | |---------|---------|---------|-------- | | float32 | float32 | float32 | float32 - | |---------|---------|---------|-------- | | int8 | int8 | int32 | int32 ------------|---------|---------|---------|-------- | Format | NCHW | NCHW | ND | NCHW | | NHWC | HWCN | | NHWC @endverbatim * For float32 type, the actual calculation on the chip is based on -* float16. For int8, a dequant or requant operator must be followed. +* float16. *\n * *@par Attributes: @@ -617,8 +802,7 @@ REG_OP(Conv2DBackpropFilterD) * (top, bottom, left, right) side of the input. *@li dilations: Optional. A list of 4 integers. The dilation factor for each * dimension of input. The dimension order is determined by the data format of -* "x". The N and C dimensions must be set to 1. The H and W dimensions must be -* set to 1 for int8 type. Defaults to [1, 1, 1, 1]. +* "x". The N and C dimensions must be set to 1. Defaults to [1, 1, 1, 1]. *@li groups: Optional. An integer of type int32. The number of blocked * connections from input channels to output channels. In_channels and * out_channels must both be divisible by "groups". Defaults to 1. @@ -652,6 +836,8 @@ REG_OP(Conv2DBackpropFilterD) | Offset_x | | [-128, 127] @endverbatim +* The W dimension of the input image supports cases exceeding 4096, but it may +* cause compilation errors. 
*\n * *@par Outputs: @@ -666,21 +852,6 @@ REG_OP(Conv2DBackpropFilterD) * out_width = (in_width + pad_left + pad_right - * (dilation_w * (filter_width - 1) + 1)) * / stride_w + 1 -* -*@attention Constraints: -*@li The following restrictions on the output must be met: -*@verbatim - | Output | Restrictions - ----------|-------------------------------- - | H == 1 | H * W(input) == H * W(filter) - | W == 1 | - ----------|-------------------------------- - | H != 1 | W(input) == W(filter) - | W == 1 | Only for Ascend310 Hi3796V300CS -@endverbatim -* "H * W (input)" indicates the image size after padding and "H * W (filter)" -* indicates the filter size after dilation."W(input)" and W(filter) indicate -* the same rule on the W dimension. *\n * *@par Quantization supported or not @@ -778,7 +949,7 @@ REG_OP(Conv2DCompress) * With the format "HWCN" , the data is stored in the order of: [filter_height, * filter_width, in_channels / groups, out_channels]. *@li offsets: A 4D tensor of x-y coordinates offset and mask. With the format -* "NHWC", the data is stored in the order of: [batch, in_height, in_width, +* "NHWC", the data is stored in the order of: [batch, out_height, out_width, * deformable_groups * filter_height * filter_width * 3]. *@li bias: An optional 1D tensor of additive biases to the filter outputs. * The data is stored in the order of: [out_channels]. @@ -816,31 +987,20 @@ REG_OP(Conv2DCompress) *@li deformable_groups: Optional. An integer of type int32. The number of * deformable group partitions. In_channels must be divisible by * "deformable_groups". Defaults to 1. +*@li modulated: Optional. Specify version of DeformableConv2D, true means v2, +* false means v1, currently only support v2. 
*\n *\n * The following value range restrictions must be met: *@verbatim | Name | Field | Scope --------------------|--------|---------------------------- - | Input Image Size | H | [1, 100000] - | | W | [1, 4096] - --------------------|--------|---------------------------- - | Filter Size | H | [1, 255] - | | W | [1, 255] + | Input Image Size | H | [1, 100000 / filter_height] + | | W | [1, 4096 / filter_width] --------------------|--------|---------------------------- - | Stride | H | [1, 63] + | Filter Size | H | [1, 63] | | W | [1, 63] - --------------------|--------|---------------------------- - | Padding | Top | [0, 255] - | | Bottom | [0, 255] - | | Left | [0, 255] - | | Right | [0, 255] - ------------ -------|--------|---------------------------- - | Dilation | H | [1, 255] - | | W | [1, 255] @endverbatim -* "W(input)" indicate the image width after padding and W(filter) indicates the -* filter width after dilation. *\n * *@par Outputs: @@ -855,21 +1015,7 @@ REG_OP(Conv2DCompress) * out_width = (in_width + pad_left + pad_right - * (dilation_w * (filter_width - 1) + 1)) * / stride_w + 1 -* -*@attention Constraints: -*@li The following restrictions on the output must be met: -*@verbatim - | Output | Restrictions - ----------|-------------------------------- - | H == 1 | H * W(input) == H * W(filter) - | W == 1 | - ----------|-------------------------------- - | H != 1 | W(input) == W(filter) - | W == 1 | Only for Ascend310 Hi3796V300CS -@endverbatim -* "H * W(input)" indicates the image size after padding and "H * W(filter)" -* indicates the filter size after dilation. "W(input)" and W(filter) indicate -* the same rule on the W dimension. 
+*\n * *@par Quantization supported or not *@li No @@ -891,6 +1037,7 @@ REG_OP(DeformableConv2D) .ATTR(groups, Int, 1) .ATTR(data_format, String, "NHWC") .ATTR(deformable_groups, Int, 1) + .ATTR(modulated, Bool, true) .OP_END_FACTORY_REG(DeformableConv2D) /** @@ -916,12 +1063,12 @@ REG_OP(DeformableConv2D) *@par Attributes: * @li groups: Number of blocked connections from input channels to output - * channels. Reserved. + * channels. * @li data_format: An optional string from: "NDHWC", "NCDHW". * Defaults to "NDHWC". Specify the data format of the input and output data. * @li dilations: A list of 5 integers. Specifies the dilation factor for each - * dimension of "x", now only support [1,1,1,1,1] - * The N and C dimensions must be 1. Has the same format as "x". + * dimension of "x". + * The N, C and D dimensions must be 1. Has the same format as "x". * @li offset_x: An optional int. Input offset, used for quantized inference. * Defaults to 0. Reserved . \n @@ -967,8 +1114,8 @@ REG_OP(Conv3D) *@par Required Attributes: * @li strides: A list of 5 integers. Specifies the stride of the sliding window - * for each dimension of "x". - * The N and C dimensions must be 1. Has the same format as "x". + * for each dimension of "out_backprop". + * The N and C dimensions must be 1. Has the same format as "out_backprop". * @li pads: A list of 6 integers. * Supports only padding along the D, H and W dimensions in sequence of head, * tail, top, bottom, left and right . \n @@ -976,14 +1123,15 @@ REG_OP(Conv3D) *@par Attributes: * Three attributes: * @li groups: Number of blocked connections from input channels to output - * channels. Reserved. + * channels. * @li data_format: An optional string from: "NDHWC", "NCDHW". * Defaults to "NDHWC". Specify the data format of the input and output data. * @li dilations: A tuple/list of 5 integers, The dilation factor for each - * dimension of the input, now only support [1,1,1,1,1] + * dimension of the input. 
+ * The N, C and D dimensions must be 1. Has the same format as "out_backprop". *@par Outputs: - * y: A Tensor. Has the same type as filter,and has same format as input_size + * y: A Tensor. Has the same type as filter,and has same format as "input_size" *@par Third-party framework compatibility * Compatible with Tensorflow's conv3d_backprop_input @@ -1011,8 +1159,8 @@ REG_OP(Conv3DBackpropInput) *@par Required Attributes: * @li strides: A list of 5 integers. Specifies the stride of the sliding window - * for each dimension of "x". - * The N and C dimensions must be 1. Has the same format as "x". + * for each dimension of "out_backprop". + * The N and C dimensions must be 1. Has the same format as "out_backprop". * @li pads: A list of 6 integers. Supports only padding along the D, H and W * dimensions in sequence of head, tail, top, bottom, left and right. * @li input_size: A tuple/list of type int32, int64. An integer vector @@ -1023,13 +1171,14 @@ REG_OP(Conv3DBackpropInput) *@par Attributes: * Three attributes: * @li groups: Number of blocked connections from input channels to output - * channels. Reserved. + * channels. * @li data_format: An optional string from: "NDHWC", "NCDHW". * Defaults to "NDHWC". Specify the data format of the input and output data. * @li dilations: A tuple/list of 5 integers, The dilation factor for each - * dimension of input, now only support [1,1,1,1,1] + * dimension of input. + * The N, C and D dimensions must be 1. Has the same format as "out_backprop". *@par Outputs: - * y: A Tensor. Has the same type and data format as out_backprop. + * y: A Tensor. Has the same type and data format as "out_backprop". *@par Third-party framework compatibility * Compatible with Tensorflow's conv3d_backprop_input @@ -1072,9 +1221,7 @@ REG_OP(Conv3DBackpropInputD) * @li c_t: A optinal Tensor dtype of float16, float32. The cell state at time t . \n *@par Third-party framework compatibility: -* Compatible with the Pytorch operator adds. 
-*@par Restrictions: -*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +* Compatible with the Caffe operator LSTM. */ REG_OP(LSTM) .INPUT(x, TensorType({DT_FLOAT16})) @@ -1121,14 +1268,15 @@ REG_OP(LSTM) *@par Attributes: * Three attributes: * @li dilations: A tuple/list of 5 integers, The dilation factor for each - * dimension of input, now only support [1,1,1,1,1]. + * dimension of input. + * The N, C and D dimensions must be 1. Has the same format as "x". * @li groups: Number of blocked connections from input channels to output - * channels. Reserved. + * channels. * @li data_format: An optional string from: "NDHWC", "NCDHW". * Defaults to "NDHWC". Specify the data format of the input and output data. *@par Outputs: - * y: A Tensor that has the same type as x + * y: A Tensor that has the same type as "x" * and the format is NDHWC, NCDHW or DHWCN. *@par Third-party framework compatibility * Compatible with Tensorflow's conv3d_backprop_filter @@ -1172,9 +1320,10 @@ REG_OP(Conv3DBackpropFilter) *@par Attributes: * Three attributes: * @li dilations: A tuple/list of 5 integers, The dilation factor for each - * dimension of input, now only support [1,1,1,1,1]. + * dimension of input. + * The N, C and D dimensions must be 1. Has the same format as "x". * @li groups: Number of blocked connections from input channels to output - * channels. Reserved. + * channels. * @li data_format: An optional string from: "NDHWC", "NCDHW". * Defaults to "NDHWC". Specify the data format of the input and output data. @@ -1224,15 +1373,16 @@ REG_OP(Conv3DBackpropFilterD) *@par Attributes: * Five attributes: * @li groups: Number of blocked connections from input channels to output - * channels. Reserved. + * channels. * @li dilations: A tuple/list of 5 integers, - * The dilation factor for each dimension of input, now only support [1,1,1,1,1] + * The dilation factor for each dimension of input. + * The N, C and D dimensions must be 1. Has the same format as "x". 
* @li data_format: An optional string from: "NDHWC", "NCDHW". * Defaults to "NDHWC". Specify the data format of the input and output data. * @li output_padding: The size will be added in the output shape. * @li offset_x: Input offset_x value. Reserved. *@par Outputs: - * y: A Tensor. Has the same type and format as x. + * y: A Tensor. Has the same type and format as "x". */ REG_OP(Conv3DTranspose) .INPUT(input_size, TensorType({DT_INT32, DT_INT64})) @@ -1273,15 +1423,16 @@ REG_OP(Conv3DTranspose) *@par Attributes: * Five attributes: * @li dilations: A tuple/list of 5 integers, The dilation factor for each - * dimension of input, now only support [1,1,1,1,1] + * dimension of input. + * The N, C and D dimensions must be 1. Has the same format as "x". * @li groups: Number of blocked connections from input channels to output - * channels. Reserved. + * channels. * @li data_format: An optional string from: "NDHWC", "NCDHW". * Defaults to "NDHWC". Specify the data format of the input and output data. * @li output_padding: The size will be added in the output shape. * @li offset_x: Input offset_x value. Reserved. *@par Outputs: - * y: A Tensor. Has the same type and format as x. + * y: A Tensor. Has the same type and format as "x". *@par Restrictions: * Warning: THIS FUNCTION IS DEPRECATED. Please use Conv3DTranspose instead. */ @@ -1316,6 +1467,22 @@ REG_OP(Conv3DTransposeD) * or [out_channels, in_channel, filter_height, filter_width]. * @li bias: An optional 1D tensor of type float16 or int32. Format is "ND". * @li offset_w: An optional 1D tensor for quantized inference. Reserved. 
+ *\n + *\n + * The following are the supported data types and data formats: +*@verbatim + | Tensor | x | filter | bias | y + ------------|---------|---------|---------|-------- + | Data Type | float16 | float16 | float16 | float16 + | |---------|---------|---------|-------- + | | int8 | int8 | int32 | int32 + ------------|---------|---------|---------|-------- + | Format | NCHW | NCHW | ND | NCHW + | | NHWC | HWCN | | NHWC +@endverbatim + * For int8, a dequant or requant operator must be followed. + *\n + * *@par Required Attributes: * @li strides: A required tuple/list of 4 integers. The stride of the sliding * window for H/W dimension. The index of H/W is same as data_format. @@ -1333,10 +1500,58 @@ REG_OP(Conv3DTransposeD) * @li output_padding: The size will be added in the output shape. Defaults * to [0, 0, 0, 0]. * @li offset_x: An optional int. Input offset, used for quantized inference. - * Defaults to "0". + * The negative offset added to the input image for int8 type. Ensure offset_x + * within the effective range of int8 [-128, 127]. Defaults to "0". 
+ *\n + *\n + * The following value range restrictions must be met: +*@verbatim + | Name | Field | Scope + -------------------|----------|-------------- + | input_size | H | [1, 4096] + | | W | [1, 4096] + -------------------|----------|-------------- + | x (out_backprop) | H*strideH| [1, 4096] + | | W*strideW| [1, 4096] + -------------------|----------|-------------- + | filter | H | [1, 255] + | | W | [1, 255] + -------------------|----------|-------------- + | y (fmap) | H | [1, 4096] + | | W | [1, 4096] + -------------------|----------|-------------- + | Stride | H | [1, 63] + | | W | [1, 63] + -------------------|----------|-------------- + | Padding | Top | [0, 255] + | | Bottom | [0, 255] + | | Left | [0, 255] + | | Right | [0, 255] + -------------------|----------|-------------- + | Dilation | H | [1, 255] + | | W | [1, 255] + -------------------|----------|-------------- + | Offset_x | | [-128, 127] + +@endverbatim + * In Ascend910, fmap or out_backprop's H and W not support 1 when + * fmap_h + pad_top + pad_bottom != (filter_height - 1) * dilation_h + 1 + * If filter_h = 1 and filter_w = 1, out_backprop_w * stride_h * stride_w < 4096 + *\n + * *@par Outputs: * y: A Tensor. A Tensor of type float16 or int32, and has same format as * input_size. + *\n + * out_backprop_height = (fmap_height + pad_top + pad_bottom - + * (dilation_h * (filter_height - 1) + 1)) + * / stride_h + 1 + *\n + * out_backprop_width = (fmap_width + pad_left + pad_right - + * (dilation_w * (filter_width - 1) + 1)) + * / stride_w + 1 + *\n + * */ REG_OP(Conv2DTranspose) .INPUT(input_size, TensorType({DT_INT32, DT_INT64})) @@ -1405,21 +1620,22 @@ REG_OP(Conv2DTransposeD) /** *@brief Computes the deformed convolution output with the expected input *@par Inputs: - * Four inputs: + * Two inputs: * @li x: A Tensor of type float16,float32 * @li offsets: A Tensor of type float16,float32.Deformation offset parameter. 
*@par Required Attributes: * @li strides: A tuple/list of 4 integers.The stride of the sliding window for * height and width for H/W dimension. - * @li pads: A tuple/list of 4 integers.Padding added to each dimension + * @li pads: A tuple/list of 4 integers.Padding added to H/W dimension * of the input. * @li ksize: A tuple/list of 2 integers.kernel size. *@par Attributes: - * Three attributes: + * Four attributes: * @li dilations: A tuple/list of 4 integers, The dilation factor for each dimension * of input. Defaults to [1, 1, 1, 1] * @li data_format: An optional string from: "NCHW", "NHWC". Defaults to "NCHW". Specify the data format of the input x. * @li deformable_groups: Specify the c-axis grouping number of input x. + * @li modulated: Specify version of DeformableConv2D, true means v2, false means v1 *@par Outputs: * y: A Tensor. A Tensor of type float16, float32. */ @@ -1433,7 +1649,69 @@ REG_OP(DeformableOffsets) .ATTR(dilations, ListInt, {1, 1, 1, 1}) .ATTR(data_format, String, "NCHW") .ATTR(deformable_groups, Int, 1) + .ATTR(modulated, Bool, true) .OP_END_FACTORY_REG(DeformableOffsets) +/** +*@brief Computes the gradients of DeformableOffsets with respect to input and offsets +*@par Inputs: + * Three inputs: + * @li grad: A Tensor of type float16,float32. gradients with respect to DeformableOffsets output + * @li x: A Tensor of type float16,float32. + * @li offsets: A Tensor of type float16,float32.Deformation offset parameter. +*@par Required Attributes: + * @li strides: A tuple/list of 4 integers.The stride of the sliding window for + * height and width for H/W dimension. + * @li pads: A tuple/list of 4 integers.Padding added to H/W dimension + * of the input. + * @li ksize: A tuple/list of 2 integers.kernel size. +*@par Attributes: + * Three attributes: + * @li dilations: A tuple/list of 4 integers, The dilation factor for each dimension + * of input. Defaults to [1, 1, 1, 1] + * @li data_format: An optional string from: "NCHW", "NHWC". 
Defaults to "NCHW". Specify the data format of the input x. + * @li deformable_groups: Specify the c-axis grouping number of input x. + * @li modulated: Specify version of DeformableConv2D, true means v2, false means v1. +*@par Outputs: + * grad_x: A Tensor of type float16, float32. Gradients with respect to input_x + * grad_offsets: A Tensor of type float16, float32. Gradients with respect to input_offsets +*/ +REG_OP(DeformableOffsetsGrad) + .INPUT(grad, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(offsets, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(grad_x, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(grad_offsets, TensorType({DT_FLOAT16, DT_FLOAT})) + .REQUIRED_ATTR(strides, ListInt) + .REQUIRED_ATTR(pads, ListInt) + .REQUIRED_ATTR(ksize, ListInt) + .ATTR(dilations, ListInt, {1, 1, 1, 1}) + .ATTR(data_format, String, "NCHW") + .ATTR(deformable_groups, Int, 1) + .ATTR(modulated, Bool, true) + .OP_END_FACTORY_REG(DeformableOffsetsGrad) + +/** +*@brief Computes the deformed dilation output with the expected input +*@par Inputs: + * One input: + * @li x: A Tensor of type int8, float16, float32 +*@par Required Attributes: + * @li dilations: A tuple/list of integers. +*@par Attributes: + * Two attributes: + * @li padding_value: the value used to fill the blank positions introduced by dilation. Defaults to 0.0 + * @li pads: A tuple/list of integers. +*@par Outputs: + * y: A Tensor. A Tensor of type int8, float16, float32.
+*/ +REG_OP(Dilation) + .INPUT(x, TensorType({DT_INT8, DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_INT8, DT_FLOAT16, DT_FLOAT})) + .REQUIRED_ATTR(dilations, ListInt) + .ATTR(pads, ListInt, {}) + .ATTR(padding_value, Float, 0.0) + .OP_END_FACTORY_REG(Dilation) + } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_NN_CALCULATION_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/nn_detect_ops.h b/third_party/fwkacllib/inc/ops/nn_detect_ops.h index a013fb33..5fa40ad6 100644 --- a/third_party/fwkacllib/inc/ops/nn_detect_ops.h +++ b/third_party/fwkacllib/inc/ops/nn_detect_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -254,22 +254,22 @@ is min_size/sqrt(aspect_ratio), the width is min_size*sqrt(aspect_ratio). Defaul *@par Third-party framework compatibility * It is a custom operator. It has no corresponding operator in Caffe. 
*/ - REG_OP(PriorBox) - .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) - .INPUT(img, TensorType({DT_FLOAT16, DT_FLOAT})) - .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) - .REQUIRED_ATTR(min_size, ListFloat) - .REQUIRED_ATTR(max_size, ListFloat) - .REQUIRED_ATTR(aspect_ratio, ListFloat) - .ATTR(img_h, Int, 0) - .ATTR(img_w, Int, 0) - .ATTR(step_h, Float, 0.0) - .ATTR(step_w, Float, 0.0) - .ATTR(flip, Bool, true) - .ATTR(clip, Bool, false) - .ATTR(offset, Float, 0.5) - .ATTR(variance, ListFloat, {0.1}) - .OP_END_FACTORY_REG(PriorBox); +REG_OP(PriorBox) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(img, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .REQUIRED_ATTR(min_size, ListFloat) + .REQUIRED_ATTR(max_size, ListFloat) + .REQUIRED_ATTR(aspect_ratio, ListFloat) + .ATTR(img_h, Int, 0) + .ATTR(img_w, Int, 0) + .ATTR(step_h, Float, 0.0) + .ATTR(step_w, Float, 0.0) + .ATTR(flip, Bool, true) + .ATTR(clip, Bool, false) + .ATTR(offset, Float, 0.5) + .ATTR(variance, ListFloat, {0.1}) + .OP_END_FACTORY_REG(PriorBox); /** *@brief Performs SSD prior box detection, with four additional matrices and the "aspect_ratio" attribute deleted compared to PriorBox . \n @@ -306,25 +306,25 @@ is min_size/sqrt(aspect_ratio), the width is min_size*sqrt(aspect_ratio). Defaul *@par Restrictions: *Warning: THIS FUNCTION IS DEPRECATED. Please use PriorBox instead. 
*/ - REG_OP(PriorBoxD) - .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) - .INPUT(img, TensorType({DT_FLOAT16, DT_FLOAT})) - .INPUT(data_h, TensorType({DT_FLOAT16, DT_FLOAT})) - .INPUT(data_w, TensorType({DT_FLOAT16, DT_FLOAT})) - .INPUT(box_height, TensorType({DT_FLOAT16, DT_FLOAT})) - .INPUT(box_width, TensorType({DT_FLOAT16, DT_FLOAT})) - .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) - .REQUIRED_ATTR(min_size, ListFloat) - .REQUIRED_ATTR(max_size, ListFloat) - .ATTR(img_h, Int, 0) - .ATTR(img_w, Int, 0) - .ATTR(step_h, Float, 0.0) - .ATTR(step_w, Float, 0.0) - .ATTR(flip, Bool, true) - .ATTR(clip, Bool, false) - .ATTR(offset, Float, 0.5) - .ATTR(variance, ListFloat, {0.1}) - .OP_END_FACTORY_REG(PriorBoxD); +REG_OP(PriorBoxD) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(img, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(data_h, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(data_w, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(box_height, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(box_width, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .REQUIRED_ATTR(min_size, ListFloat) + .REQUIRED_ATTR(max_size, ListFloat) + .ATTR(img_h, Int, 0) + .ATTR(img_w, Int, 0) + .ATTR(step_h, Float, 0.0) + .ATTR(step_w, Float, 0.0) + .ATTR(flip, Bool, true) + .ATTR(clip, Bool, false) + .ATTR(offset, Float, 0.5) + .ATTR(variance, ListFloat, {0.1}) + .OP_END_FACTORY_REG(PriorBoxD); /** *@brief Performs SSD prior box detection, with four additional matrices and the "aspect_ratio" attribute deleted compared to PriorBox . \n @@ -358,22 +358,22 @@ is min_size/sqrt(aspect_ratio), the width is min_size*sqrt(aspect_ratio). Defaul *@par Restrictions: *Warning: THIS FUNCTION IS DEPRECATED. Please use PriorBox instead. 
*/ - REG_OP(PriorBoxDV2) - .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) - .INPUT(img, TensorType({DT_FLOAT16, DT_FLOAT})) - .INPUT(boxes, TensorType({DT_FLOAT16, DT_FLOAT})) - .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) - .REQUIRED_ATTR(min_size, ListFloat) - .REQUIRED_ATTR(max_size, ListFloat) - .ATTR(img_h, Int, 0) - .ATTR(img_w, Int, 0) - .ATTR(step_h, Float, 0.0) - .ATTR(step_w, Float, 0.0) - .ATTR(flip, Bool, true) - .ATTR(clip, Bool, false) - .ATTR(offset, Float, 0.5) - .ATTR(variance, ListFloat, {0.1}) - .OP_END_FACTORY_REG(PriorBoxDV2); +REG_OP(PriorBoxDV2) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(img, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(boxes, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .REQUIRED_ATTR(min_size, ListFloat) + .REQUIRED_ATTR(max_size, ListFloat) + .ATTR(img_h, Int, 0) + .ATTR(img_w, Int, 0) + .ATTR(step_h, Float, 0.0) + .ATTR(step_w, Float, 0.0) + .ATTR(flip, Bool, true) + .ATTR(clip, Bool, false) + .ATTR(offset, Float, 0.5) + .ATTR(variance, ListFloat, {0.1}) + .OP_END_FACTORY_REG(PriorBoxDV2); /** *@brief Performs Position Sensitive ROI Pooling . \n @@ -531,10 +531,10 @@ as xx...xyy...yww...whh...hbb...bc0c0..c0c1c1...c1......cncn...cn . \n * It is a custom operator. It has no corresponding operator in Caffe. */ REG_OP(Yolo) - .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) - .OUTPUT(coord_data, TensorType({DT_FLOAT16,DT_FLOAT})) - .OUTPUT(obj_prob, TensorType({DT_FLOAT16,DT_FLOAT})) - .OUTPUT(classes_prob, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(coord_data, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(obj_prob, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(classes_prob, TensorType({DT_FLOAT16, DT_FLOAT})) .ATTR(boxes, Int, 3) .ATTR(coords, Int, 4) .ATTR(classes, Int, 80) @@ -584,10 +584,10 @@ REG_OP(Yolo) * It is a custom operator. It has no corresponding operator in Caffe. 
*/ REG_OP(YoloV2DetectionOutput) - .INPUT(coord_data, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(obj_prob, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(classes_prob, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(img_info, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(coord_data, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(obj_prob, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(classes_prob, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(img_info, TensorType({DT_FLOAT16, DT_FLOAT})) .REQUIRED_ATTR(biases, ListFloat) .ATTR(boxes, Int, 5) .ATTR(coords, Int, 4) @@ -598,7 +598,7 @@ REG_OP(YoloV2DetectionOutput) .ATTR(score_threshold, Float, 0.5) .ATTR(iou_threshold, Float, 0.45) .ATTR(pre_nms_topn, Int, 512) - .OUTPUT(box_out, TensorType({DT_FLOAT16,DT_FLOAT})) + .OUTPUT(box_out, TensorType({DT_FLOAT16, DT_FLOAT})) .OUTPUT(box_out_num, TensorType({DT_INT32})) .OP_END_FACTORY_REG(YoloV2DetectionOutput) @@ -647,12 +647,12 @@ REG_OP(YoloV2DetectionOutput) *Warning: THIS FUNCTION IS DEPRECATED. Please use YoloV2DetectionOutput instead. 
*/ REG_OP(YoloV2DetectionOutputD) - .INPUT(coord_data, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(obj_prob, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(classes_prob, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(img_info, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(windex, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(hindex, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(coord_data, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(obj_prob, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(classes_prob, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(img_info, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(windex, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(hindex, TensorType({DT_FLOAT16, DT_FLOAT})) .REQUIRED_ATTR(biases, ListFloat) .ATTR(boxes, Int, 5) .ATTR(coords, Int, 4) @@ -663,7 +663,7 @@ REG_OP(YoloV2DetectionOutputD) .ATTR(score_threshold, Float, 0.5) .ATTR(iou_threshold, Float, 0.45) .ATTR(pre_nms_topn, Int, 512) - .OUTPUT(box_out, TensorType({DT_FLOAT16,DT_FLOAT})) + .OUTPUT(box_out, TensorType({DT_FLOAT16, DT_FLOAT})) .OUTPUT(box_out_num, TensorType({DT_INT32})) .OP_END_FACTORY_REG(YoloV2DetectionOutputD) @@ -707,16 +707,16 @@ REG_OP(YoloV2DetectionOutputD) * It is a custom operator. It has no corresponding operator in Caffe. 
*/ REG_OP(YoloV3DetectionOutput) - .INPUT(coord_data_low, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(coord_data_mid, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(coord_data_high, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(obj_prob_low, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(obj_prob_mid, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(obj_prob_high, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(classes_prob_low, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(classes_prob_mid, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(classes_prob_high, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(img_info, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(coord_data_low, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(coord_data_mid, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(coord_data_high, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(obj_prob_low, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(obj_prob_mid, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(obj_prob_high, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(classes_prob_low, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(classes_prob_mid, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(classes_prob_high, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(img_info, TensorType({DT_FLOAT16, DT_FLOAT})) .REQUIRED_ATTR(biases_low, ListFloat) .REQUIRED_ATTR(biases_mid, ListFloat) .REQUIRED_ATTR(biases_high, ListFloat) @@ -729,7 +729,7 @@ REG_OP(YoloV3DetectionOutput) .ATTR(score_threshold, Float, 0.5) .ATTR(iou_threshold, Float, 0.45) .ATTR(pre_nms_topn, Int, 512) - .OUTPUT(box_out, TensorType({DT_FLOAT16,DT_FLOAT})) + .OUTPUT(box_out, TensorType({DT_FLOAT16, DT_FLOAT})) .OUTPUT(box_out_num, TensorType({DT_INT32})) .OP_END_FACTORY_REG(YoloV3DetectionOutput) @@ -776,22 +776,22 @@ s *Warning: THIS FUNCTION IS DEPRECATED. Please use YoloV3DetectionOutput instead. 
*/ REG_OP(YoloV3DetectionOutputD) - .INPUT(coord_data_low, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(coord_data_mid, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(coord_data_high, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(obj_prob_low, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(obj_prob_mid, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(obj_prob_high, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(classes_prob_low, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(classes_prob_mid, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(classes_prob_high, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(img_info, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(windex1, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(windex2, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(windex3, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(hindex1, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(hindex2, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(hindex3, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(coord_data_low, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(coord_data_mid, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(coord_data_high, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(obj_prob_low, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(obj_prob_mid, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(obj_prob_high, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(classes_prob_low, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(classes_prob_mid, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(classes_prob_high, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(img_info, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(windex1, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(windex2, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(windex3, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(hindex1, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(hindex2, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(hindex3, TensorType({DT_FLOAT16, DT_FLOAT})) .REQUIRED_ATTR(biases_low, ListFloat) .REQUIRED_ATTR(biases_mid, ListFloat) .REQUIRED_ATTR(biases_high, ListFloat) @@ 
-804,7 +804,7 @@ REG_OP(YoloV3DetectionOutputD) .ATTR(score_threshold, Float, 0.5) .ATTR(iou_threshold, Float, 0.45) .ATTR(pre_nms_topn, Int, 512) - .OUTPUT(box_out, TensorType({DT_FLOAT16,DT_FLOAT})) + .OUTPUT(box_out, TensorType({DT_FLOAT16, DT_FLOAT})) .OUTPUT(box_out_num, TensorType({DT_INT32})) .OP_END_FACTORY_REG(YoloV3DetectionOutputD) @@ -848,7 +848,7 @@ There are three Yolo operators at Yolov3DetectionOutput's preceding layer on Yol * It is a custom operator. It has no corresponding operator in Caffe. */ REG_OP(YoloV3DetectionOutputV2) - .DYNAMIC_INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) + .DYNAMIC_INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) .REQUIRED_ATTR(biases, ListFloat) .ATTR(boxes, Int, 3) .ATTR(coords, Int, 4) @@ -862,7 +862,7 @@ REG_OP(YoloV3DetectionOutputV2) .ATTR(N, Int, 10) .ATTR(resize_origin_img_to_net, Bool, false) .ATTR(out_box_dim, Int, 3) - .OUTPUT(box_out, TensorType({DT_FLOAT16,DT_FLOAT})) + .OUTPUT(box_out, TensorType({DT_FLOAT16, DT_FLOAT})) .OUTPUT(box_out_num, TensorType({DT_INT32})) .OP_END_FACTORY_REG(YoloV3DetectionOutputV2) @@ -910,9 +910,9 @@ REG_OP(YoloV3DetectionOutputV2) * Warning: THIS FUNCTION IS DEPRECATED. Please use YoloV3DetectionOutputV2 instead. 
*/ REG_OP(YoloV3DetectionOutputV2D) - .DYNAMIC_INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) - .DYNAMIC_INPUT(windex, TensorType({DT_FLOAT16,DT_FLOAT})) - .DYNAMIC_INPUT(hindex, TensorType({DT_FLOAT16,DT_FLOAT})) + .DYNAMIC_INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .DYNAMIC_INPUT(windex, TensorType({DT_FLOAT16, DT_FLOAT})) + .DYNAMIC_INPUT(hindex, TensorType({DT_FLOAT16, DT_FLOAT})) .REQUIRED_ATTR(biases, ListFloat) .ATTR(boxes, Int, 3) .ATTR(coords, Int, 4) @@ -926,7 +926,7 @@ REG_OP(YoloV3DetectionOutputV2D) .ATTR(N, Int, 10) .ATTR(resize_origin_img_to_net, Bool, false) .ATTR(out_box_dim, Int, 3) - .OUTPUT(box_out, TensorType({DT_FLOAT16,DT_FLOAT})) + .OUTPUT(box_out, TensorType({DT_FLOAT16, DT_FLOAT})) .OUTPUT(box_out_num, TensorType({DT_INT32})) .OP_END_FACTORY_REG(YoloV3DetectionOutputV2D) @@ -968,8 +968,9 @@ REG_OP(SPP) * Three inputs, including: *@li x: An NC1HWC0 tensor of type float16 or float32, describing the feature * map. -*@li rois: A tensor of type float16 or float32, with shape +*@li rois: A tensor of type float16 or float32, with 3D shape * [batch, 5, roi_max_num], describing the RIOs. +* roi_max_num must be less than or equal to 6000 and must be divided by 16. *@li roi_actual_num: A optional tensor of type int32, with shape [batch, 8], specifying * the number of ROIs per batch . \n @@ -1201,35 +1202,6 @@ REG_OP(RpnProposalsD) .OUTPUT(sorted_box, TensorType({DT_FLOAT16})) .OP_END_FACTORY_REG(RpnProposalsD) -/** -*@brief Computes Score Filte Pre-Sort function. - -*@par Inputs: -*Inputs include: -* @li rois: A Tensor. Must be float16. N-D with shape [N, 4]. -* @li cls_bg_prob: A Tensor. Must be float16. N-D with shape [N, 1]. - -*@par Attributes: -* @li score_threshold: required, float, threahold of topk process. -* @li k: required, Int, threahold of topk process. -* @li score_filter: bool, mark of score_filter. Defaults to "true" -* @li core_max_num: int, max number of core. Defaults to "8" -*@par Outputs: -* @li sorted_proposal: A Tensor. 
Must be float16. -* N-D with shape [8*6002, 8]. -* @li proposal_num: A Tensor. Must be uint32. N-D with shape [8, 8]. -*/ - -REG_OP(ScoreFiltePreSort) - .INPUT(rois, TensorType({DT_FLOAT16})) - .INPUT(cls_bg_prob, TensorType({DT_FLOAT16})) - .OUTPUT(sorted_proposal, TensorType({ DT_FLOAT16})) - .OUTPUT(proposal_num, TensorType({ DT_UINT32})) - .REQUIRED_ATTR(score_threshold, Float) - .REQUIRED_ATTR(k, Int) - .ATTR(score_filter, Bool, true) - .ATTR(core_max_num, Int, 8) - .OP_END_FACTORY_REG(ScoreFiltePreSort) /** *@brief Computes Score Filte Pre-Sort function. @@ -1383,6 +1355,7 @@ REG_OP(DecodeWheelsTarget) *@attention Constraints: * Only computation of float16 data is supported. +* Note: when the class num per image * max_size_per_class is too big, compilation will fail with ERROR-insufficient memory */ REG_OP(BatchMultiClassNonMaxSuppression) .INPUT(boxes, TensorType({DT_FLOAT16})) @@ -1464,9 +1437,9 @@ REG_OP(NormalizeBBox) * y: A Tensor. Must have the same type as box_predictions. */ REG_OP(DecodeBboxV2) - .INPUT(boxes, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(anchors, TensorType({DT_FLOAT16,DT_FLOAT})) - .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(boxes, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(anchors, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) .ATTR(scales, ListFloat, {1.0, 1.0, 1.0, 1.0}) .ATTR(decode_clip, Float, 0.0) .ATTR(reversed_box, Bool, false) @@ -1477,7 +1450,8 @@ REG_OP(DecodeBboxV2) * *@par Inputs: *Inputs include: -* x: A Tensor. Must be float16 or float32. +* x: A Tensor. Dtype support: float16, float, int16, int8, + uint8, int32, int64. * *@par Attributes: * @li axis: optional, int. @@ -1485,16 +1459,364 @@ REG_OP(DecodeBboxV2) * *@par Outputs: * @li y1: A Tensor. Must have the same type as x. -* @li y2: A Tensor. Indices of y1 in x.Dtype must be int32. +* @li y2: A Tensor. Indices of y1 in x. Dtype must be int32.
+* */ REG_OP(Sort) - .INPUT(x, TensorType({ DT_FLOAT16 })) - .OUTPUT(y1, TensorType({ DT_FLOAT16 })) - .OUTPUT(y2, TensorType({ DT_INT32 })) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT16, DT_INT8, + DT_UINT8, DT_INT32, DT_INT64})) + .OUTPUT(y1, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT16, DT_INT8, + DT_UINT8, DT_INT32, DT_INT64})) + .OUTPUT(y2, TensorType({DT_INT32})) .ATTR(axis, Int, -1) .ATTR(descending, Bool, false) .OP_END_FACTORY_REG(Sort) +/** +*@brief Computes iou for input bboxes and gtboxes. + +*@par Inputs: +* Two inputs, including: +*@li bboxes: boxes, a 4D Tensor of type float16 with the shape (x0, x1, y0, y1), +*@li gtboxes: boxes, a 4D Tensor of type float16 with the shape (x0, x1, y0, y1).\n + +*@par Attributes: +*@li mode: A optional attribute of type string, whether judge the mode of iou. \n + +*@par Outputs: +*@li overlap: A 2D Tensor of type float16 with shape [n, m]. \n + +*@attention Constraints: +* Only computation of float16 data is supported. + +*@par Restrictions: +*Warning:THIS FUNCTION IS DEPRECATED. Please use Iou instead. +*/ +REG_OP(PtIou) + .INPUT(bboxes, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(gtboxes, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(overlap, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(mode, String, "iou") + .OP_END_FACTORY_REG(PtIou) + +/** +*@brief Greedily selects a subset of bounding boxes in descending order of +score . \n + +*@par Inputs: +*Input boxes and scores must be float16 type. Inputs include: +*@li boxes: A input tensor with shape [num_batches,spatial_dimension,4]. +The single box data format is indicated by center_point_box. +*@li scores: A input tensor with shape [num_batches,num_classes,spatial_dimension] +*@li max_output_size: A scalar integer tensor representing the maximum number +of boxes to be selected by non max suppression. +*@li iou_threshold: A 0-D float tensor representing the threshold for deciding +whether boxes overlap too much with respect to IOU. 
+*@li score_threshold: A 0-D float tensor representing the threshold for +deciding when to remove boxes based on score . \n + +*@par Attributes: +*center_point_box:Integer indicate the format of the box data. +The default is 0. 0 - the box data is supplied as [y1, x1, y2, x2] +where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair +of box corners and the coordinates can be provided as normalized +(i.e., lying in the interval [0, 1]) or absolute.Mostly used for TF models. +1 - the box data is supplied as [x_center, y_center, width, height]. + Mostly used for Pytorch models. \n + +*@par Outputs: +*@li selected_indices: A 2-D integer tensor of shape [M] representing the +selected indices from the boxes tensor, where M <= max_output_size. \n + +*@attention Constraints: +*Input boxes and scores must be float16 type . \n + +*@par Third-party framework compatibility +*Compatible with onnx NonMaxSuppression operator. + +*@par Restrictions: +*Warning:THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ + +REG_OP(NonMaxSuppressionV6) + .INPUT(boxes, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(scores, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(max_output_size, TensorType({DT_INT32})) + .OPTIONAL_INPUT(iou_threshold, TensorType({DT_FLOAT})) + .OPTIONAL_INPUT(score_threshold, TensorType({DT_FLOAT})) + .OUTPUT(selected_indices, TensorType({DT_INT32})) + .ATTR(center_point_box, Int, 0) + .ATTR(max_boxes_size, Int, 0) + .OP_END_FACTORY_REG(NonMaxSuppressionV6) + +/** +*@brief Greedily selects a subset of bounding boxes in descending order of +score . \n + +*@par Inputs: +*Input boxes and scores must be float16 type. Inputs include: +*@li boxes: A input tensor with shape [num_batches,spatial_dimension,4]. +The single box data format is indicated by center_point_box. 
+*@li scores: A input tensor with shape [num_batches,num_classes,spatial_dimension] +*@li max_output_size: A scalar integer tensor representing the maximum number +of boxes to be selected by non max suppression. +*@li iou_threshold: A 0-D float tensor representing the threshold for deciding +whether boxes overlap too much with respect to IOU. +*@li score_threshold: A 0-D float tensor representing the threshold for +deciding when to remove boxes based on score . \n +*@li index_id: A input tensor with shape [num_batches,num_classes,spatial_dimension,3] +the last dim representing (batch_id,class_id,index_id) . \n + +*@par Attributes: +*center_point_box:Integer indicate the format of the box data. +The default is 0. 0 - the box data is supplied as [y1, x1, y2, x2] +where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair +of box corners and the coordinates can be provided as normalized +(i.e., lying in the interval [0, 1]) or absolute.Mostly used for TF models. +1 - the box data is supplied as [x_center, y_center, width, height]. + Mostly used for Pytorch models. \n + +*@par Outputs: +*@li selected_indices: A 2-D integer tensor of shape [M] representing the +selected indices from the boxes tensor, where M <= max_output_size. \n + +*@attention Constraints: +*Input boxes and scores must be float16 type . \n + +*@par Third-party framework compatibility +*Compatible with onnx NonMaxSuppression operator. 
+*/ + +REG_OP(NonMaxSuppressionV7) + .INPUT(boxes, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(scores, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(max_output_size, TensorType({DT_INT32})) + .OPTIONAL_INPUT(iou_threshold, TensorType({DT_FLOAT})) + .OPTIONAL_INPUT(score_threshold, TensorType({DT_FLOAT})) + .OPTIONAL_INPUT(index_id, TensorType({DT_FLOAT16})) + .OUTPUT(selected_indices, TensorType({DT_INT32})) + .ATTR(center_point_box, Int, 0) + .ATTR(max_boxes_size, Int, 0) + .OP_END_FACTORY_REG(NonMaxSuppressionV7) + +/** +*@brief Obtains the ROI feature matrix from the feature map list. It is a customized fused operator for mmdetection. \n + +*@par Inputs: +* Three inputs, including: +*@li features: A 5HD Tensor list of type float32 or float16. +*@li rois: ROI position. A 2D Tensor of float32 or float16 with shape (N, 5). "N" indicates the number of ROIs, +* the value "5" indicates the indexes of images where the ROIs are located, "x0", "y0", "x1", and "y1". + +*@par Attributes: +*@li finest_scale: A optional attribute of type int, specifying the scale of calculate levels of "rois". +*@li roi_scale_factor: A optional attribute of type float32, specifying the rescaling of "rois" coordinates. +*@li spatial_scale: A optional attribute of type list float32, specifying the scaling ratio of "features" +* to the original image. +*@li pooled_height: A optional attribute of type int32, specifying the H dimension. +*@li pooled_width: A optional attribute of type int32, specifying the W dimension. +*@li sample_num: An optional attribute of type int32, specifying the horizontal and vertical sampling frequency +* of each output. If this attribute is set to "0", the sampling frequency is equal to the rounded up value of "rois", +* which is a floating point number. Defaults to "0". +*@li pool_mode: An optional attribute of type string to indicate pooling mode. Defaults to "avg" . \n +*@li aligned: An optional attribute of type bool, specifying the align to corner. 
Defaults to true . \n + +*@par Outputs: +* output: Outputs the feature sample of each ROI position. The format is 5HD Tensor of type float32 or float16. +* The axis N is the number of input ROIs. Axes H, W, and C are consistent with the values of "pooled_height", +* "pooled_width", and "features", respectively. + +*@par Third-party framework compatibility +*Compatible with mmdetection SingleRoIExtractor operator. +*/ +REG_OP(RoiExtractor) + .DYNAMIC_INPUT(features, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(rois, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(finest_scale, Int, 56) + .ATTR(roi_scale_factor, Float, 0) + .ATTR(spatial_scale, ListFloat, {1.f / 4, 1.f / 8, 1.f / 16, 1.f / 32}) + .ATTR(pooled_height, Int, 7) + .ATTR(pooled_width, Int, 7) + .ATTR(sample_num, Int, 0) + .ATTR(pool_mode, String, "avg") + .ATTR(aligned, Bool, true) + .OP_END_FACTORY_REG(RoiExtractor) + +/** +*@brief Performs Position Sensitive PS ROI Pooling . \n + +*@par Inputs: +* Two inputs, including: +*@li x: An NC1HWC0 tensor of type float16 or float32, describing the feature +* map, dimension C1 must be equal to +* (int(output_dim+15)/C0))*group_size*group_size. +*@li rois: A tensor of type float16 or float32, with shape +* [batch, 5, rois_num], describing the ROIs, each ROI consists of five +* elements: "batch_id", "x1", "y1", "x2", and "y2", which "batch_id" indicates +* the index of the input feature map, "x1", "y1", "x2", or "y2" must be +* greater than or equal to "0.0" . \n + +*@par Attributes: +*@li output_dim: A required int32, specifying the number of output channels, +* must be greater than 0. +*@li group_size: A required int32, specifying the number of groups to encode +* position-sensitive score maps, must be within the range (0, 128). +*@li spatial_scale: A required float32, scaling factor for mapping the input +* coordinates to the ROI coordinates . 
\n + +*@par Outputs: +*y: An NC1HWC0 tensor of type float16 or float32, describing the result +* feature map . \n + +*@attention Constraints: +* HC1HWC0: channel must be Group_size squared, rois_num is a multiple of 16 +*/ +REG_OP(PSROIPoolingV2) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(rois, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .REQUIRED_ATTR(spatial_scale, Float) + .REQUIRED_ATTR(output_dim, Int) + .REQUIRED_ATTR(group_size, Int) + .OP_END_FACTORY_REG(PSROIPoolingV2) + +/** +*@brief Performs Position Sensitive PS ROI Pooling Grad . \n + +*@par Inputs: +* Two inputs, including: +*@li x: An NC1HWC0 tensor of type float16 or float32, describing the result +* feature map . \n +*@li rois: A tensor of type float16 or float32, with shape +* [batch, 5, rois_num], describing the ROIs, each ROI consists of five +* elements: "batch_id", "x1", "y1", "x2", and "y2", which "batch_id" indicates +* the index of the input feature map, "x1", "y1", "x2", or "y2" must be +* greater than or equal to "0.0" . \n + +*@par Attributes: +*@li output_dim: A required int32, specifying the number of output channels, +* must be greater than 0. +*@li group_size: A required int32, specifying the number of groups to encode +* position-sensitive score maps, must be within the range (0, 128). +*@li spatial_scale: A required float32, scaling factor for mapping the input +* coordinates to the ROI coordinates . \n +*@li input_size: A required listInt, mapping the gradinput size: (H, W) + +*@par Outputs: +*y: An NC1HWC0 tensor of type float16 or float32, describing the feature +* map, dimension C1 must be equal to +* (int(output_dim+15)/C0))*group_size*group_size. 
+ +*@attention Constraints: +* HC1HWC0: channel must be Group_size squared, rois_num is a multiple of 16 +*/ +REG_OP(PSROIPoolingGradV2D) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(rois, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .REQUIRED_ATTR(spatial_scale, Float) + .REQUIRED_ATTR(output_dim, Int) + .REQUIRED_ATTR(group_size, Int) + .REQUIRED_ATTR(input_size, ListInt) + .OP_END_FACTORY_REG(PSROIPoolingGradV2D) + +/** +*@brief Generate the responsible flags of anchor in a single feature map. + +*@par Inputs: +*@li gt_bboxes: Ground truth box, 2-D Tensor with shape `[batch, 4]`. + +*@par Attributes: +*@li featmap_size: The size of feature maps, listint. +*@li strides: Stride of current level, listint. +*@li num_base_anchors: The number of base anchors. + +*@par Outputs: +*flags: The valid flags of each anchor in a single level. +*/ +REG_OP(AnchorResponseFlags) + .INPUT(gt_bboxes, TensorType({DT_FLOAT})) + .OUTPUT(flags, TensorType({DT_UINT8})) + .REQUIRED_ATTR(featmap_size, ListInt) + .REQUIRED_ATTR(strides, ListInt) + .REQUIRED_ATTR(num_base_anchors, Int) + .OP_END_FACTORY_REG(AnchorResponseFlags) + +/** +*@brief Generates bounding boxes based on yolo's "anchor" and "ground-truth" boxes. +* It is a customized mmdetection operator . \n + +*@par Inputs: +* Three inputs, including: +*@li anchor_boxes: anchor boxes generated by the yolo training set. +* A 2D Tensor of type float32 or float16 with shape (N, 4). "N" indicates the number +* of ROIs, "N" indicates the number of ROIs, and the value "4" refers to (tx, ty, tw, th). +*@li gt_bboxes: target of the transformation, e.g, ground-truth boxes. +* A 2D Tensor of type float32 or float16 with shape (N, 4). +* "N" indicates the number of ROIs, and 4 indicates "dx", "dy", "dw", and "dh" . +*@li stride: Scale for each box. +* A 1D Tensor of type int32 shape (N,). +* "N" indicates the number of ROIs. 
\n + +*@par Attributes: +*@li performance_mode: select performance mode, "high_precision" or "high_performance". +* select "high_precision" when input type is float32, the output tensor precision +* will be smaller than 0.0001, select "high_performance" when input type is float32, +* the ops will be best performance, but precision will be only smaller than 0.005. + +*@par Outputs: +*encoded_bboxes: Bboxes generated based on "anchor_boxes" and "gt_bboxes". Have the +* same format and type as "anchor_boxes". +* +*@attention Constraints: +* input anchor boxes only support maximum N=20480. \n +*/ +REG_OP(YoloBoxesEncode) + .INPUT(anchor_boxes, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(gt_bboxes, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(stride, TensorType({DT_INT32})) + .ATTR(performance_mode, String, "high_precision") + .OUTPUT(encoded_bboxes, TensorType({DT_FLOAT16, DT_FLOAT})) + .OP_END_FACTORY_REG(YoloBoxesEncode) + +/** +*@brief Assigns positive sample flags to bboxes based on grid responsibility (GridAssignPositive). + +*@par Inputs: +* Eight inputs, including: +*@li assigned_gt_inds: Tensor of type float16 or float32, shape (n, ) +*@li overlaps: A Tensor. Datatype is same as assigned_gt_inds. IOU between gt_bboxes and bboxes. shape(k, n) +*@li box_responsible_flags: A Tensor. Support uint8. Flag to indicate whether box is responsible. +*@li max_overlaps: A Tensor. Datatype is same as assigned_gt_inds. overlaps.max(axis=0). +*@li argmax_overlaps: A Tensor. Support int32. overlaps.argmax(axis=0). +*@li gt_max_overlaps: A Tensor. Datatype is same as assigned_gt_inds. overlaps.max(axis=1). +*@li gt_argmax_overlaps: A Tensor. Support int32. overlaps.argmax(axis=1). +*@li num_gts: A Tensor. Support int32. real k. shape (1, ) + +*@par Attributes: +*@li pos_iou_thr: float. IOU threshold for positive bboxes. +*@li min_pos_iou: float. minimum iou for a bbox to be considered as a positive bbox +*@li gt_max_assign_all: bool. whether to assign all bboxes with the same highest overlap with some gt to that gt.
+ +*@par Outputs: +*@li assigned_gt_inds_pos: A Tensor. Support float16/float32. shape (n, ). +*/ +REG_OP(GridAssignPositive) + .INPUT(assigned_gt_inds, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .INPUT(overlaps, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .INPUT(box_responsible_flags, TensorType({ DT_UINT8 })) + .INPUT(max_overlaps, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .INPUT(argmax_overlaps, TensorType({ DT_INT32 })) + .INPUT(gt_max_overlaps, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .INPUT(gt_argmax_overlaps, TensorType({ DT_INT32 })) + .INPUT(num_gts, TensorType({ DT_INT32 })) + .OUTPUT(assigned_gt_inds_pos, TensorType({DT_FLOAT, DT_FLOAT16})) + .REQUIRED_ATTR(pos_iou_thr, Float) + .REQUIRED_ATTR(min_pos_iou, Float) + .REQUIRED_ATTR(gt_max_assign_all, Bool) + .OP_END_FACTORY_REG(GridAssignPositive) } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_NN_DETECT_OPS_H_ + diff --git a/third_party/fwkacllib/inc/ops/nn_norm_ops.h b/third_party/fwkacllib/inc/ops/nn_norm_ops.h index 35c4c7d4..b44c0780 100644 --- a/third_party/fwkacllib/inc/ops/nn_norm_ops.h +++ b/third_party/fwkacllib/inc/ops/nn_norm_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -55,7 +55,9 @@ REG_OP(LogSoftmaxGrad) *Two inputs, including: * @li features: A Tensor. Must be one of the following types: half, float32, double. * A "batch_size * num_classes" matrix. -* @li labels: A Tensor of the same type as "features". batch_size vector with values in [0, num_classes). +* @li labels: A Tensor. Must be one of the following types: 'int32', 'int64'. +* batch_size vector with values in [0, num_classes). +* This is the label for the given minibatch entry. *@par Outputs: @@ -105,6 +107,9 @@ REG_OP(SoftmaxCrossEntropyWithLogits) * @li grad_softmax: A Tensor. Has the same shape and type as "softmax". 
* The format is NC1HWC0 or DN . \n +*@par Attributes: +* axes: An optional list of ints. Defaults to "{-1}" . \n + *@par Outputs: *grad_x: A Tensor. Has the same shape and type as "softmax" . \n @@ -115,6 +120,7 @@ REG_OP(SoftmaxGrad) .INPUT(softmax, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .INPUT(grad_softmax, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .OUTPUT(grad_x, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .ATTR(axes, ListInt, {-1}) .OP_END_FACTORY_REG(SoftmaxGrad) /** @@ -160,20 +166,20 @@ REG_OP(SigmoidCrossEntropyWithLogits) .OP_END_FACTORY_REG(SigmoidCrossEntropyWithLogits) /** -*@brief Computes the sigmoid cross entropy loss of "predict" and "target" . \n +*@brief Computes the sigmoid cross entropy loss of "predict" and "target". *@par Inputs: * four inputs, including: *@li predict: A multi-dimensional Tensor of type float16 or float32, specifying the predictive value. -*@li target: A multi-dimensional Tensor of type float16 or float32, specifying the target value . \n -*@li weight: An multi-dimensional Tensor, specifying the weight value. \n +*@li target: A multi-dimensional Tensor of type float16 or float32, specifying the target value. +*@li weight: An multi-dimensional Tensor, specifying the weight value. *@li pos_weight: An multi-dimensional Tensor, specifying the pos weight value. \n *@par Attributes: -*reduction: A character string from "none", "mean", and "sum", specifying the reduction type to be applied to the output. Defaults to "mean" . \n +*reduction: A character string from "none", "mean", and "sum", specifying the reduction type to be applied to the output. Defaults to "mean". \n *@par Outputs: -*loss: Sigmoid cross entropy between the predictive value and target value. Has the same dimensions as "predict" . \n +*loss: Sigmoid cross entropy between the predictive value and target value. Has the same dimensions as "predict". 
\n *@par Third-party framework compatibility * Compatible with PyTorch operator BCEWithLogitsLoss. @@ -331,6 +337,41 @@ REG_OP(SoftmaxV2) .OP_END_FACTORY_REG(SoftmaxV2) /** +*@brief Function softmax with dropoutDoMaskV3D + +*@par Inputs: +*Two inputs, including: +* @li x: A mutable Tensor. The type only supports float16. +* @li mask: A mutable Tensor. Must meet all of the following rules: +* shape of mask should be 1D. +* dtype of mask should be uint8. +* value of shape should meet the following algorithm: +* value = (size(x) + 128 - 1) // 128 * 128 + +*@par Attributes: +* @li keep_prob: A mutable Tensor. Must meet all of the following rules: +* shape of "keep_prob" should be (1,) or [1,]. +* Has the same type as "x" . \n +* @li axes: A list of int. The dimension softmax would be performed on. Defaults +* to "[-1]" . \n + +*@par Outputs: +*y1: A mutable Tensor. Has the same type as "x". +*y2: A mutable Tensor. Has the same type as "x". \n + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(SoftmaxV2WithDropOutDoMaskV3D) + .INPUT(x, TensorType({DT_FLOAT16})) + .INPUT(mask, TensorType({DT_UINT8})) + .OUTPUT(y1, TensorType({DT_FLOAT16})) + .OUTPUT(y2, TensorType({DT_FLOAT16})) + .REQUIRED_ATTR(keep_prob, Float) + .ATTR(axes, ListInt, {-1}) + .OP_END_FACTORY_REG(SoftmaxV2WithDropOutDoMaskV3D) + +/** *@brief Computes log softmax activations . \n *@par Inputs: @@ -428,6 +469,33 @@ REG_OP(MVN) .OP_END_FACTORY_REG(MVN) /** +*@brief Normalizes the input . \n + +*@par Inputs: +* One input: +*x: An NCHW tensor of type float16 or float32 . \n + +*@par Attributes: +*@li eps: An optional float32 epsilon for not dividing by zero. Defaults to "1e-9" . \n +*@li axes: A list of Integers, along which axis to reduce. Defaults to "[0, 2, 3]" . \n + +*@par Outputs: +*y: An NCHW tensor of type float16 or float32 . \n + +*@attention Constraints: +* The input tensor must have the NCHW format, whose shape length must be 4.
+*@par Third-party framework compatibility +* Compatible with the ONNX operator MeanVarianceNormalization. +*/ + +REG_OP(MVNV2) + .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16})) /* "First operand." */ + .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16})) /* "Result, has same element type as inputs" */ + .ATTR(eps, Float, 1e-9) + .ATTR(axes, ListInt, {0, 2, 3}) + .OP_END_FACTORY_REG(MVNV2) + +/** *@brief Normalizes the input "x1" . \n *@par Inputs: @@ -499,6 +567,31 @@ REG_OP(LayerNorm) .OP_END_FACTORY_REG(LayerNorm) /** +*@brief Returns a tensor where each sub-tensor of input along dimension +* dim is normalized such that the p-norm of the sub-tensor is lower than the value maxnorm. \n + +*@par Inputs: +*One input, including: +* @li x: A Tensor. Must be one of the following types: float16, float32 . \n + +*@par Attributes: +* @li p: Specify L_p norm, the type is float. +* @li dim: The processed dim, the type is int. +* @li maxnorm: Threshold for comparison, the type is float. \n + +*@par Outputs: +*One outputs, including: +* @li y: shape and dtype of output, should be same shape and type as input. 
+*/ +REG_OP(Renorm) + .INPUT(x, TensorType::BasicType()) + .OUTPUT(y, TensorType::BasicType()) + .REQUIRED_ATTR(p, Float) + .REQUIRED_ATTR(dim, Int) + .REQUIRED_ATTR(maxnorm, Float) + .OP_END_FACTORY_REG(Renorm) + +/** *@brief LayerNormGrad operator interface implementation * calculating: dy, x, variance, mean, gamma * pd_xl = data_dy*data_gamma @@ -587,6 +680,48 @@ REG_OP(LayerNormXBackprop) .OP_END_FACTORY_REG(LayerNormXBackprop) /** +*@brief LayerNormXBackpropV2 operator interface implementation +* calculating: dy, x, variance, mean, gamma +* pd_xl = data_dy*data_gamma +* pd_var = np.sum(((-0.5)*pd_xl*(data_x - data_mean) +* np.power((data_variance + EPSLON), (-1.5))), +* reduce_axis, keepdims=True) +* pd_mean = np.sum(((-1.0)*pd_xl +* np.power((data_variance + EPSLON), (-0.5))), +* reduce_axis, keepdims=True) +* + pd_var*(1.0/m) +* np.sum(((-2.0)*(data_x - data_mean)), reduce_axis, keepdims=True) +* pd_x = pd_xl*np.power((data_variance + EPSLON), (-0.5)) + +* pd_var*(2.0/m)*(data_x - data_mean) + pd_mean*(1.0/m) +* res_for_gamma = (data_x - data_mean) * np.power((data_variance + EPSLON), (-0.5)) + +*@par Inputs: +*Five inputs, including: +* @li dy: A Tensor. Must be one of the following types: float16, float32. +* @li x: A Tensor. Must be one of the following types: float16, float32. +* @li variance: A Tensor. Must be one of the following types: float16, float32. +* @li mean: A Tensor. Must be one of the following types: float16, float32. +* @li gamma: A Tensor. Must be one of the following types: float16, float32 . \n + +*@par Outputs: +*Three outputs, including: +* @li pd_x: A Tensor. Must be one of the following types: float16, float32. +* @li res_for_gamma: A Tensor. Must be one of the following types: float32. + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. 
+*/ +REG_OP(LayerNormXBackpropV2) + .INPUT(dy, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(variance, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(mean, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(gamma, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(pd_x, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(res_for_gamma, TensorType({DT_FLOAT})) + .OP_END_FACTORY_REG(LayerNormXBackpropV2) + +/** *@brief LayerNormBetaGammaBackprop operator interface implementation * calculating: dy, x, variance, mean * pd_xl = data_dy*data_gamma @@ -630,6 +765,35 @@ REG_OP(LayerNormBetaGammaBackprop) .OP_END_FACTORY_REG(LayerNormBetaGammaBackprop) /** +*@brief LayerNormBetaGammaBackpropV2 operator interface implementation +* calculating: dy, x, variance, mean +* pd_gamma = np.sum((data_dy*res_for_gamma), param_axis, keepdims=True) +* pd_beta = np.sum(data_dy, param_axis, keepdims=True) + +*@par Inputs: +*Three inputs, including: +* @li dy: A Tensor. Must be one of the following types: float16, float32. +* @li x: A Tensor. Must be one of the following types: float16, float32. +* @li variance: A Tensor. Must be one of the following types: float16, float32. +* @li mean: A Tensor. Must be one of the following types: float16, float32 . \n + +*@par Outputs: +*Three outputs, including: +* @li pd_gamma: A Tensor. Must be one of the following types: float16, float32. +* @li pd_beta: A Tensor. Must be one of the following types: float16, float32. + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. 
+*/ +REG_OP(LayerNormBetaGammaBackpropV2) + .INPUT(dy, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(res_for_gamma, TensorType({DT_FLOAT})) + .OUTPUT(pd_gamma, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(pd_beta, TensorType({DT_FLOAT, DT_FLOAT16})) + .REQUIRED_ATTR(shape_gamma, ListInt) + .OP_END_FACTORY_REG(LayerNormBetaGammaBackpropV2) + +/** *@brief Return "output" according to the algorithm of dropout_do_mask: * scale_x = x *(1 / keep_prob) * output = select(mask == 1, scale_x, 0) @@ -656,7 +820,68 @@ REG_OP(DropOutDoMask) .INPUT(keep_prob, TensorType({DT_FLOAT, DT_FLOAT16})) .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16})) .OP_END_FACTORY_REG(DropOutDoMask) - + +/** +*@brief Return "output" according to the algorithm of dropout_do_mask: +* scale_x = x *(1 / keep_prob) +* output = select(mask == 1, scale_x, 0) + +*@par Inputs: +*Three inputs, including: +* @li x: A mutable Tensor. Must be one of the following types: +* float16, float32 +* @li mask: A mutable Tensor. Must met all of the following rules: +* shape of mask should be 1D. +* dtype of mask should be uint8. +* value of shape should met the following algorithm: +* value = (size(x) + 128 - 1) // 128 * 128 +* @li keep_prob: A mutable Tensor. Must met all of the following rules: +* shape of "keep_prob" should be (1,) or [1,]. +* Has the same type as "x" . \n + +*@par Output: +*y: A mutable Tensor. Has the same type as "x". +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(DropOutDoMaskV3) + .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(mask, TensorType({DT_UINT8})) + .INPUT(keep_prob, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16})) + .OP_END_FACTORY_REG(DropOutDoMaskV3) + +/** +*@brief Return "output" according to the algorithm of dropout_do_mask: +* scale_x = x *(1 / keep_prob) +* output = select(mask == 1, scale_x, 0) + +*@par Inputs: +*Two inputs, including: +* @li x: A mutable Tensor. 
Must be one of the following types: +* float16, float32 +* @li mask: A mutable Tensor. Must met all of the following rules: +* shape of mask should be 1D. +* dtype of mask should be uint8. +* value of shape should met the following algorithm: +* value = (size(x) + 128 - 1) // 128 * 128 +*@par Attributes: +* @li keep_prob: A mutable Tensor. Must met all of the following rules: +* shape of "keep_prob" should be (1,) or [1,]. +* Has the same type as "x" . \n + +*@par Output: +*y: A mutable Tensor. Has the same type as "x". +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(DropOutDoMaskV3D) + .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(mask, TensorType({DT_UINT8})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16})) + .REQUIRED_ATTR(keep_prob, Float) + .OP_END_FACTORY_REG(DropOutDoMaskV3D) + /** *@brief Scales the input . \n @@ -703,7 +928,7 @@ REG_OP(Scale) *@par Inputs: *One input, including: -*@li x: A Tensor. Must be 4-D shape, and only support the following types: float16, float32 . \n +*x: A Tensor. Must be 4-D shape, and only support the following types: float16, float32 . \n *@par Attributes: *@li depth_radius: An optional int32, specifying the half-width of the normalization window. Defaults to "5". @@ -960,24 +1185,532 @@ REG_OP(INInferV2D) .OP_END_FACTORY_REG(INInferV2D) /** -*@brief Performs instance normalization for inference of InHost part. +* @brief InstanceNorm operator interface implementation. -*@par Inputs:\n -* One input, including: (NC1HWC0 supported) -* variance: A [N, C1, 1, 1, C0] Tensor of type float32, for the variance. +* @par Inputs: +* Three inputs, including: +* @li x: A Tensor. Must be one of the following types: float16, float32. +* @li gamma: A Tensor. Must be one of the following types: float16, float32. +* @li beta: A Tensor. Must be one of the following types: float16, float32. 
+ +* @par Attributes: +* @li data_format: An attribute of type String \n +* @li epsilon: An attribute of type Float. \n + +* @par Outputs: +*Three outputs, including: +* @li y: A Tensor. Has the same type as "x". \n +* @li mean: A Tensor. Has the same type as "x". \n +* @li variance: A Tensor. Has the same type as "x". \n + +* @par Third-party framework compatibility +* Can be used by onnx InstanceNormalization +*/ +REG_OP(InstanceNorm) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(gamma, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(beta, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(mean, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(variance, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(data_format, String, "NDHWC") + .ATTR(epsilon, Float, 1e-6) + .OP_END_FACTORY_REG(InstanceNorm) + +/** +*@brief InstanceNormGrad operator interface implementation. + +*@par Inputs: +*Five inputs, including: +* @li dy: A Tensor. Must be one of the following types: float16, float32. +* @li x: A Tensor. Must be one of the following types: float16, float32. +* @li variance: A Tensor. Must be one of the following types: float16, float32. +* @li mean: A Tensor. Must be one of the following types: float16, float32. +* @li gamma: A Tensor. Must be one of the following types: float16, float32 . \n + +*@par Outputs: +*Three outputs, including: +* @li pd_x: A Tensor. Must be one of the following types: float16, float32. +* @li pd_gamma: A Tensor. Must be one of the following types: float16, float32. +* @li pd_beta: A Tensor. Must be one of the following types: float16, float32. + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. 
+*/ +REG_OP(InstanceNormGrad) + .INPUT(dy, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(variance, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(mean, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(gamma, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(pd_x, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(pd_gamma, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(pd_beta, TensorType({DT_FLOAT, DT_FLOAT16})) + .OP_END_FACTORY_REG(InstanceNormGrad) + +/** +*@brief InstanceNormXBackprop operator interface implementation. + +*@par Inputs: +*Five inputs, including: +* @li dy: A Tensor. Must be one of the following types: float16, float32. +* @li x: A Tensor. Must be one of the following types: float16, float32. +* @li variance: A Tensor. Must be one of the following types: float16, float32. +* @li mean: A Tensor. Must be one of the following types: float16, float32. +* @li gamma: A Tensor. Must be one of the following types: float16, float32 . \n + +*@par Outputs: +*Two outputs, including: +* @li pd_x: A Tensor. Must be one of the following types: float16, float32. +* @li res_for_gamma: A Tensor. Must be one of the following types: float32. + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(InstanceNormXBackprop) + .INPUT(dy, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(variance, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(mean, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(gamma, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(pd_x, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(res_for_gamma, TensorType({DT_FLOAT})) + .OP_END_FACTORY_REG(InstanceNormXBackprop) + +/** +*@brief InstanceNormBetaGammaBackprop operator interface implementation. + +*@par Inputs: +*Two inputs, including: +* @li dy: A Tensor. Must be one of the following types: float16, float32. +* @li res_for_gamma: A Tensor. 
Must be one of the following types: float32.\n + +*@par Outputs: +*Two outputs, including: +* @li pd_gamma: A Tensor. Must be one of the following types: float16, float32. +* @li pd_beta: A Tensor. Must be one of the following types: float16, float32. + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(InstanceNormBetaGammaBackprop) + .INPUT(dy, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(res_for_gamma, TensorType({DT_FLOAT})) + .OUTPUT(pd_gamma, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(pd_beta, TensorType({DT_FLOAT, DT_FLOAT16})) + .OP_END_FACTORY_REG(InstanceNormBetaGammaBackprop) + +/** +* @brief Computes Kl_div_loss_grad or Kl_div_loss_backward. \n + +* @par Inputs: +* Three inputs, including: +* @li grad: A Tensor. Must be one of the following types: float16, float32. +* Required. +* @li input: A Tensor. Has the same type as "grad". Required. +* @li target: A Tensor. Has the same type as "grad". Required. \n + +* @par Attributes: +* @li reduction: An optional attribute of type String. Defaults to "mean". \n +* @li log_target: An optional attribute of type Bool. Defaults to false. \n + +* @par Outputs: +* @li y: A Tensor. Has the same type as "grad". \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator KlDivLossGrad. +*/ +REG_OP(KlDivLossGrad) + .INPUT(grad, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(input, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(target, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(reduction, String, "mean") + .ATTR(log_target, Bool, false) + .OP_END_FACTORY_REG(KlDivLossGrad) + +/** +* @brief Computes l1_loss_grad or l1_loss_backward. \n + +* @par Inputs: +* Three inputs, including: +* @li grads: A Tensor. Must be one of the following types: float16, float32. +* Required. +* @li predict: A Tensor. Has the same type as "grads". Required. +* @li label: A Tensor. Has the same type as "grads". Required. 
\n + +* @par Attributes: +* @li reduction: An optional attribute of type String. Defaults to "mean". \n + +* @par Outputs: +* @li y: A Tensor. Has the same type as "x". \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator L1LossGrad. +*/ +REG_OP(L1LossGrad) + .INPUT(grads, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(predict, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(label, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(reduction, String, "mean") + .OP_END_FACTORY_REG(L1LossGrad) + +/** +* @brief Computes loss of lp, p=1,2,3.... + +* @par Inputs: +* @li predict: An ND tensor of type float16, float32. +* @li label: An ND tensor of type float16, float32. \n + +* @par Attributes: +* @li p: A required int attribute that decides which loss to compute, now the p only can be 1 to compute l1_loss. +* @li reduction: An optional string.Defaults to "mean". \n + +* @par Outputs: +* @li y: An ND tensor tensor with the same shape and type as "predict". \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator LpLoss. +*/ +REG_OP(LpLoss) + .INPUT(predict, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(label, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .REQUIRED_ATTR(p, Int) + .ATTR(reduction, String, "mean") + .OP_END_FACTORY_REG(LpLoss) + +/** +* @brief Computes gradients of mse loss. + +* @par Inputs: +* @li predict: An ND tensor of type float16, float32. +* @li label: An ND tensor of type float16, float32. +* @li dout: An ND tensor of type float16, float32. \n + +* @par Attributes: +* @li reduction: An optional string.Defaults to "mean". \n + +* @par Outputs: +* @li y: An ND tensor tensor with the same shape and type as "predict". \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator MseLossGrad. 
+*/ +REG_OP(MseLossGrad) + .INPUT(predict, TensorType({DT_FLOAT32, DT_FLOAT16})) + .INPUT(label, TensorType({DT_FLOAT32, DT_FLOAT16})) + .INPUT(dout, TensorType({DT_FLOAT32, DT_FLOAT16})) + .OUTPUT(y, TensorType({DT_FLOAT32, DT_FLOAT16})) + .ATTR(reduction, String, "mean") + .OP_END_FACTORY_REG(MseLossGrad) + +/** +* @brief Computes mse loss. +* @par Inputs: +* two inputs, including: +* @li predict: An ND Tensor of dtype float16 or float32. +* @li label: An ND Tensor of dtype float16 or float32.\n +* +* @par Attributes: +* @li reduction:An optional str from sum, none, mean, Defaults to "mean".\n +* +* @par Outputs: +* @li y: when reduction=sum/mean, y is scale. when reduction=none, y has +* same type and shape as "predict".\n +*/ +REG_OP(MseLoss) + .INPUT(predict, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(label, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(reduction, String, "mean") + .OP_END_FACTORY_REG(MseLoss) + +/** +* @brief Calculates the reversed outputs of the function "smooth_l1_loss_v2". \n + +* @par Inputs: +* Three Inputs, including: +* @li predict: A Tensor. Must be one of the following types: +* float16, float32. +* @li label: A Tensor. Has the same type as "predict". +* @li dout: A Tensor. Has the same type as "predict". \n + +* @par Attributes: +* Two Attributes, including: +* @li sigma: An optional float. Defaults to 1.0. \n + +* @li reduction: An optional string. Defaults to "mean", +* Must be one of the following: "none", "mean", "sum". \n + +* @par Outputs: +* @li gradient: A Tensor. Has the same type as "predict". \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator SmoothL1LossBackward. 
+*/ +REG_OP(SmoothL1LossGradV2) + .INPUT(predict, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(label, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(dout, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(gradient, TensorType({DT_FLOAT, DT_FLOAT16})) + .ATTR(sigma, Float, 1.0) + .ATTR(reduction, String, "mean") + .OP_END_FACTORY_REG(SmoothL1LossGradV2) + +/** +* @brief Creates a criterion that uses a squared term if the absolute +* element-wise error falls below beta and an L1 term otherwise. It is +* less sensitive to outliers than the MSELoss and in some cases prevents +* exploding gradients. + +* @par Inputs: +* @li predict: A multi-dimensional Tensor of type float16 or float32, +* specifying the predictive value. \n +* @li label: A multi-dimensional Tensor of type float16 or float32, +* specifying the target value. \n + +* @par Attributes: +* @li sigma: An optional int. Specifies the threshold of loss. Defaults +* to "1.0". \n +* @li reduction: An optional str. Specifies the reduction to apply to +* the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, +* 'mean': the sum of the output will be divided by the number of elements in +* the output,'sum': the output will be summed. Default: 'mean'. \n + +* @par Outputs: +* @li loss: Indicates the loss between the predictive value and target value. +* Has the same dimensions as "predict". \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator smooth_l1_loss. \n +*/ +REG_OP(SmoothL1LossV2) + .INPUT(predict, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .INPUT(label, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .OUTPUT(loss, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .ATTR(sigma, Float, 1.0) + .ATTR(reduction, String, "mean") + .OP_END_FACTORY_REG(SmoothL1LossV2) + +/** +* @brief Computes Centralization. result = x - mean(x, axes) + +* @par Inputs: +* @li x: An ND tensor of type float16, float32. +* @par Attributes: +* @li axes: The dimensions to reduce. 
Must be one of the following types: int, list, tuple, NoneType. +* Must be in the range [-rank(x), rank(x)). +* @par Outputs: +* @li y: A Tensor. Has the same type as "x". \n + +* @par Third-party framework compatibility +* custom operator \n +*/ +REG_OP(Centralization) + .INPUT(x, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .OUTPUT(y, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .ATTR(axes, ListInt, {-1}) + .OP_END_FACTORY_REG(Centralization) + +/** +*@brief Roll the tensor along the given dimension(s). +* Elements that are shifted beyond the last position are re-introduced at the first position. +* If a dimension is not specified, the tensor will be flattened before rolling and then restored to the original shape. \n + +*@par Inputs: +*One inputs, including: +* @li x: A tensor . Must be one of the following types: +* float16, float32, int32, uint32, int8, uint8. \n *@par Attributes: -* epsilon: An optional float32, specifying the small value added to -variance to avoid dividing by zero. Defaults to "0.00001" . \n +* @li shifts: The number of places by which the elements of the tensor are shifted. \n +* @li dims: Axis along which to roll. \n -*@par Outputs:\n -* variance_sqrt: A [N, C1, 1, 1, C0] Tensor of type float32, for the variance_sqrt. +*@par Outputs: +* y: A Tensor with the same type and shape of x's. \n + +*@par Third-party framework compatibility +*Compatible with the Pytorch operator Roll. \n */ -REG_OP(InHost) - .INPUT(variance, TensorType({DT_FLOAT})) - .OUTPUT(variance_sqrt, TensorType({DT_FLOAT})) - .ATTR(epsilon, Float, 0.00001) - .OP_END_FACTORY_REG(InHost) +REG_OP(Roll) + .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_UINT32,DT_INT8,DT_UINT8})) + .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_UINT32,DT_INT8,DT_UINT8})) + .REQUIRED_ATTR(shifts, ListInt) + .ATTR(dims, ListInt, {}) + .OP_END_FACTORY_REG(Roll) + +/** + *@brief Calculate the loss. 
Creates a criterion that optimizes a two-class classification + logistic loss between input_x and input_y (containing 1 or -1). \n + + *@par Inputs: + *One inputs, including: + * @li input_x: A tensor. Must be one of the following types: + * float16, float32. \n + * @li input_y: A tensor. Must be one of the following types: + * float16, float32. \n + + *@par Attributes: + *@li lambd: An optional string.Defaults to "mean". \n + + *@par Outputs: + *output_z: while reduction == "none", A Tensor with the same type and shape of input_x's. \n + * while reduction == "sum" or "mean", A Tensor with the same type of input_x , shape of which is (1,) + + *@par Third-party framework compatibility + *Compatible with the Pytorch operator SoftMarginLoss. \n + */ +REG_OP(SoftMarginLoss) + .INPUT(input_x, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(input_y, TensorType({DT_FLOAT, DT_FLOAT16})) + .ATTR(reduction, String, "mean") + .OUTPUT(output_z, TensorType({DT_FLOAT, DT_FLOAT16})) + .OP_END_FACTORY_REG(SoftMarginLoss) + +/** +* @brief Computes gradients of sigmoid_cross_entropy_with_logits_v2. + +* @par Inputs: +* @li predict: An ND tensor of type float16, float32. +* @li target: An ND tensor of type float16, float32. +* @li dout: An ND tensor of type float16, float32. +* @li weight: An optional ND tensor of type float16, float32. +* @li pos_weight: An optional ND tensor of type float16, float32. \n + +* @par Attributes: +* @li reduction: An optional string.Defaults to "mean". \n + +* @par Outputs: +* @li gradient: An ND tensor tensor with the same shape and type as "predict". \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator SigmoidCrossEntropyWithLogitsGrad. 
+*/ +REG_OP(SigmoidCrossEntropyWithLogitsGradV2) + .INPUT(predict, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(target, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(dout, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(weight, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(pos_weight, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(gradient, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(reduction, String, "mean") + .OP_END_FACTORY_REG(SigmoidCrossEntropyWithLogitsGradV2) +/** + * @brief Calculate the PoissonNllLoss function. + * target∼Poisson(input)loss(input,target)=input−target∗log(input)+log(target!) \n + + * @par Inputs: + * Two inputs, including: + * @li input_x: A tensor. Must be one of the following types: + * float16, float32. \n + * + * @par Inputs: + * @li target: A tensor. Must be one of the following types: + * float16, float32. \n + + * @par Attributes: + * four Attributes, including: + * @li log_input: An optional bool. Defaults to "True" \n + * + * @par Attributes: + * @li full: An optional bool. Defaults to "False" \n + * + * @par Attributes: + * @li eps: An optional float. Defaults to "1e-8" \n + * + * @par Attributes: + * @li reduction: An optional string. Defaults to "mean" \n + + * @par Outputs: + * loss: A Tensor has same element type as two inputs. \n + + * @par Third-party framework compatibility + * Compatible with the Pytorch operator PoissonNllLoss. \n + */ +REG_OP(PoissonNllLoss) + .INPUT(input_x, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(target, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(loss, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(log_input, Bool, true) + .ATTR(full, Bool, false) + .ATTR(eps, Float, 1e-8) + .ATTR(reduction, String, "mean") + .OP_END_FACTORY_REG(PoissonNllLoss) +/** + *@brief rnn_gen_mask + * @par Inputs: + * @li seq_length: A ND Tensor of type int32. Recoed the current length of each batch.\n + * + * @par Attributes: + * @li num_step: A required int.\n + * @li hidden_size: A required int. 
\n + * + * + * @par Output: + * y: A mutable Tensor of type float16, with the shape of [num_step, batch_size, hidden_size]. \n + * + */ +REG_OP(RnnGenMask) + .INPUT(seq_length, TensorType({DT_INT32})) + .OUTPUT(seq_mask, TensorType({DT_FLOAT16})) + .REQUIRED_ATTR(num_step, Int) + .REQUIRED_ATTR(hidden_size, Int) + .OP_END_FACTORY_REG(RnnGenMask) + +/** +* @brief Creates a criterion that optimizes a multi-class multi-classification hinge loss (margin-based loss) +* between input x (a 2D mini-batch Tensor) and output y (which is a 2D Tensor of target class indices) \n + +* @par Inputs: +* Two inputs, including: +* @li x: A tensor. Must be one of the following types: +* float16, float32. \n +* +* @par Inputs: +* @li target: A tensor. Must be the following types: +* int32. \n + +* @par Attributes: +* @li reduction: An optional string. Defaults to "mean" \n + +* @par Outputs: +* y: A Tensor has same element type as input x. \n +* is_target: A Tensor has same element type as input target. \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator MultiLabelMarginLoss. \n +*/ +REG_OP(MultilabelMarginLoss) + .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(target, TensorType({DT_INT32})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(is_target, TensorType({DT_INT32})) + .ATTR(reduction, String, "mean") + .OP_END_FACTORY_REG(MultilabelMarginLoss) + +/** +*@brief Performs batch normalization . \n +*@par Inputs: +* Two inputs +*@li input_x: A Tensor. Support float32. shape (n, c, d). +*@li seq_len: A Tensor. Each batch normalize data num. Support Int32. Shape (n, ). \n +*@par Attributes: +*@li normalize_type: Str. Support "per_feature" or "all_features". +*@li epsilon: An optional float32, specifying the small value added to +variance to avoid dividing by zero. Defaults to "0.00001" . \n +*@par Outputs: +* One outputs +*@li output_y: A Tensor for the normalized "x".Support float32. 
shape (n, c, d).\n +*/ +REG_OP(NormalizeBatch) + .INPUT(input_x, TensorType({ DT_FLOAT })) + .INPUT(seq_len, TensorType({ DT_INT32 })) + .OUTPUT(output_y, TensorType({ DT_FLOAT })) + .REQUIRED_ATTR(normalize_type, String) + .ATTR(epsilon, Float, 0.00001) + .OP_END_FACTORY_REG(NormalizeBatch) } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_NN_NORM_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/nn_ops.h b/third_party/fwkacllib/inc/ops/nn_ops.h index 9edc469a..49fd02fa 100644 --- a/third_party/fwkacllib/inc/ops/nn_ops.h +++ b/third_party/fwkacllib/inc/ops/nn_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,7 +20,144 @@ */ #ifndef OPS_BUILT_IN_OP_PROTO_INC_NN_OPS_H_ #define OPS_BUILT_IN_OP_PROTO_INC_NN_OPS_H_ - +#include "graph/operator_reg.h" #include "nn_pooling_ops.h" +namespace ge { +/** +* @brief Says whether the targets are in the top "k" predictions . \n + +* @par Inputs: +* Three inputs, including: +* @li predictions: A 2D Tensor of type float32. A "batch_size * classes" tensor. +* @li targets: A 1D Tensor of type IndexNumberType. A batch_size tensor of class ids. +* @li k: A 1D Tensor of the same type as "targets". +* Specifies the number of top elements to look at for computing precision . \n + +* @par Outputs: +* precision: A Tensor of type bool . \n + +* @attention Constraints: +* @li targets must be non-negative tensor. + +* @par Third-party framework compatibility +* @li Compatible with the TensorFlow operator InTopKV2. +*/ +REG_OP(InTopKV2) + .INPUT(predictions, TensorType({DT_FLOAT})) + .INPUT(targets, TensorType(IndexNumberType)) + .INPUT(k, TensorType({IndexNumberType})) + .OUTPUT(precision, TensorType({DT_BOOL})) + .OP_END_FACTORY_REG(InTopKV2) + +/** +*@brief Performs batch normalization . 
\n + +*@par Inputs: +* Five inputs, including: (NHWC, NCHW, or NC1HWC0 supported) +*@li x: A 4D or 5D Tensor of type float16 or float32, with format NHWC or NCHW for 4D or NC1HWC0 for 5D. +*@li scale: A Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. Must be 5D +if input "x" is with format NC1HWC0. Specifies the scaling factor. +*@li offset: A Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. Must be 5D +if input "x" is with format NC1HWC0. Specifies the offset. +*@li mean: A Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. Must be 5D +if input "x" is with format NC1HWC0. Specifies the mean used for inference. Must be "None" if the +operation is used for training. +*@li variance: A Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. Must be +5D if input "x" is with format NC1HWC0. Specifies the variance used for inference. Must be "None" +if the operation is used for training . \n + +*@par Attributes: +*@li epsilon: An optional float32, specifying the small value added to variance to avoid dividing by zero. Defaults to "0.0001". +*@li data_format: An optional string, specifying the format of "x". Defaults to "NHWC". +*@li is_training: An optional bool, specifying if the operation is used for training or inference. Defaults to "True" . \n + +*@par Outputs: +* Five outputs, including: (NHWC, NCHW, or NC1HWC0 supported) +*@li y: A 4D or 5D Tensor of type float16 or float32 for the normalized "x", with format NHWC or NCHW for 4D or NC1HWC0 for 5D. +*@li batch_mean: A Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. Must be 5D +if input "x" is with format NC1HWC0. Specifies the mean of "x". +*@li batch_variance: A Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. +Must be 5D if input "x" is with format NC1HWC0. Specifies the variance of "x". +*@li reserve_space_1: An optional Tensor of type float32. 
Must be 1D if input "x" is with format NHWC or NCHW. +Must be 5D if input "x" is with format NC1HWC0. Specifies the mean of "x" for gradient computation. Pass "None" to skip this output. +*@li reserve_space_2: An optional Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. +Must be 5D if input "x" is with format NC1HWC0. Specifies the variance of "x" for gradient computation. Pass "None" to skip this output . \n + +*@attention Constraints: +*@li If the operation is used for inference and outputs "reserve_space_1" and "reserve_space_2" are available, +then "reserve_space_1" has the same value as "mean" and "reserve_space_2" has the same value as "variance". +*@li For Ascend 310, the result accuracy fails to reach 1‰ due to the square root instruction . \n +*/ +REG_OP(FusedBatchNormV2) + .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(scale, TensorType({DT_FLOAT})) + .INPUT(offset, TensorType({DT_FLOAT})) + .OPTIONAL_INPUT(mean, TensorType({DT_FLOAT})) + .OPTIONAL_INPUT(variance, TensorType({DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT})) + .OUTPUT(batch_mean, TensorType({DT_FLOAT})) + .OUTPUT(batch_variance, TensorType({DT_FLOAT})) + .OUTPUT(reserve_space_1, TensorType({DT_FLOAT})) + .OUTPUT(reserve_space_2, TensorType({DT_FLOAT})) + .ATTR(epsilon, Float, 0.0001) + .ATTR(data_format, String, "NHWC") + .ATTR(is_training, Bool, true) + .OP_END_FACTORY_REG(FusedBatchNormV2) + +/** + * @brief: Large amount of data sort.First operator of TopK. + * @par Inputs: + * two input, including: + * @li input_data: A Tensor. Data to be sorted. Support float16 + * @li input_index: A Tensor. Range(0, 2048). Datatype and format is same as input_data. + * @par Attributes: + * @li k_num: Int.Number to be sorted. + * @par Outputs: + * 1 output, including: + * @li output_proposal: A Tensor. Datatype and format is same as input_data. Proposal sorted for each channel. 
+ */ +REG_OP(SegmentSort) + .INPUT(input_data, TensorType({DT_FLOAT16})) + .INPUT(input_index, TensorType({DT_FLOAT16})) + .OUTPUT(output_proposal, TensorType({DT_FLOAT16})) + .REQUIRED_ATTR(k_num, Int) + .OP_END_FACTORY_REG(SegmentSort) + +/** + * @brief: Large amount of data sort.Second operator of TopK. + * @par Inputs: + * two input, including: + * @li input_proposal: A Tensor. Proposal sorted for each channel. Support float16 + * @par Attributes: + * @li k_num: Int.Number to be sorted. + * @par Outputs: + * 1 output, including: + * @li output_proposal: A Tensor. Datatype and format is same as input_data. Proposal sorted for each channel. + */ +REG_OP(MultiMerge) + .INPUT(input_proposal, TensorType({DT_FLOAT16})) + .OUTPUT(output_proposal, TensorType({DT_FLOAT16})) + .REQUIRED_ATTR(k_num, Int) + .OP_END_FACTORY_REG(MultiMerge) + +/** + * @brief: Large amount of data sort.Third operator of TopK. + * @par Inputs: + * two input, including: + * @li input_proposal: A Tensor. Proposal sorted for each channel. Support float16 + * @par Attributes: + * @li k_num: Int.Number to be sorted. + * @par Outputs: + * 2 output, including: + * @li output_data: A Tensor. Datatype and format is same as input_data. Data sorted. + * @li output_index: A Tensor. int32. Data index. 
+ */ +REG_OP(SingleMerge) + .INPUT(input_proposal, TensorType({DT_FLOAT16})) + .OUTPUT(output_data, TensorType({DT_FLOAT16})) + .OUTPUT(output_index, TensorType({DT_INT32})) + .REQUIRED_ATTR(k_num, Int) + .OP_END_FACTORY_REG(SingleMerge) +}// namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_NN_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/nn_pooling_ops.h b/third_party/fwkacllib/inc/ops/nn_pooling_ops.h index ab35ba47..80a21333 100644 --- a/third_party/fwkacllib/inc/ops/nn_pooling_ops.h +++ b/third_party/fwkacllib/inc/ops/nn_pooling_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -182,6 +182,128 @@ REG_OP(AvgPool3D) .ATTR(data_format, String, "NDHWC") .OP_END_FACTORY_REG(AvgPool3D) + +/** +*@brief Performs average pooling on the input. + +*@par Inputs: +*@li x: A 5-D Tensor of shape [batch, depth, height, width, channels] and type float16, float32, double. +*@li filter: An optional tensor of type float16, float32, double, fractal_z_3d layout. +*@li multiplier: An optional tensor of float16, float32, double. + +*@par Attributes: +*@li ksize: List of ints that has length 1, 3 or 5. The size of the window for each dimension of the input tensor. +*@li strides:List of ints that has length 1, 3 or 5. The stride of the sliding window for each dimension of the input tensor. +*@li pads: List of ints, implicit zero paddings on both sides of the input. +*@li ceil_mode: When true, will use ceil instead of floor in the formula to compute the output shape. +*@li count_include_pad: When true, will include the zero-padding in the averaging calculation. +*@li divisor_override: if specified, it will be used as divisor, otherwise size of the pooling region will be used. +*@li data_format: A string, format of input data . 
\n + +*@par Outputs: +*y: The average pooled output tensor . \n + +*@attention Constraints: +*@li "ksize" is in the range [1, 255]. "strides" is in the range [1, 63] + +*@par Third-party framework compatibility +* Compatible with the TensorFlow operator AvgPool3D. +*/ +REG_OP(AvgPool3DD) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE})) + .OPTIONAL_INPUT(filter, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE})) + .OPTIONAL_INPUT(multiplier, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE})) + .REQUIRED_ATTR(ksize, ListInt) + .REQUIRED_ATTR(strides, ListInt) + .REQUIRED_ATTR(pads, ListInt) + .ATTR(ceil_mode, Bool, false) + .ATTR(count_include_pad, Bool, true) + .ATTR(divisor_override, Int, 0) + .ATTR(data_format, String, "NDHWC") + .OP_END_FACTORY_REG(AvgPool3DD) + +/** +* @brief Computes AvgPool3DGrad function. + +* @par Inputs: +* @li orig_input_shape: An NDHWC tensor of type int32. +* @li grads: An NDHWC tensor of type float16, float32, or double. + +* @par Attributes: +* @li ksize: List of ints that has length 5. The size of the window for each dimension of the input tensor. +* @li strides:List of ints that has length 5. The stride of the sliding window for each dimension of the input tensor. +* @li pads: List of ints, implicit zero paddings on both sides of the input. +* @li ceil_mode: When true, will use ceil instead of floor in the formula to compute the output shape. +* @li count_include_pad: When true, will include the zero-padding in the averaging calculation. +* @li divisor_override: if specified, it will be used as divisor, otherwise size of the pooling region will be used. +* @li data_format: A string, format of input data. + +* @par Outputs: +* @output: A mutable tensor with the same shape and type as "orig_input_shape". + +* @attention Constraints: +* @li "ksize" is in the range [1, 255]. 
"strides" is in the range [1, 63] + +* @par Third-party framework compatibility +* @li Compatible with the TensorFlow operator AvgPoolGrad. +*/ + +REG_OP(AvgPool3DGrad) + .INPUT(orig_input_shape, TensorType({DT_INT32})) + .INPUT(grads, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE})) + .OUTPUT(output, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE})) + .REQUIRED_ATTR(ksize, ListInt) + .REQUIRED_ATTR(strides, ListInt) + .REQUIRED_ATTR(pads, ListInt) + .ATTR(ceil_mode, Bool, false) + .ATTR(count_include_pad, Bool, true) + .ATTR(divisor_override, Int, 0) + .ATTR(data_format, String, "NDHWC") + .OP_END_FACTORY_REG(AvgPool3DGrad) + +/** +* @brief Performs average pooling on the input. + +* @par Inputs: +* @li grads: An NDHWC tensor of type float16. +* @li filter: An optional tensor of type float16, fractal_z_3d layout. +* @li multiplier: An optional tensor of float16. + +* @par Attributes: +* @li orig_input_shape: List of ints that has length 5. The size of the window for each dimension of the input tensor. +* @li ksize: List of ints that has length 5. The size of the window for each dimension of the input tensor. +* @li strides:List of ints that has length 5. The stride of the sliding window for each dimension of the input tensor. +* @li pads: List of ints, implicit zero paddings on both sides of the input. +* @li ceil_mode: When true, will use ceil instead of floor in the formula to compute the output shape. +* @li count_include_pad: When true, will include the zero-padding in the averaging calculation. +* @li divisor_override: if specified, it will be used as divisor, otherwise size of the pooling region will be used. +* @li data_format: A string, format of input data . \n + +* @par Outputs: +* @output: The average pooled output tensor . \n + +* @attention Constraints: +* @li "ksize" is in the range [1, 255]. "strides" is in the range [1, 63] + +* @par Third-party framework compatibility +* Compatible with the TensorFlow operator AvgPool3DGradD. 
+*/ +REG_OP(AvgPool3DGradD) + .INPUT(grads, TensorType({DT_FLOAT16})) + .OPTIONAL_INPUT(filter, TensorType({DT_FLOAT16})) + .OPTIONAL_INPUT(multiplier, TensorType({DT_FLOAT16})) + .OUTPUT(output, TensorType({DT_FLOAT16})) + .REQUIRED_ATTR(orig_input_shape, ListInt) + .REQUIRED_ATTR(ksize, ListInt) + .REQUIRED_ATTR(strides, ListInt) + .REQUIRED_ATTR(pads, ListInt) + .ATTR(ceil_mode, Bool, false) + .ATTR(count_include_pad, Bool, true) + .ATTR(divisor_override, Int, 0) + .ATTR(data_format, String, "NDHWC") + .OP_END_FACTORY_REG(AvgPool3DGradD) + /** *@brief Performs max_pool_ext2 on the input . \n @@ -278,8 +400,8 @@ No default value. specifying the stride of the sliding window for each dimension of the input tensor. No default value. *@li padding: A required string type of float16. -*@li pads: A list type of int32. Default value {0, 0, 0}. -*@li dilation: A list type of int32. Default value {1, 1, 1}. +*@li pads: A list type of int32. Default value {0,0,0,0,0,0}. +*@li dilation: A list type of int32. Default value {1,1,1,1,1,1}. *@li ceil_mode: A ceil mode number of int32 . Default value 0. *@li data_format: An optional string. Defaults to "NDHWC" . \n @@ -302,12 +424,37 @@ REG_OP(MaxPool3D) .REQUIRED_ATTR(ksize, ListInt) .REQUIRED_ATTR(strides, ListInt) .REQUIRED_ATTR(padding, String) - .ATTR(pads, ListInt, {0,0,0}) - .ATTR(dilation, ListInt, {1,1,1}) + .ATTR(pads, ListInt, {0,0,0,0,0,0}) + .ATTR(dilation, ListInt, {1,1,1,1,1,1}) .ATTR(ceil_mode, Int, 0) .ATTR(data_format, String, "NDHWC") .OP_END_FACTORY_REG(MaxPool3D) +/** +*@brief Applies a 2D adaptive max pooling over an input signal composed of several input planes. \n +* The output is of size H x W, for any input size. + +* @par Inputs: +* One input, including: +* @li x: A Tensor. Must be one of the following data types: +* float16, float32, float64. \n + +* @par Attributes: +* @li output_size: A required list of 2 ints +* specifying the size (H,W) of the output tensor.
\n + +* @par Outputs: +* @li y: A Tensor. Has the same data type as "x" \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator AdaptiveMaxPool2d. +*/ +REG_OP(AdaptiveMaxPool2d) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE})) + .OUTPUT(argmax, TensorType::IndexNumberType()) + .REQUIRED_ATTR(output_size, ListInt) + .OP_END_FACTORY_REG(AdaptiveMaxPool2d) /** * @brief Computes second-order gradients of the maxpooling3d function . \n @@ -477,8 +624,9 @@ REG_OP(MaxPoolV2) *@par Inputs: * One input: -*x: An NC1HWC0 Tensor. Supported type: float, double, int32, - * uint8, int16, int8, int64, uint16, half, uint32, uint64 . \n +* x: An 4D Tensor. Supported type: float, double, int32, + * uint8, int16, int8, int64, uint16, half, uint32, uint64. + * Must set the format, supported format list ["NCHW, NHWC"]. \n *@par Attributes: *@li ksize: A required list of int8, int16, int32, or int64 values, @@ -490,8 +638,8 @@ REG_OP(MaxPoolV2) *@li padding: A required string. No default value . \n *@par Outputs: -*y: A Tensor. Has the same type and format as input "x". -*argmax: A Tensor. Has the same type and format as input "x". +*@li y: A Tensor. Has the same type and format as input "x". +*@li argmax: A Tensor. Has the same type and format as input "x". *@attention Constraints: *@li "ksize" is a list that has length 4: ksize[0] = 1 or ksize[3] = 1, * ksize[1] * ksize[2] <= 255. @@ -517,10 +665,12 @@ REG_OP(MaxPoolWithArgmax) *@par Inputs: * Three inputs, including: -*@li x: An NC1HWC0 tensor. Supported type: float, double, int32, +*@li x: An 4d tensor. Supported type: float, double, int32, * uint8, int16, int8, int64, uint16, half, uint32, uint64. -*@li grad: An NC1HWC0 tensor. Supported type: float, double, int32, + * Must set the format, supported format list ["NCHW, NHWC"] +*@li grad: An 4d tensor. 
Supported type: float, double, int32, * uint8, int16, int8, int64, uint16, half, uint32, uint64. + * Must set the format, supported format list ["NCHW, NHWC"] *@li argmx: An NC1HWC0 tensor of type int32 or int64 . \n *@par Attributes: @@ -741,7 +891,7 @@ REG_OP(AvgPoolV2Grad) * @brief Computes gradients of averagev2 pooling function. * @par Inputs: -* @li input_grad: An NHWC tensor of type float16, float32, or double. +*input_grad: An NHWC tensor of type float16, float32, or double. * @par Attributes: * @li orig_input_shape: A required tuple or list of type int32. @@ -759,10 +909,10 @@ REG_OP(AvgPoolV2Grad) * @li data_format: An optional string. Defaults to "NHWC". * @par Outputs: -* @out_grad: A mutable tensor with the same shape and type as "orig_input". +*out_grad: A mutable tensor with the same shape and type as "orig_input". * @par Third-party framework compatibility -* @li Compatible with the TensorFlow operator AvgPoolGrad. +*Compatible with the TensorFlow operator AvgPoolGrad. */ REG_OP(AvgPoolV2GradD) .INPUT(input_grad, TensorType({DT_FLOAT16})) @@ -1037,6 +1187,7 @@ REG_OP(MaxPool3DGrad) .OUTPUT(y, TensorType::RealNumberType()) .REQUIRED_ATTR(ksize, ListInt) .REQUIRED_ATTR(strides, ListInt) + .ATTR(padding, String, "SAME") .REQUIRED_ATTR(pads, ListInt) .ATTR(data_format, String, "NDHWC") .OP_END_FACTORY_REG(MaxPool3DGrad) @@ -1107,7 +1258,7 @@ REG_OP(AvgPool1DD) *@par Inputs: * One input: -*x: An NC1HWC0 Tensor of type float16. +*x: An 4d Tensor of type float16. Must set the format, supported format list ["NCHW, NHWC"]. *@par Attributes: *@li ksize: A required list of int8, int16, int32, or int64 values, specifying the size of the window for * each dimension of the input tensor. No default value. @@ -1148,9 +1299,9 @@ REG_OP(MaxPoolWithArgmaxV2) *@par Inputs: * Three inputs, including: -*@li x: An NC1HWC0 tensor of type float16. -*@li grad: An NC1HWC0 tensor of type float16. -*@li argmx: An NC1HWC0 tensor of type uint16 or int64 . 
\n +*@li x: An 4d tensor of type float16. Must set the format, supported format list ["NCHW, NHWC"] +*@li grad: An 4d tensor of type float16. Must set the format, supported format list ["NCHW, NHWC"] +*@li argmx: An 4d tensor of type uint16 or int64. Must set the format, supported format list ["NCHW, NHWC"] \n *@par Attributes: *@li ksize: A required list of int8, int16, int32, or int64 values, specifying the size of the window for @@ -1291,5 +1442,306 @@ REG_OP(MaxPoolV3Grad) .ATTR(global_pooling, Bool, false) .ATTR(ceil_mode, Bool, false) .OP_END_FACTORY_REG(MaxPoolV3Grad) + +/** +*@brief Performs Dilation2D on the input . \n + +*@par Inputs: +*x: A tensor of shape is 4d, format is support NHWC. +*filter: A tensor of shape is 3d, the type is same with x, and the c dimension is same with x. \n + +*@par Attributes: +*@li strides: A required list of 4 ints, specifying the stride of the sliding window. The strides of the N and C dimensions are 1. +*@li rates: A required list of 4 ints. The rates of the N and C dimensions are 1. +*@li padding_mode: A optional string. Defaults to "SAME", it support SAME and VALID. +*@li pads: An optional list of 4 ints. +*@li ceil_mode: An optional bool. Defaults to "false". Use ceil or floor to calculate the output size when padding_mode is "CALCULATED". +*@li data_format: An optional string, specifying the data format of "rates" and "strides", either "NCHW" or "NHWC" (default). \n + +*@par Outputs: +*y: The output tensor. Has the same type and format as input "x" . \n + +*@par Third-party framework compatibility +* Compatible with the TensorFlow operator Dilation2D. 
+*/ +REG_OP(Dilation2D) + .INPUT(x,TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16})) + .INPUT(filter,TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16})) + .OUTPUT(y,TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16})) + .REQUIRED_ATTR(strides, ListInt) + .REQUIRED_ATTR(rates, ListInt) + .ATTR(padding_mode, String, "SAME") + .ATTR(pads, ListInt, {0,0,0,0}) + .ATTR(ceil_mode, Bool, false) + .ATTR(data_format, String, "NHWC") + .OP_END_FACTORY_REG(Dilation2D) + +/** +*@brief Performs Dilation2DBackpropFilter on the input. \n + +*@par Inputs: +*x: A tensor of shape is 4d, format is support NHWC. +*filter: A tensor of shape is 3d, the type is same with x, and the c dimension is same with x. +*out_backprop: Has the same type and format as input x and the c dimension is same with x. \n + +*@par Attributes +*@li strides: A required list of 4 ints, specifying the stride of the sliding window. The strides of the N and C dimension are 1. +*@li rates: A required list of 4 ints, the rates of the N and C dimensions are 1. +*@li padding_mode: A optional string. Defaults to "SAME", it support SAME and VALID. +*@li pads: A optional list of 4 ints. +*@li ceil_mode: An optional bool. Defaults to "false". Use ceil or floor to calculate the output size when padding_mode is "CALCULATED". +*@li data_format: An optional string, specifying the data format of "rates" and "strides", either "NCHW" or "NHWC" (default). \n + +*@par Outputs: +*y: The output tensor. Has the same type and format as input "filter" . \n + +*@par Third-party framework compatibility +* Compatible with the TensorFlow operator Dilation2DBackpropFilter. 
+*/ + +REG_OP(Dilation2DBackpropFilter) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16})) + .INPUT(filter, + TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16})) + .INPUT(out_backprop, + TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16})) + .OUTPUT(y, + TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16})) + .REQUIRED_ATTR(strides, ListInt) + .REQUIRED_ATTR(rates, ListInt) + .ATTR(padding_mode, String, "SAME") + .ATTR(pads, ListInt, {0, 0, 0, 0}) + .ATTR(ceil_mode, Bool, false) + .ATTR(data_format, String, "NHWC") + .OP_END_FACTORY_REG(Dilation2DBackpropFilter) + +/** +*@brief Performs Dilation2DBackpropInput on the input. \n + +*@par Inputs: +*x: A tensor of shape is 4d, format is support NHWC. +*filter: A tensor of shape is 3d, the type is same with x, and the c dimension is same with x. +*out_backprop: Has the same type and format as input x and the c dimension is same with x. \n + +*@par Attributes +*@li strides: A required list of 4 ints, specifying the stride of the sliding window. The strides of the N and C dimension are 1. +*@li rates: A required list of 4 ints, the rates of the N and C dimensions are 1. +*@li padding_mode: A optional string. Defaults to "SAME", it support SAME and VALID. +*@li pads: A optional list of 4 ints. +*@li ceil_mode: An optional bool. Defaults to "false". Use ceil or floor to calculate the output size when padding_mode is "CALCULATED". +*@li data_format: An optional string, specifying the data format of "rates" and "strides", either "NCHW" or "NHWC" (default). \n + +*@par Outputs: +*y: The output tensor. Has the same type and format as input "x" . \n + +*@par Third-party framework compatibility +* Compatible with the TensorFlow operator Dilation2DBackpropInput. 
+*/ + +REG_OP(Dilation2DBackpropInput) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16})) + .INPUT(filter, + TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16})) + .INPUT(out_backprop, + TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16})) + .OUTPUT(y, + TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16})) + .REQUIRED_ATTR(strides, ListInt) + .REQUIRED_ATTR(rates, ListInt) + .ATTR(padding_mode, String, "SAME") + .ATTR(pads, ListInt, {0, 0, 0, 0}) + .ATTR(ceil_mode, Bool, false) + .ATTR(data_format, String, "NHWC") + .OP_END_FACTORY_REG(Dilation2DBackpropInput) + +/** +* @brief Applies a 2D adaptive average pooling over +* an input signal composed of several input planes. \n + +* @par Inputs: +* One input, including: +* @li x: A Tensor. Must be one of the following data types: +* float16, float32. \n + +* @par Attributes: +* @li output_size: A required list of 2 ints +* specifying the size (H,W) of the output tensor. \n + +* @par Outputs: +* @li y: A Tensor. Has the same data type as "x" \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator AdaptiveAvgPool2d. +*/ +REG_OP(AdaptiveAvgPool2d) + .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16})) + .REQUIRED_ATTR(output_size, ListInt) + .OP_END_FACTORY_REG(AdaptiveAvgPool2d) + +/** +* @brief Compute gradients of adaptive averagev2 pooling function. + +* @par Inputs: +* @li input_grad: A Tensor. Must be one of the following data types: +* float16, float32. + +* @par Attributes: +* @li orig_input_shape: A required tuple or list of type int32. + +* @par Outputs: +* @li output_grad: A tensor with the same type as "input_grad". 
+ +* @par Third-party framework compatibility +* Compatible with the Pytorch operator AdaptiveAvgPool2dGrad. +*/ +REG_OP(AdaptiveAvgPool2dGrad) + .INPUT(input_grad, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(output_grad, TensorType({DT_FLOAT, DT_FLOAT16})) + .REQUIRED_ATTR(orig_input_shape, ListInt) + .OP_END_FACTORY_REG(AdaptiveAvgPool2dGrad) + +/** +* @brief Performs the backpropagation of MaxPoolWithGradArgmaxV1. + +* @par Inputs: +* Three inputs, including: +* @li x: An NC1HWC0 tensor of type float16. +* @li grad: An NC1HWC0 tensor of type float16. +* @li argmax: An NC1HWC0 tensor of type uint16 or int64. \n + +* @par Attributes: +* @li ksize: A required list of int8, int16, int32, or int64 values, specifying the size of the window for +* each dimension of the input tensor. No default value. +* @li strides: A required list of int8, int16, int32, or int64 values, specifying the stride of the sliding window for +* each dimension of the input tensor. No default value. +* @li pads: A required listint. \n + +* @par Outputs: +* y: A Tensor. Has the same type and format as input "x". \n + +* @attention Constraints: +* @li "ksize" is a list that has length 4: ksize[0] = 1 or ksize[3] = 1, ksize[1] * ksize[2] <= 255. +* @li "strides" is a list that has length 4: strides[0] = 1 or strides[3] = 1 +* @li "pads" is listint. +* @li "ceil_mode" defaults to False. +* @li "data_format" defaults to "NC1HWC0". \n + +* @par Third-party framework compatibility +* Compatible with the TensorFlow operator MaxPoolGradWithArgmaxV1. 
+*/ + +REG_OP(MaxPoolGradWithArgmaxV1) + .INPUT(x, TensorType({DT_FLOAT16})) + .INPUT(grad, TensorType({DT_FLOAT16})) + .INPUT(argmax, TensorType({DT_UINT16})) + .OUTPUT(y, TensorType({DT_FLOAT16})) + .REQUIRED_ATTR(ksize, ListInt) + .REQUIRED_ATTR(strides, ListInt) + .REQUIRED_ATTR(pads, ListInt) + .ATTR(dtype, Int, 3) + .ATTR(dilation, ListInt, {1, 1, 1, 1}) + .ATTR(ceil_mode, Bool, false) + .OP_END_FACTORY_REG(MaxPoolGradWithArgmaxV1) + +/** +* @brief Performs max pooling on the input and outputs both max values and indices. + +* @par Inputs: +* One input: +* x: An NC1HWC0 Tensor of type float16. \n + +* @par Attributes: +* @li ksize: A required list of int8, int16, int32, or int64 values, specifying the size of the window for +* each dimension of the input tensor. No default value. +* @li strides: A required list of int8, int16, int32, or int64 values, specifying the stride of the sliding window for +* each dimension of the input tensor. No default value. +* @li pads: A required string. No default value. \n + +* @par Outputs: +* y: A Tensor. Has the same type and format as input "x". +* argmax: A Tensor. type:uint16, format:NC1HWC0. \n + +* @attention Constraints: +* @li "ksize" is a list that has length 4: ksize[0] = 1 or ksize[3] = 1, ksize[1] * ksize[2] <= 255. +* @li "stride is a list that has length 4: strides[0] = 1 or strides[3] = 1, strides[1] <= 63, strides[0] >= 1, +* strides[2] <= 63, strides[2] >= 1. +* @li "pads" is listint. +* @li "ceil_mode" defaults to False. +* @li "data_format" defaults to "NC1HWC0". \n + +* @par Third-party framework compatibility +* Compatible with the TensorFlow operator MaxPoolWithArgmaxV1. 
+*/ +REG_OP(MaxPoolWithArgmaxV1) + .INPUT(x, TensorType({DT_FLOAT16})) + .OUTPUT(y, TensorType({DT_FLOAT16})) + .OUTPUT(argmax, TensorType({DT_UINT16})) + .REQUIRED_ATTR(ksize, ListInt) + .REQUIRED_ATTR(strides, ListInt) + .REQUIRED_ATTR(pads, ListInt) + .ATTR(dtype, Int, 3) + .ATTR(dilation, ListInt, {1, 1, 1, 1}) + .ATTR(ceil_mode, Bool, false) + .OP_END_FACTORY_REG(MaxPoolWithArgmaxV1) + +/** +*@brief Randomly sample a subset of positive and negative examples, and overwrite +the label vector to the ignore value (-1) for all elements that are not +included in the sample.\n + +* @par Inputs: +* One input: +* labels: shape of labels,(N, ) label vector with values. \n + +* @par Attributes: +* @li batch_size_per_images: A required attribute of type int. +* @li positive_fraction: A required attribute of type float. + +*@par Outputs: +*y: The result of subSample. \n + +*@par Third-party framework compatibility +*Compatible with the Pytorch operator SubSample. +*@par Restrictions: +*Warning: This operator can be integrated only by MaskRcnn. Please do not use it directly. +*/ +REG_OP(SubSample) + .INPUT(labels, TensorType({DT_INT32})) + .OUTPUT(y, TensorType({DT_INT32})) + .REQUIRED_ATTR(batch_size_per_images, Int) + .REQUIRED_ATTR(positive_fraction, Float) + .OP_END_FACTORY_REG(SubSample) + +/** +*@brief Randomly sample a subset of positive and negative examples, and overwrite +the label vector to the ignore value (-1) for all elements that are not +included in the sample.\n + +* @par Inputs: +* two inputs, including: +* @li labels: shape of labels,(N, ) label vector with values:. +* @li shuffle_matrix: random matrix with shape (N, ). \n + +* @par Attributes: +* @li batch_size_per_images: A required attribute of type int. +* @li positive_fraction: A required attribute of type float. + +*@par Outputs: +*y: The result of subSample. \n + +*@par Third-party framework compatibility +*Compatible with the Pytorch operator SubSampleLabels.
+*@par Restrictions: +*Warning: This operator can be integrated only by MaskRcnn. Please do not use it directly. +*/ +REG_OP(SubSampleLabels) + .INPUT(labels, TensorType({DT_INT32})) + .INPUT(shuffle_matrix, TensorType({DT_INT32})) + .OUTPUT(y, TensorType({DT_INT32})) + .REQUIRED_ATTR(batch_size_per_images, Int) + .REQUIRED_ATTR(positive_fraction, Float) + .OP_END_FACTORY_REG(SubSampleLabels) + } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_NN_POOLING_OPS_H diff --git a/third_party/fwkacllib/inc/ops/nn_training_ops.h b/third_party/fwkacllib/inc/ops/nn_training_ops.h index 047fd6da..75e91aee 100644 --- a/third_party/fwkacllib/inc/ops/nn_training_ops.h +++ b/third_party/fwkacllib/inc/ops/nn_training_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -2102,6 +2102,55 @@ REG_OP(FusedMulApplyMomentumExtern) .OP_END_FACTORY_REG(FusedMulApplyMomentumExtern) /** +*@brief Updates '*var' according to the momentum scheme. +* accum = accum * momentum - x1 * x2 * lr +* if use_nesterov is True: +* var += accum * momentum - x1 * x2 * lr +* else: +* var += accum +* +*@par Inputs: +*@li var: A mutable tensor. Must be one of the data types defined in +* TensorType::NumberType(). Should be from a Variable(). +*@li accum: A mutable tensor. Has the same type as "var". Should be from a +* Variable(). +*@li lr: A tensor for the learning rate. Has the same type as "var". Should be +* from a Variable(). +*@li x1: A Tensor has type TensorType::NumberType(). +*@li momentum: A scalar. Has the same type as "var". +*@li x2: A scalar has the same type as "var". +* +*@par Attributes: +*@li use_nesterov: An optional bool. Defaults to "False". +* If "True", var will be updated by using Nesterov momentum. +*@li use_locking: An optional bool. Defaults to "False". 
+* If "True", updating of the "var" tensor is protected by a lock; +* otherwise the behavior is undefined, but may exhibit less contention. +* +*@par Outputs: +* var: A mutable tensor. Has the same type as input "var". +* +*@attention Constraints: +* The input tensors must have the same shape. +* +*@par Third-party framework compatibility +* Compatible with the TensorFlow operator ResourceApplyKerasMomentum. +* +*/ +REG_OP(FusedMulApplyKerasMomentum) + .INPUT(var, TensorType::NumberType()) + .INPUT(accum, TensorType::NumberType()) + .INPUT(lr, TensorType::NumberType()) + .INPUT(x1, TensorType::NumberType()) + .INPUT(momentum, TensorType::NumberType()) + .INPUT(x2, TensorType::NumberType()) + .OUTPUT(var, TensorType::NumberType()) + .OUTPUT(accum, TensorType::NumberType()) + .ATTR(use_locking, Bool, false) + .ATTR(use_nesterov, Bool, false) + .OP_END_FACTORY_REG(FusedMulApplyKerasMomentum) + +/** *@brief Update "g" according to the LARS algorithm . \n *@par Inputs: diff --git a/third_party/fwkacllib/inc/ops/no_op.h b/third_party/fwkacllib/inc/ops/no_op.h index 7834591c..b27b1fa0 100644 --- a/third_party/fwkacllib/inc/ops/no_op.h +++ b/third_party/fwkacllib/inc/ops/no_op.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/nonlinear_fuc_ops.h b/third_party/fwkacllib/inc/ops/nonlinear_fuc_ops.h index e0e5dfc6..ca1c24eb 100644 --- a/third_party/fwkacllib/inc/ops/nonlinear_fuc_ops.h +++ b/third_party/fwkacllib/inc/ops/nonlinear_fuc_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -223,7 +223,29 @@ REG_OP(Relu6Grad) .INPUT(features, TensorType::RealNumberType()) .OUTPUT(backprops, TensorType::RealNumberType()) .OP_END_FACTORY_REG(Relu6Grad) - +/** +*@brief Calculate the elu_grad_v2 function. +*Applies the element-wise function: +* Computes the backward for the elu: if x>0, 1; otherwise elu() + alpha . +*@par Inputs: +*Two inputs, including: +* @li grads: A tensor. Must be one of the following types: +* float16, float32. +* @li activations: A tensor. Must be one of the following types: +* float16, float32. +* +*@par Outputs: +*y: A Tensor with the same type and shape of grads's. +* +*@par Attributes: +*@li alpha: scalar parameter, default value = 1.0 +*/ +REG_OP(EluGradV2) + .INPUT(grads, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(activations, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16})) + .ATTR(alpha, Float, 1.0) + .OP_END_FACTORY_REG(EluGradV2) /** * @brief Compute sigmoid of "x" element-wise . \n @@ -509,6 +531,42 @@ REG_OP(Elu) .OP_END_FACTORY_REG(Elu) /** +*@brief Continuously Differentiable Exponential Linear Units: +* Perform the linear unit element-wise on the input tensor X using formula: +* max(0, x) + min(0, alpha * (exp(x/alpha) - 1)). \n + +*@par Inputs: +*x: A float16, float32, for the input data type . \n + +*@par Attributes: +*alpha1: A float32. Defines at which negative value the ELU saturates. Defaults to "1.0" . \n + +*@par Attributes: +*alpha2: A float32. Defines at which negative value the ELU saturates. Defaults to "1.0" . \n + +*@par Attributes: +*alpha3: A float32. Defines at which positive value the ELU saturates. Defaults to "1.0" . \n + +*@par Outputs: +*y: A float16, float32, for the normalized result . \n + +*@attention Constraints: +*@li The input is of type float16 or float32 .
\n + +*@par Multiple batches supported or not +*Supported +*@par Third-party framework compatibility +*@li Compatible with ONNX's Celu operator +*/ +REG_OP(Celu) + .INPUT(x, TensorType({DT_FLOAT,DT_FLOAT16})) + .OUTPUT(y, TensorType({DT_FLOAT,DT_FLOAT16})) + .ATTR(alpha1, Float, 1.0) + .ATTR(alpha2, Float, 1.0) + .ATTR(alpha3, Float, 1.0) + .OP_END_FACTORY_REG(Celu) + +/** *@brief Computes gradients for the exponential linear (Elu) operation. * *@par Inputs: @@ -640,6 +698,352 @@ REG_OP(Mish) .OUTPUT(y, TensorType({ DT_FLOAT,DT_FLOAT16 })) .OP_END_FACTORY_REG(Mish) +/** + * @brief: pytorch mish_grad operator. + * @par Inputs: + * three input, including: + * @li grad: A Tensor. shape, datatype and format is same as x + * @li x: A Tensor. Must be one of the following types: float16, float32 + * @li tanhx: A Tensor. shape, datatype and format is same as x + * @par Outputs: + * 1 output, including: + * @li x_grad: A Tensor. shape, datatype and format is same as x + */ + +REG_OP(MishGrad) + .INPUT(grad, TensorType({ DT_FLOAT,DT_FLOAT16 })) + .INPUT(x, TensorType({ DT_FLOAT,DT_FLOAT16 })) + .OPTIONAL_INPUT(tanhx, TensorType({ DT_FLOAT,DT_FLOAT16 })) + .OUTPUT(x_grad, TensorType({ DT_FLOAT,DT_FLOAT16 })) + .OP_END_FACTORY_REG(MishGrad) + +/** + * @brief pytorch hardtanh_backward operator. + * + * @par Inputs: + * 2 inputs, including: + * @li result, minimum tensor of the linear region range, + * datatype: float16/float32, format:ND/5HD. + * @li grad, maximum tensor of the linear region range, + * datatype:float16/float32, format:ND/5HD. \n + + * @par Attributes: + * 2 attributes, including: + * @li min_val, minimum value of the linear region range, datatype:float. + * @li max_val, maximum value of the linear region range, datatype:float. \n + + * @par Outputs: + * 1 output, including: + * @li y, hardtanh_backward output tensor, datatype and format is same as + * input result. 
\n + + * @attention Constraints: + * This operator only supports dataType: float16/float32, format: ND/5HD. \n + + * @par Third-party framework compatibility + * Compatible with the Pytorch operator HardtanhGrad. + */ +REG_OP(HardtanhGrad) + .INPUT(result, TensorType({ DT_FLOAT16, DT_FLOAT })) /* "First operand." */ + .INPUT(grad, TensorType({ DT_FLOAT16, DT_FLOAT })) /* "Second operand." */ + .OUTPUT(y, TensorType({ DT_FLOAT16, DT_FLOAT })) /* "Result, has same element type as two inputs" */ + .ATTR(min_val, Float, -1.0) + .ATTR(max_val, Float, 1.0) + .OP_END_FACTORY_REG(HardtanhGrad) + +/** +* @brief Calculates the softplus loss function with attributes of beta and threshold. \n + +* @par Inputs: +* One inputs, including: +* @li x: A mutable Tensor. Must be one of the following types: +* float16, float32. \n + +* @par Attributes: +* @li beta: An optional float. Defaults to "1.0" \n + +* @li threshold: An optional float. Defaults to "20.0" \n + +* @par Outputs: +* @li y: A mutable Tensor. Has the same type as "x" \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator Softplus. +*/ +REG_OP(SoftplusV2) + .INPUT(x, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .OUTPUT(y, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .ATTR(beta, Float, 1.0) + .ATTR(threshold, Float, 20.0) + .OP_END_FACTORY_REG(SoftplusV2) + +/** +* @brief Calculates the reversed outputs of the function "softplus_v2". \n + +* @par Inputs: +* Two inputs, including: +* @li input_gradients: A mutable Tensor. Must be one of the following types: +* float16, float32. +* @li input_features: A mutable Tensor of the same type as "input_gradients" \n + +* @par Attributes: +* @li beta: An optional float. Defaults to "1.0" \n + +* @li threshold: An optional float. Defaults to "20.0" \n + +* @par Outputs: +* @li output_backprops: A mutable Tensor. Has the same type as "input_gradients" \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator SoftplusGrad. 
+*/ +REG_OP(SoftplusV2Grad) + .INPUT(input_gradients, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .INPUT(input_features, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .OUTPUT(output_backprops, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .ATTR(beta, Float, 1.0) + .ATTR(threshold, Float, 20.0) + .OP_END_FACTORY_REG(SoftplusV2Grad) + +/** + * @brief ThresholdedRelu takes one input data (Tensor) and produces one output data (Tensor) + * where the rectified linear function, y = x for x > alpha, y = 0 otherwise, is applied to the tensor elementwise. + * + * @par inputs + * one input including: + * @li x: input A Tensor. Must be one of the following types: float32, float16 + * + * @par output + * one output including: + * @li y:A Tensor of the same type as x + * + */ +REG_OP(ThresholdedRelu) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(alpha, Float, 1.0) + .OP_END_FACTORY_REG(ThresholdedRelu) + +/** +* @brief Calculate the hard shrinkage function. \n + +* @par Inputs: +* One inputs, including: +* @li input_x: A tensor. Must be one of the following types: +* float16, float32. \n + +* @par Attributes: +* @li lambd: An optional float. Defaults to 0.5. \n + +* @par Outputs: +* y: A Tensor with the same dtype and shape of input_x's. \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator Hardshrink. \n +*/ +REG_OP(HardShrink) + .INPUT(input_x, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(output_y, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(lambd, Float, 0.5) + .OP_END_FACTORY_REG(HardShrink) + +/** +*@brief Calculate the hard shrink grad function. \n +* +* Computes the gradient for the HardShrink: if x > lambda or x < -lambda, x,otherwise 0 +* +*@par Inputs: +*Two inputs, including: +* @li gradients: A tensor. Must be one of the following types: +* float16, float32. \n +* @li features: A tensor. Must be one of the following types: +* float16, float32. 
\n +* +*@par Outputs: +*backprops: A Tensor with the same type and shape of features's. \n +* +*@par Attributes: +*@li lambd: An optional float.Defaults to 0.5. \n +* +*@par Third-party framework compatibility +*Compatible with the Pytorch operator Hardshrink_backward. \n +*/ + REG_OP(HardShrinkGrad) + .INPUT(gradients, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(features, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(backprops, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(lambd, Float, 0.5) + .OP_END_FACTORY_REG(HardShrinkGrad) + +/** +* @brief Calculate the hard sigmoid function. \n + +* @par Inputs: +* One inputs, including: +* @li input_x: A tensor. Must be one of the following types: +* float16, float32, int32. \n + +* @par Attributes: +* @li alpha: An optional float. Defaults to 0.16666666. \n +* @li beta: An optional float. Defaults to 0.5. \n + +* @par Outputs: +* y: A Tensor with the same dtype and shape of input_x's. \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator Hardsigmoid. \n +*/ +REG_OP(HardSigmoid) + .INPUT(input_x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32})) + .OUTPUT(output_y, TensorType({DT_FLOAT, DT_FLOAT16})) + .ATTR(alpha, Float, 0.16666666) + .ATTR(beta, Float, 0.5) + .OP_END_FACTORY_REG(HardSigmoid) + +/** +* @brief Calculate the soft shrinkage function. \n + +* @par Inputs: +* One inputs, including: +* @li input_x: A tensor. Must be one of the following types: +* float16, float32. \n + +* @par Attributes: +* @li lambd: An optional float. Defaults to 0.5. \n + +* @par Outputs: +* y: A Tensor with the same dtype and shape of input_x's. \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator Softshrink. 
\n +*/ +REG_OP(SoftShrink) + .INPUT(input_x, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(output_y, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(lambd, Float, 0.5) + .OP_END_FACTORY_REG(SoftShrink) + +/** +* @brief Calculate the reversed outputs of the function "soft_shrink". \n + +* @par Inputs: +* Two inputs, including: +* @li input_grad: A tensor. Must be one of the following types: +* float16, float32. \n +* @li input_x: A tensor of the same dtype as "input_grad". \n + +* @par Attributes: +* @li lambd: An optional float. Defaults to 0.5. \n + +* @par Outputs: +* y: A Tensor of the same dtype and shape as "input_grad". \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator SoftShrinkGrad. \n +*/ +REG_OP(SoftShrinkGrad) + .INPUT(input_grad, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(input_x, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(output_y, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(lambd, Float, 0.5) + .OP_END_FACTORY_REG(SoftShrinkGrad) + +/** +*@brief Calculate the gradient of log sigmoid. \n + +*@par Inputs: +*Two inputs, including: +* @li grads: A tensor, gradient of previous layer. Must be one of the following types: +* float16, float32. \n +* @li features: A tensor, input of log sigmoid. Must be one of the following types: +* float16, float32.
\n
+
+*@par Outputs:
+*One outputs, including:
+* @li y: A tensor with the same type and shape of x's. \n
+
+*@par Third-party framework compatibility
+*Compatible with the Pytorch operator LogSigmoid. \n
+*/
+REG_OP(LogSigmoid)
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) /* "input:x" */
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) /* "output:y" */
+    .OP_END_FACTORY_REG(LogSigmoid)
+
+/**
+*@brief Calculate the backward outputs of the function "hard_sigmoid" \n
+
+*@par Inputs:
+*Two inputs, including:
+* @li grads: A tensor. Must be one of the following types:
+* float16, float32. \n
+* @li input_x: A tensor. Must be one of the following types:
+* float16, float32. \n
+
+*@par Outputs:
+*One outputs, including:
+* @li y: A tensor with the same type and shape of x's. \n
+
+* @par Attributes:
+* @li alpha: An optional float. Defaults to 0.16666666. \n
+* @li beta: An optional float. Defaults to 0.5. \n
+
+*@par Third-party framework compatibility
+*Compatible with the Pytorch operator HardsigmoidGrad. \n
+*/
+REG_OP(HardSigmoidGrad)
+    .INPUT(grads, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .INPUT(input_x, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .ATTR(alpha, Float, 0.16666666)
+    .ATTR(beta, Float, 0.5)
+    .OP_END_FACTORY_REG(HardSigmoidGrad)
+
+/**
+* @brief Calculate the shrink function. \n
+
+* @par Inputs:
+* One inputs, including:
+* @li input_x: A tensor. Must be one of the following types:
+* float16, float32. \n
+
+* @par Attributes:
+* @li lambd: An optional float. Defaults to 0.5. \n
+* @li bias: An optional float. Defaults to 0.0. \n
+
+* @par Outputs:
+* y: A Tensor with the same dtype and shape of input_x's. \n
+
+* @par Third-party framework compatibility
+* Compatible with the ONNX operator Shrink. 
\n +*/ +REG_OP(Shrink) + .INPUT(input_x, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(output_y, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(lambd, Float, 0.5) + .ATTR(bias, Float, 0.0) + .OP_END_FACTORY_REG(Shrink) } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_NONLINEAR_FUC_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/npu_loss_scale_ops.h b/third_party/fwkacllib/inc/ops/npu_loss_scale_ops.h index 8d7ef9f9..f36d2935 100644 --- a/third_party/fwkacllib/inc/ops/npu_loss_scale_ops.h +++ b/third_party/fwkacllib/inc/ops/npu_loss_scale_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/outfeed_ops.h b/third_party/fwkacllib/inc/ops/outfeed_ops.h index e0b783bc..53b9d701 100644 --- a/third_party/fwkacllib/inc/ops/outfeed_ops.h +++ b/third_party/fwkacllib/inc/ops/outfeed_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/pad_ops.h b/third_party/fwkacllib/inc/ops/pad_ops.h index f746b3b3..6854c866 100644 --- a/third_party/fwkacllib/inc/ops/pad_ops.h +++ b/third_party/fwkacllib/inc/ops/pad_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -101,7 +101,7 @@ REG_OP(FillD) */ REG_OP(BroadcastTo) .INPUT(x, TensorType::BasicType()) - .INPUT(shape, TensorType({DT_INT32})) + .INPUT(shape, TensorType({DT_INT32,DT_INT64})) .OUTPUT(y, TensorType::BasicType()) .OP_END_FACTORY_REG(BroadcastTo) @@ -161,7 +161,7 @@ REG_OP(Pad) *@brief Pads a tensor . \n *@par Inputs: -*x: A Tensor. Must be one of the following types: float16, float32, int8, uint8, int32 . \n +*x: A Tensor. Must be one of the following types: float16, float32, int32 . \n *@par Attributes: *paddings: An optional "vector>". Defaults to "{}". @@ -180,8 +180,8 @@ REG_OP(Pad) * Warning: THIS FUNCTION IS DEPRECATED. Please use Pad instead. */ REG_OP(PadD) - .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8, DT_FLOAT})) - .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8, DT_FLOAT})) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) .REQUIRED_ATTR(paddings, ListListInt) .OP_END_FACTORY_REG(PadD) @@ -213,7 +213,7 @@ REG_OP(PadV2) *@brief Pads a tensor . \n *@par Inputs: -*x: A Tensor. Must be one of the following types: float16, float32, int8, uint8, int32 . \n +*x: A Tensor. Must be one of the following types: float16, float32, int32 . \n *constant_values: A Tensor. Must have the same type as input. *@par Attributes: @@ -227,10 +227,7 @@ REG_OP(PadV2) *y: A Tensor of the same type as "x" . \n *@par Third-party framework compatibility: -* Compatible with TensorFlow operator Pad. -* -* @par Restrictions: -* Warning: THIS FUNCTION IS DEPRECATED. Please use Pad instead. +* Compatible with TensorFlow operator PadV2. */ REG_OP(PadV2D) .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) @@ -272,42 +269,42 @@ REG_OP(PadV3) .ATTR(paddings_contiguous, Bool, true) .OP_END_FACTORY_REG(PadV3) -/** -*@brief Pads a tensor. - -*@par Inputs: -*x: A Tensor. Must be one of the following types: float16, float32, int8, uint8, int32. 
- -*@par Attributes: -* @li paddings: An required "vector>". -* For each dimension D of input, paddings[D, 0] indicates how many -* values to add before the contents of tensor in that dimension, -* and paddings[D, 1] indicates how many values to add after the -* contents of tensor in that dimension. -* @li constant_values: An optional int value for pad. -* @li mode: An optional string, Defaults to "constant", indicates paddings mode, -* support "constant", "reflect", "edge" -* @li paddings_contiguous: An optional bool value, Defaults to true. -* If true, paddings is arranged as [[begin0, end0], [begin1, end1], ...] -* If false, paddings is arranged as [[begin0, begin1], ..., [end0, end1], ...] - -*@par Outputs: -*y: A Tensor of the same type as "x". - -*@par Third-party framework compatibility: -* Compatible with ONNX operator Pad. - -* @par Restrictions: -* Warning: THIS FUNCTION IS DEPRECATED. Please use PadV3 instead. -*/ -REG_OP(PadV3D) - .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8})) - .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8})) - .REQUIRED_ATTR(paddings, ListListInt) - .ATTR(constant_values, Int, 0) - .ATTR(mode, String, "constant") - .ATTR(paddings_contiguous, Bool, true) - .OP_END_FACTORY_REG(PadV3D) + /** + *@brief Pads a tensor. + + *@par Inputs: + *x: A Tensor. Must be one of the following types: float16, float32, int8, uint8, int32. + + *@par Attributes: + * @li paddings: An required "vector>". + * For each dimension D of input, paddings[D, 0] indicates how many + * values to add before the contents of tensor in that dimension, + * and paddings[D, 1] indicates how many values to add after the + * contents of tensor in that dimension. + * @li constant_values: An optional int value for pad. + * @li mode: An optional string, Defaults to "constant", indicates paddings mode, + * support "constant", "reflect", "edge" + * @li paddings_contiguous: An optional bool value, Defaults to true. 
+ * If true, paddings is arranged as [[begin0, end0], [begin1, end1], ...] + * If false, paddings is arranged as [[begin0, begin1], ..., [end0, end1], ...] + + *@par Outputs: + *y: A Tensor of the same type as "x". + + *@par Third-party framework compatibility: + * Compatible with ONNX operator Pad. + + * @par Restrictions: + * Warning: THIS FUNCTION IS DEPRECATED. Please use PadV3 instead. + */ + REG_OP(PadV3D) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8})) + .REQUIRED_ATTR(paddings, ListListInt) + .ATTR(constant_values, Int, 0) + .ATTR(mode, String, "constant") + .ATTR(paddings_contiguous, Bool, true) + .OP_END_FACTORY_REG(PadV3D) /** *@brief Create a diagonal tensor @@ -403,5 +400,76 @@ REG_OP(EmbeddingRankId) .ATTR(mode, String, "mod") .OP_END_FACTORY_REG(EmbeddingRankId) +/** +*@brief EmbeddingLocalIndex, Sort statistics index according to rank_id \n + +*@par Inputs: +* @li addr_table: A 2D tensor which last dimension must be 3. +* @li index: A tensor with data type int32, int64, uint32, uint64. + +*@par Attributes: +* @li row_memory: The size of Embedding vector in a row, the default is 320. +* @li mode: String type, currently there are two options: 'mod' and 'order' + +*@par Outputs: +* @li local_idx:Index on each server. +* @li nums:The number of local_idx found on each server. +* @li recover_idx:The sorted local_idx element is at the position corresponding +* to the original input index. + +*@par Third-party framework compatibility +* Compatible with the TensorFlow operator Diag. 
+*/ +REG_OP(EmbeddingLocalIndex) + .INPUT(addr_table, TensorType({DT_UINT64})) + .INPUT(index, TensorType({DT_INT64,DT_INT32,DT_UINT32,DT_UINT64})) + .OUTPUT(local_idx, TensorType({DT_INT64,DT_INT32,DT_UINT32,DT_UINT64})) + .OUTPUT(nums, TensorType({DT_INT64,DT_INT32,DT_UINT32,DT_UINT64})) + .OUTPUT(recover_idx, TensorType({DT_INT64,DT_INT32,DT_UINT32,DT_UINT64})) + .ATTR(row_memory, Int, 320) + .ATTR(mode, String, "mod") + .OP_END_FACTORY_REG(EmbeddingLocalIndex) + +/** +* @brief Fill the value to a tensor has the specified shape. + +* @par Inputs: +* One inputs, including: +* @li dims: An Tensor, specify the shape that the value to fill. + +* @par Attributes: +* @li value: An optional float value. Defaults to 0.0. + +* @par Outputs: +* @li y: A Tensor. Has the shape specify by attr shape, and full of the value specify by attr value. + +* @par Third-party framework compatibility +* Compatible with the ONNX operator ConstantOfShape. +*/ +REG_OP(FillV2) + .INPUT(dims, TensorType({DT_INT16, DT_INT32, DT_INT64})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_INT16, DT_INT32, DT_INT64})) + .ATTR(value, Float, 0) + .OP_END_FACTORY_REG(FillV2) + +/** +* @brief Fill the value to a tensor has the specified shape. + +* @par Attributes: +* @li value: An optional float value. Defaults to 0.0. + +* @li dims: An required listInt to specify the shape that the value to fill. + +* @par Outputs: +* @li y: A Tensor. Has the shape specify by attr shape, and full of the value specify by attr value. + +* @par Third-party framework compatibility +* Compatible with the ONNX operator ConstantOfShape. 
+*/ +REG_OP(FillV2D) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_UINT8, DT_INT16, DT_INT32, DT_INT64})) + .ATTR(value, Float, 0) + .REQUIRED_ATTR(dims, ListInt) + .OP_END_FACTORY_REG(FillV2D) } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_PAD_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/parsing_ops.h b/third_party/fwkacllib/inc/ops/parsing_ops.h index 5c7adfd8..b625180a 100644 --- a/third_party/fwkacllib/inc/ops/parsing_ops.h +++ b/third_party/fwkacllib/inc/ops/parsing_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -51,6 +51,246 @@ REG_OP(StringToNumber) .ATTR(out_type, Type, DT_FLOAT) .OP_END_FACTORY_REG(StringToNumber) +/** +*@brief Convert serialized tensorflow.TensorProto prototype to Tensor. +*@brief Parse an Example prototype. +*@par Input: +*serialized: A Tensor of type string. +*dense_defaults: DYNAMIC INPUT Tensor type as string, float, int64. \n + +*@par Attributes: +*num_sparse: type int num of inputs sparse_indices , sparse_values, sparse_shapes +*out_type: output type +*sparse_keys: ListString +*sparse_types: types of sparse_values +*dense_keys: ListString +*dense_shapes: output of dense_defaults shape +*dense_types: output of dense_defaults type \n + +*@par Outputs: +*sparse_indices: A Tensor of type string. +*sparse_values: Has the same type as sparse_types. +*sparse_shapes: A Tensor of type int64 +*dense_values: Has the same type as dense_defaults. + +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. 
+**/
+REG_OP(ParseSingleExample)
+    .INPUT(serialized, TensorType({DT_STRING}))
+    .DYNAMIC_INPUT(dense_defaults, TensorType({DT_STRING,DT_FLOAT,DT_INT64}))
+    .DYNAMIC_OUTPUT(sparse_indices, TensorType({DT_INT64}))
+    .DYNAMIC_OUTPUT(sparse_values, TensorType({DT_STRING,DT_FLOAT,DT_INT64}))
+    .DYNAMIC_OUTPUT(sparse_shapes, TensorType({DT_INT64}))
+    .DYNAMIC_OUTPUT(dense_values, TensorType({DT_STRING,DT_FLOAT,DT_INT64}))
+    .ATTR(num_sparse, Int, 0)
+    .ATTR(sparse_keys, ListString, {})
+    .ATTR(dense_keys, ListString, {})
+    .ATTR(sparse_types, ListType, {})
+    .ATTR(Tdense, ListType, {})
+    .ATTR(dense_shapes, ListListInt, {})
+    .OP_END_FACTORY_REG(ParseSingleExample)
+
+/**
+*@brief Decodes raw file into tensor . \n
+*@par Input:
+*bytes: A Tensor of type string.
+
+*@par Attributes:
+*little_endian: bool true
+*out_type: output type
+
+*@par Outputs:
+*Output: A Tensor
+**/
+REG_OP(DecodeRaw)
+    .INPUT(bytes, TensorType({DT_STRING}))
+    .OUTPUT(output, TensorType({DT_BOOL,DT_FLOAT16,DT_DOUBLE,DT_FLOAT,
+                                DT_INT64,DT_INT32,DT_INT8,DT_UINT8,DT_INT16,
+                                DT_UINT16,DT_COMPLEX64,DT_COMPLEX128}))
+    .ATTR(out_type, Type, DT_FLOAT)
+    .ATTR(little_endian, Bool, true)
+    .OP_END_FACTORY_REG(DecodeRaw)
+
+/**
+*@brief Convert serialized tensorflow.TensorProto prototype to Tensor. \n
+
+*@par Inputs:
+*serialized: A Tensor of string type. Scalar string containing serialized
+*TensorProto prototype. \n
+
+*@par Attributes:
+*out_type: The type of the serialized tensor. The provided type must match the
+*type of the serialized tensor and no implicit conversion will take place. \n
+
+*@par Outputs:
+*output: A Tensor of type out_type. \n
+
+*@attention Constraints:
+*The implementation for ParseTensor on Ascend uses AICPU,
+*with bad performance. \n
+
+*@par Third-party framework compatibility
+*@li compatible with tensorflow ParseTensor operator. 
+*/
+REG_OP(ParseTensor)
+    .INPUT(serialized, TensorType({DT_STRING}))
+    .OUTPUT(output, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16,
+                                DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_UINT32,
+                                DT_UINT64, DT_BOOL, DT_DOUBLE, DT_STRING,
+                                DT_COMPLEX64, DT_COMPLEX128}))
+    .ATTR(out_type, Type, DT_FLOAT)
+    .OP_END_FACTORY_REG(ParseTensor)
+
+/**
+*@brief Converts each string in the input Tensor to the specified numeric
+*type . \n
+
+*@par Inputs:
+*Inputs include:
+*records: Each string is a record/row in the csv and all records should have the
+*same format. \n
+*record_defaults: One tensor per column of the input record, with either a
+*scalar default value for that column or an empty vector if the column is
+*required. \n
+
+*@par Attributes:
+*OUT_TYPE: The numeric type to interpret each string in string_tensor as . \n
+*field_delim: char delimiter to separate fields in a record. \n
+*use_quote_delim: If false, treats double quotation marks as regular characters
+*inside of the string fields (ignoring RFC 4180, Section 2, Bullet 5). \n
+*na_value: Additional string to recognize as NA/NaN. \n
+
+*@par Outputs:
+*output: A Tensor. Has the same type as x . \n
+
+*@attention Constraints:
+*The implementation for DecodeCSV on Ascend uses AICPU, with bad
+*performance. \n
+
+*@par Third-party framework compatibility
+*@li compatible with tensorflow DecodeCSV operator.
+*/
+REG_OP(DecodeCSV)
+    .INPUT(records, TensorType({DT_STRING}))
+    .DYNAMIC_INPUT(record_defaults, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32,
+                                                DT_INT64, DT_STRING}))
+    .DYNAMIC_OUTPUT(output, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32,
+                                        DT_INT64, DT_STRING}))
+    .ATTR(OUT_TYPE, ListType, {})
+    .ATTR(field_delim, String, ",")
+    .ATTR(use_quote_delim, Bool, true)
+    .ATTR(na_value, String, ",")
+    .ATTR(select_cols, ListInt, {})
+    .OP_END_FACTORY_REG(DecodeCSV)
+
+/**
+*@brief Convert serialized tensorflow.TensorProto prototype to Tensor.
+*@brief Parse an Example prototype. 
+*@par Input: +*serialized: A Tensor of type string. \n +*name:A Tensor of type string. \n +*sparse_keys: Dynamic input tensor of string. \n +*dense_keys: Dynamic input tensor of string \n +*dense_defaults: Dynamic input tensor type as string, float, int64. \n + +*@par Attributes: +*Nsparse: Number of sparse_keys, sparse_indices and sparse_shapes \n +*Ndense: Number of dense_keys \n +*sparse_types: types of sparse_values \n +*Tdense: Type of dense_defaults dense_defaults and dense_values \n +*dense_shapes: output of dense_defaults shape \n + +*@par Outputs: +*sparse_indices: A Tensor of type string. \n +*sparse_values: Has the same type as sparse_types. \n +*sparse_shapes: A Tensor of type int64 \n +*dense_values: Has the same type as dense_defaults. \n +*@par Third-party framework compatibility \n +*@li compatible with tensorflow StringToNumber operator. \n +*/ +REG_OP(ParseExample) + .INPUT(serialized, TensorType({DT_STRING})) + .INPUT(name, TensorType({DT_STRING})) + .DYNAMIC_INPUT(sparse_keys, TensorType({DT_STRING})) + .DYNAMIC_INPUT(dense_keys, TensorType({DT_STRING})) + .DYNAMIC_INPUT(dense_defaults, TensorType({DT_FLOAT, DT_INT64, DT_STRING})) + .DYNAMIC_OUTPUT(sparse_indices, TensorType({DT_INT64})) + .DYNAMIC_OUTPUT(sparse_values, TensorType({DT_FLOAT, DT_INT64, DT_STRING})) + .DYNAMIC_OUTPUT(sparse_shapes, TensorType({DT_INT64})) + .DYNAMIC_OUTPUT(dense_values, TensorType({DT_FLOAT, DT_INT64, DT_STRING})) + .ATTR(Nsparse, Int, 0) + .ATTR(Ndense, Int, 0) + .ATTR(sparse_types, ListType, {}) + .ATTR(Tdense, ListType, {}) + .ATTR(dense_shapes, ListListInt, {}) + .OP_END_FACTORY_REG(ParseExample) + +/** +*@brief Transforms a scalar brain.SequenceExample proto (as strings) into typed +*tensors. +*@par Input: +*serialized: A Tensor of type string. \n +*feature_list_dense_missing_assumed_empty:A Tensor of type string. \n +*context_sparse_keys: Dynamic input tensor of string. 
\n +*context_dense_keys: Dynamic input tensor of string \n +*feature_list_sparse_keys: Dynamic input tensor of string \n +*feature_list_dense_keys: Dynamic input tensor of string \n +*context_dense_defaults: Dynamic input tensor of string, float, int64 \n +*debug_name: A Tensor of type string. \n + +*@par Attributes: +*Ncontext_sparse: Number of context_sparse_keys, context_sparse_indices and context_sparse_shapes \n +*Ncontext_dense: Number of context_dense_keys \n +*Nfeature_list_sparse: Number of feature_list_sparse_keys \n +*Nfeature_list_dense: Number of feature_list_dense_keys \n +*context_sparse_types: Types of context_sparse_values \n +*Tcontext_dense: Number of dense_keys \n +*feature_list_dense_types: Types of feature_list_dense_values \n +*context_dense_shapes: Shape of context_dense \n +*feature_list_sparse_types: Type of feature_list_sparse_values \n +*feature_list_dense_shapes: Shape of feature_list_dense \n + +*@par Outputs: +*context_sparse_indices: Dynamic output tensor of type int64. \n +*context_sparse_values: Dynamic output tensor of type string, float, int64. \n +*context_sparse_shapes: Dynamic output tensor of type int64 \n +*context_dense_values: Dynamic output tensor of type string, float, int64. \n +*feature_list_sparse_indices: Dynamic output tensor of type int64. \n +*feature_list_sparse_values: Dynamic output tensor of type string, float, int64. \n +*feature_list_sparse_shapes: Dynamic output tensor of type int64 \n +*feature_list_dense_values: Dynamic output tensor of type string, float, int64. \n +*@par Third-party framework compatibility \n +*@li compatible with tensorflow StringToNumber operator. 
\n +*/ +REG_OP(ParseSingleSequenceExample) + .INPUT(serialized, TensorType({DT_STRING})) + .INPUT(feature_list_dense_missing_assumed_empty, TensorType({DT_STRING})) + .DYNAMIC_INPUT(context_sparse_keys, TensorType({DT_STRING})) + .DYNAMIC_INPUT(context_dense_keys, TensorType({DT_STRING})) + .DYNAMIC_INPUT(feature_list_sparse_keys, TensorType({DT_STRING})) + .DYNAMIC_INPUT(feature_list_dense_keys, TensorType({DT_STRING})) + .DYNAMIC_INPUT(context_dense_defaults, TensorType({DT_FLOAT, DT_INT64, DT_STRING})) + .INPUT(debug_name, TensorType({DT_STRING})) + .DYNAMIC_OUTPUT(context_sparse_indices, TensorType({DT_INT64})) + .DYNAMIC_OUTPUT(context_sparse_values, TensorType({DT_FLOAT, DT_INT64, DT_STRING})) + .DYNAMIC_OUTPUT(context_sparse_shapes, TensorType({DT_INT64})) + .DYNAMIC_OUTPUT(context_dense_values, TensorType({DT_FLOAT, DT_INT64, DT_STRING})) + .DYNAMIC_OUTPUT(feature_list_sparse_indices, TensorType({DT_INT64})) + .DYNAMIC_OUTPUT(feature_list_sparse_values, TensorType({DT_FLOAT, DT_INT64, DT_STRING})) + .DYNAMIC_OUTPUT(feature_list_sparse_shapes, TensorType({DT_INT64})) + .DYNAMIC_OUTPUT(feature_list_dense_values, TensorType({DT_FLOAT, DT_INT64, DT_STRING})) + .ATTR(Ncontext_sparse, Int, 0) + .ATTR(Ncontext_dense, Int, 0) + .ATTR(Nfeature_list_sparse, Int, 0) + .ATTR(Nfeature_list_dense, Int, 0) + .ATTR(context_sparse_types, ListType, {}) + .ATTR(Tcontext_dense, ListType, {}) + .ATTR(feature_list_dense_types, ListType, {}) + .ATTR(context_dense_shapes, ListListInt, {}) + .ATTR(feature_list_sparse_types, ListType, {}) + .ATTR(feature_list_dense_shapes, ListListInt, {}) + .OP_END_FACTORY_REG(ParseSingleSequenceExample) + } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_PARSING_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/quantize_ops.h b/third_party/fwkacllib/inc/ops/quantize_ops.h index b53cfeb6..69d5e67e 100644 --- a/third_party/fwkacllib/inc/ops/quantize_ops.h +++ b/third_party/fwkacllib/inc/ops/quantize_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 
2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -62,6 +62,26 @@ REG_OP(Dequantize) /** *@brief Quantizes the input . \n +*@par Inputs: +*x: shape and dtype of input_x. \n +*scales: shape and dtype of input_scales. \n +*zero_points: shape and dtype of input_zero_points \n +*@par Attributes: +*@li axis: the processed dim. \n +*@par Outputs: +*y: shape and dtype of output_y, should be same shape as input, dtype is same as the quantified type . \n +*/ +REG_OP(Quantize) + .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(scales, TensorType({DT_FLOAT})) + .INPUT(zero_points, TensorType({DT_INT8,DT_UINT8,DT_INT32})) + .OUTPUT(y, TensorType({DT_INT8,DT_UINT8,DT_INT32})) + .REQUIRED_ATTR(dtype, String) + .ATTR(axis, Int, 1) + .OP_END_FACTORY_REG(Quantize) + +/** +*@brief Quantizes the input . \n *@par Inputs: *x: An NC1HWC0 tensor of type float16 or float32, specifying the input . \n @@ -194,7 +214,7 @@ REG_OP(AscendRequant) *@brief Requantizes the input of int16 . \n *@par Inputs: -*@li x: An NC1HWC0 tensor of type int16, specifying the input. +*@li x0: An NC1HWC0 tensor of type int16, specifying the input. *@li req_scale: An NC1HWC0 tensor of type uint64, specifying the scaling ratio. *@li x1: An NC1HWC0 tensor of type int16 . \n @@ -203,22 +223,21 @@ REG_OP(AscendRequant) *@li relu_flag: A optional bool, specifying whether to perform ReLU, either "True" or "False". Defaults to "False" . \n *@par Outputs: -*@li y: The dequantized output tensor of type int8 and with format NC1HWC0. +*@li y0: The dequantized output tensor of type int8 and with format NC1HWC0. *@li y1: The dequantized output tensor of type int16 and with format NC1HWC0 . \n *@par Third-party framework compatibility * It is a custom operator. It has no corresponding operator in Caffe. 
*/ REG_OP(AscendRequantS16) - .INPUT(x, TensorType({DT_INT16})) + .INPUT(x0, TensorType({DT_INT16})) .INPUT(req_scale, TensorType({DT_UINT64})) .OPTIONAL_INPUT(x1, TensorType({DT_INT16})) - .OUTPUT(y, TensorType({DT_INT8})) + .OUTPUT(y0, TensorType({DT_INT8})) .OUTPUT(y1, TensorType({DT_INT16})) .ATTR(dual_output, Bool, false) .ATTR(relu_flag, Bool, false) .OP_END_FACTORY_REG(AscendRequantS16) - } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_QUANTIZE_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/ragged_array_ops.h b/third_party/fwkacllib/inc/ops/ragged_array_ops.h index 9b31aa8e..20484623 100644 --- a/third_party/fwkacllib/inc/ops/ragged_array_ops.h +++ b/third_party/fwkacllib/inc/ops/ragged_array_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/ragged_conversion_ops.h b/third_party/fwkacllib/inc/ops/ragged_conversion_ops.h index 13488a25..020e3da4 100644 --- a/third_party/fwkacllib/inc/ops/ragged_conversion_ops.h +++ b/third_party/fwkacllib/inc/ops/ragged_conversion_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/third_party/fwkacllib/inc/ops/ragged_math_ops.h b/third_party/fwkacllib/inc/ops/ragged_math_ops.h index 8af4f867..258b0ca1 100644 --- a/third_party/fwkacllib/inc/ops/ragged_math_ops.h +++ b/third_party/fwkacllib/inc/ops/ragged_math_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/random_ops.h b/third_party/fwkacllib/inc/ops/random_ops.h index b46da435..b65a68f1 100644 --- a/third_party/fwkacllib/inc/ops/random_ops.h +++ b/third_party/fwkacllib/inc/ops/random_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -356,6 +356,39 @@ REG_OP(DropOutGenMask) .ATTR(seed2, Int, 0) .OP_END_FACTORY_REG(DropOutGenMask) + +/** +*@brief Generate random uint8 mask for dropout v3 . \n + +*@par Inputs: +include: +*@li shape:The shape of the output tensor. +*@li prob:0-D. Prob of 1 . \n + +*@par Attributes: +*@li seed:If either seed or seed2 are set to be non-zero, the random number +*generator is seeded by the given seed. Otherwise, it is seeded by a random seed. +*@li seed2:A second seed to avoid seed collision . \n + +*@par Outputs: +*y:Output (1-D) random number using uint8 data format . \n + +*@attention Constraints: +*The output is aligned with 16 + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. 
+ +*@see DropOutGenMaskV3() +*/ +REG_OP(DropOutGenMaskV3) + .INPUT(shape, TensorType({ DT_INT32, DT_INT64 })) + .INPUT(prob, TensorType({ DT_FLOAT16, DT_FLOAT })) + .OUTPUT(y, TensorType({ DT_UINT8 })) + .ATTR(seed, Int, 0) + .ATTR(seed2, Int, 0) + .OP_END_FACTORY_REG(DropOutGenMaskV3) + /** *@brief Generates values in an interval . \n @@ -495,6 +528,62 @@ REG_OP(ShuffleChannel) DT_UINT16, DT_INT32, DT_UINT32,DT_INT64,DT_UINT64})) .ATTR(group, Int, 1) .OP_END_FACTORY_REG(ShuffleChannel) + +/** + * @briefGenerate a tensor of samples from a multinomial + * distribution according to the probabilities of each of + * the possible outcomes. + * + * @par inputs + * one input including: + * @li x:Input tensor with shape [batch_size, class_size], + * where class_size is the number of all possible outcomes. + * Each value along the axis zero represents the unnormalized + * log-probability of each corresponding outcome in a batch. + * + * @par output + * one output including: + * @li y:Output tensor with shape [batch_size, sample_size], + * where sample_size is the number of times to sample. + * Each value along the axis zero represents the outcome of + * the corresponding sample in a batch. + * + * @par Restrictions: + * Warning:THIS FUNCTION IS EXPERIMENTAL. Please do not use. + */ +REG_OP(MultinomialFuss) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_FLOAT64})) + .OUTPUT(y, TensorType({DT_INT32, DT_INT64})) + .ATTR(dtype, Int, 6) + .ATTR(sample_size, Int, 1) + .ATTR(seed, Float, 0) + .OP_END_FACTORY_REG(MultinomialFuss) + +/** +* @brief During training, randomly zeroes some of the elements of the input tensor +* with probability +* +* @par Inputs: +* @li x: A ND Tensor. Must be one of the following data types: Float, Float16 +* @li seed: A ND Tensor. Must be one of the following data types: Float +* +* @par Attributes: +* @li p: probability of an element to be zeroed +* +* @par Outputs: +* @li y: A tensor with the same shape and type as "x". 
+* @li mask: A tensor with the same shape and type as "x". +* @li new_seed: A tensor with the same shape and type as "seed". +*/ + +REG_OP(DropoutV2) + .INPUT(x, TensorType({ DT_FLOAT16, DT_FLOAT })) + .INPUT(seed, TensorType({ DT_FLOAT })) + .OUTPUT(y, TensorType({ DT_FLOAT16, DT_FLOAT })) + .OUTPUT(mask, TensorType({ DT_FLOAT })) + .OUTPUT(seed, TensorType({ DT_FLOAT })) + .REQUIRED_ATTR(p, Float) + .OP_END_FACTORY_REG(DropoutV2) } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_RANDOM_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/reduce_ops.h b/third_party/fwkacllib/inc/ops/reduce_ops.h index 6f44093e..97c7b8e1 100644 --- a/third_party/fwkacllib/inc/ops/reduce_ops.h +++ b/third_party/fwkacllib/inc/ops/reduce_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -37,7 +37,7 @@ namespace ge { *@attention Constraints: * This operator is a BatchNorm fusion operator for updating the moving * averages for training. -* This operator is used in conjunction with BNTrainingUpdate. +* This operator is used in conjunction with BNTrainingReduce. */ REG_OP(BNTrainingReduce) .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) @@ -46,6 +46,27 @@ REG_OP(BNTrainingReduce) .OP_END_FACTORY_REG(BNTrainingReduce) /** +*@brief Performs reduced batch normalization . \n + +*@par Inputs: +*x: A 6D Tensor of type float16 or float32, with format NDC1HWC0 . \n + +*@par Outputs: +*@li sum: A 3D Tensor of type float32 for SUM reduced "x". +*@li square_sum: A 3D Tensor of type float32 for SUMSQ reduced "x" . \n + +*@attention Constraints: +* This operator is a BatchNorm fusion operator for updating the moving +* averages for training. +* This operator is used in conjunction with BN3DTrainingReduce. 
+*/ +REG_OP(BN3DTrainingReduce) + .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) + .OUTPUT(sum, TensorType({DT_FLOAT})) + .OUTPUT(square_sum, TensorType({DT_FLOAT})) + .OP_END_FACTORY_REG(BN3DTrainingReduce) + +/** *@brief Performs the backpropagation of BatchNorm . \n *@par Inputs: @@ -89,6 +110,49 @@ REG_OP(BNTrainingReduceGrad) .OP_END_FACTORY_REG(BNTrainingReduceGrad) /** +*@brief Performs the backpropagation of BatchNorm . \n + +*@par Inputs: +* Seven inputs, including: +*@li grads: A 6D Tensor of type float16 or float32, with format NDC1HWC0, for +* the gradient. +*@li x: A 6D Tensor of type float16 or float32, with format NDC1HWC0. +*@li diff_scale: A 6D Tensor of type float32, with format NDC1HWC0, +* for the mean of "x". +*@li diff_offset: A 6D Tensor of type float32, with format NDC1HWC0, +* for the variance of "x". +*@li scale: A 6D Tensor of type float32, with format NDC1HWC0. +*@li batch_mean: A 6D Tensor of type float32, with format NDC1HWC0, +* for the mean of "x". +*@li batch_variance: A 6D Tensor of type float32, with format NDC1HWC0, +* for the variance of "x" . \n + +*@par Attributes: +*epsilon: An optional float32. Defaults to "0.0001". A small float number +* added to the variance of "x" . \n + +*@par Outputs: +*y: A Tensor of type float16 or float32, with format NDC1HWC0, for the offset +* of "x" . \n + +*@attention Constraints: +* The preceding layer of this operator must be BN3DTrainingReduceGrad . 
\n + +*@see BN3DTrainingReduceGrad +*/ +REG_OP(BN3DTrainingReduceGrad) + .INPUT(grads, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(diff_scale, TensorType({DT_FLOAT})) + .INPUT(diff_offset, TensorType({DT_FLOAT})) + .INPUT(scale, TensorType({DT_FLOAT})) + .INPUT(batch_mean, TensorType({DT_FLOAT})) + .INPUT(batch_variance, TensorType({DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT})) + .ATTR(epsilon, Float, 0.0001) + .OP_END_FACTORY_REG(BN3DTrainingReduceGrad) + +/** *@brief Performs reduced batch normalization . \n *@par Inputs: @@ -120,7 +184,7 @@ REG_OP(BNTrainingReduceGrad) *@attention Constraints: *@li This operator is a BatchNorm fusion operator for updating the moving averages for training. -*This operator is used in conjunction with BNTrainingReduce. +*This operator is used in conjunction with BNTrainingUpdate. *@li For Ascend 310, the result accuracy fails to reach 1‰ due to the square * root instruction. */ @@ -142,6 +206,59 @@ REG_OP(BNTrainingUpdate) .OP_END_FACTORY_REG(BNTrainingUpdate) /** +*@brief Performs reduced batch normalization . \n + +*@par Inputs: +* Seven inputs, including: (NDC1HWC0 supported) +*@li x: A 6D Tensor of type float16 or float32. +*@li sum: A 6D Tensor of type float32 for the output of operator +* BN3DTrainingUpdate. +*@li square_sum: A 6D Tensor of type float32 for the output of operator +* BN3DTrainingUpdate. +*@li scale: A 6D Tensor of type float32, for the scaling factor. +*@li offset: A 6D Tensor of type float32, for the scaling offset. +*@li mean: A 6D Tensor of type float32, for the updated mean. +*@li variance: A 6D Tensor of type float32, for the updated variance . \n + +*@par Attributes: +*@li epsilon: A required float32, specifying the small value added to variance +* to avoid dividing by zero. +*@li factor: A required float32, specifying the weight for updating the mean +* and variance . 
\n + +*@par Outputs: +* Five outputs, including: (NDC1HWC0 supported) +*@li y: A 6D Tensor of type float16 or float32, for normalized "x". +*@li mean: A 6D Tensor of type float32, for the updated mean. +*@li variance: A 6D Tensor of type float32, for the updated variance. +*@li batch_mean: A 6D Tensor of type float32, for the mean of "x". +*@li batch_variance: A 6D Tensor of type float32, for the variance of "x" . \n + +*@attention Constraints: +*@li This operator is a BatchNorm fusion operator for updating the moving +averages for training. +*This operator is used in conjunction with BN3DTrainingUpdate. +*@li For Ascend 310, the result accuracy fails to reach 1‰ due to the square +* root instruction. +*/ +REG_OP(BN3DTrainingUpdate) + .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(sum, TensorType({DT_FLOAT})) + .INPUT(square_sum, TensorType({DT_FLOAT})) + .INPUT(scale, TensorType({DT_FLOAT})) + .INPUT(offset, TensorType({DT_FLOAT})) + .INPUT(mean, TensorType({DT_FLOAT})) + .INPUT(variance, TensorType({DT_FLOAT})) + .REQUIRED_ATTR(factor, Float) + .REQUIRED_ATTR(epsilon, Float) + .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT})) + .OUTPUT(mean, TensorType({DT_FLOAT})) + .OUTPUT(variance, TensorType({DT_FLOAT})) + .OUTPUT(batch_mean, TensorType({DT_FLOAT})) + .OUTPUT(batch_variance, TensorType({DT_FLOAT})) + .OP_END_FACTORY_REG(BN3DTrainingUpdate) + +/** *@brief Performs batch normalization for inference . \n *@par Inputs: @@ -285,6 +402,40 @@ REG_OP(BNTrainingUpdateGrad) .OP_END_FACTORY_REG(BNTrainingUpdateGrad) /** +*@brief Performs the backpropagation of BatchNorm . \n + +*@par Inputs: +* Four inputs, including: +*@li grads: A 6D Tensor of type float16 or float32, with format NDC1HWC0, +* for the gradient. +*@li x: A 6D Tensor of type float16 or float32, with format NDC1HWC0. +*@li batch_mean: A 6D Tensor of type float32, with format NDC1HWC0, +* for the mean of "x". 
+*@li batch_variance: A 6D Tensor of type float32, with format NDC1HWC0, +* for the variance of "x" . \n + +*@par Attributes: +*epsilon: An optional float32. Defaults to "0.0001". A small float number +* added to the variance of "x" . \n + +*@par Outputs: +*@li diff_scale: A Tensor of type float32, with format NDC1HWC0, +* for the offset of "scale". +*@li diff_offset: A Tensor of type float32, with format NDC1HWC0, +* for the offset of "offset" . \n + +*/ +REG_OP(BN3DTrainingUpdateGrad) + .INPUT(grads, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(batch_mean, TensorType({DT_FLOAT})) + .INPUT(batch_variance, TensorType({DT_FLOAT})) + .ATTR(epsilon, Float, 0.0001) + .OUTPUT(diff_scale, TensorType({DT_FLOAT})) + .OUTPUT(diff_offset, TensorType({DT_FLOAT})) + .OP_END_FACTORY_REG(BN3DTrainingUpdateGrad) + +/** *@brief Performs the backpropagation of BatchNorm for inference . \n *@par Inputs: @@ -635,8 +786,8 @@ REG_OP(ReduceMin) * Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceMin instead. */ REG_OP(ReduceMinD) - .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT8,DT_UINT8})) - .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT8,DT_UINT8})) + .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT8,DT_UINT8,DT_INT32})) + .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT8,DT_UINT8,DT_INT32})) .REQUIRED_ATTR(axes, ListInt) .ATTR(keep_dims, Bool, false) .OP_END_FACTORY_REG(ReduceMinD) @@ -747,14 +898,14 @@ REG_OP(Reduction) *@brief Computes the euclidean norm of elements across dimensions of a tensor . \n *@par Inputs: -*@li input_tensor: A Tensor. Must be one of the following types: float16, float32, int32. +*@li x: A Tensor. Must be one of the following types: float16, float32, int32. *@li axes: A Tensor of type int8 or int32. Specifies the dimensions to reduce. Defaults to "None" . \n *@par Attributes: *keep_dims: An optional bool. If "True", reduced dimensions will be retained. Defaults to "False" . 
\n *@par Outputs: -*output_tensor: A Tensor. Must be one of the following types: float16, float32, int32 . \n +*y: A Tensor. Must be one of the following types: float16, float32, int32 . \n *@attention Constraints: * If "axes = None", all dimensions will be reduced. "axes" must be in the range [-rank(input_shape), rank(input_shape)) . \n @@ -821,7 +972,7 @@ Defaults to "0.00001" . \n *batch_ variance: A Tensor of type float32 for the result variance . \n *@attention Constraints: -*For Ascend 310, the result accuracy fails to reach 1‰ due to the square root instruction. +*For Ascend 310, the result accuracy fails to reach 0.001 due to the square root instruction. */ REG_OP(INInferV2) .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) @@ -839,7 +990,7 @@ REG_OP(INInferV2) *@brief Performs reduced instance normalization . \n *@par Inputs: -*x: A Tensor of type float16 or float32, with format NC1HWC0 . \n +*x: A Tensor of type float16 or float32. \n *@par Outputs: *@li sum: A Tensor of type float32 for SUM reduced "x". @@ -862,19 +1013,19 @@ REG_OP(INTrainingReduceV2) *@par Inputs: * Seven inputs, including: (NC1HWC0supported) *@li x: A Tensor of type float16 or float32. -*@li sum: A T [N, C1, 1, 1, C0] ensor of type float32 for the output of operator INTrainingReduceV2. -*@li square_sum: A [N, C1, 1, 1, C0] Tensor of type float32 for the output of operator INTrainingReduceV2. -*@li gamma: A [N, C1, 1, 1, C0] Tensor of type float32, for the scaling gamma. -*@li beta: A [N, C1, 1, 1, C0] Tensor of type float32, for the scaling beta. -*@li mean: A [N, C1, 1, 1, C0] Tensor of type float32, for the updated mean. -*@li variance: A [N, C1, 1, 1, C0] Tensor of type float32, for the updated variance . \n +*@li sum: A Tensor of type float32 for the output of operator INTrainingReduceV2. +*@li square_sum: A Tensor of type float32 for the output of operator INTrainingReduceV2. +*@li gamma: A Tensor of type float32, for the scaling gamma. 
+*@li beta: A Tensor of type float32, for the scaling beta. +*@li mean: A Tensor of type float32, for the updated mean. +*@li variance: A Tensor of type float32, for the updated variance . \n *@par Attributes: *@li momentum: A required float32, specifying the momentum to update mean and var. *@li epsilon: A required float32, specifying the small value added to variance to avoid dividing by zero . \n *@par Outputs: -* Three outputs, including: (NC1HWC0 supported) +* Three outputs *@li y: A Tensor of type float16 or float32, for normalized "x". *@li batch_mean: A Tensor of type float32, for the updated mean. *@li batch_variance: A Tensor of type float32, for the updated variance . \n @@ -882,7 +1033,7 @@ REG_OP(INTrainingReduceV2) *@attention Constraints: *@li This operator is a InstanceNorm fusion operator for updating the moving averages for training. * This operator is used in conjunction with INTrainingReduceV2. -*@li For Ascend 310, the result accuracy fails to reach 1‰ due to the square root instruction. +*@li For Ascend 310, the result accuracy fails to reach 1‰ due to the square root instruction. */ REG_OP(INTrainingUpdateV2) .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) @@ -965,7 +1116,7 @@ for the updated variance. *@attention Constraints: *@li This operator is a InstanceNorm fusion operator for updating the moving averages for training. * This operator is used in conjunction with GNTrainingUpdate. -*@li For Ascend 310, the result accuracy fails to reach 1‰ due to the square root instruction. +*@li For Ascend 310, the result accuracy fails to reach 1‰ due to the square root instruction. */ REG_OP(GNTrainingUpdate) .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) @@ -982,6 +1133,98 @@ REG_OP(GNTrainingUpdate) .OUTPUT(batch_variance, TensorType({DT_FLOAT})) .OP_END_FACTORY_REG(GNTrainingUpdate) +/** +*@brief Joins a string Tensor across the given dimensions. \n + +*@par Inputs: +include: +*@li input:A Tensor of type string. The text to be processed. 
+*@li reduction_indices:A Tensor of type int. The dimensions to reduce.
+
+*@par Attributes:
+*@li keep_dims:An optional bool. Defaults to True. If True, retain reduced dimensions with length 1.
+*@li separator:string.
+
+*@par Outputs:
+*@li output:A Tensor of type string.
+*/
+REG_OP(ReduceJoin)
+    .INPUT(input, TensorType({DT_STRING}))
+    .INPUT(reduction_indices, TensorType({DT_INT32}))
+    .OUTPUT(output, TensorType({DT_STRING}))
+    .ATTR(keep_dims, Bool, true)
+    .ATTR(separator, String, "")
+    .OP_END_FACTORY_REG(ReduceJoin)
+
+/**
+* @brief Calculates the standard deviation and average value of Tensors.
+
+* @par Inputs:
+* @li x: A Tensor. Must be one of the following types:
+* float16, float32. \n
+
+* @par Attributes:
+* Three Attributes, including:
+* @li dim: An optional listint, Defaults to "None". \n
+
+* @li unbiased: An optional bool. Defaults to "True".
+* If "True", Use Bessel Correction.
+* If "False", Do not use Bessel Correction. \n
+
+* @li keepdim: An optional bool. Defaults to "False".
+* If "True", Keep the original tensor dimension.
+* If "False", Do not keep the original tensor dimension. \n
+
+* @par Outputs:
+* Two Outputs, including:
+* @li y1: A Tensor. Has the same type as "x".
+* @li y2: A Tensor. Has the same type as "x". \n
+
+* @par Third-party framework compatibility
+* Compatible with the Pytorch operator ReduceStd.
+*/
+REG_OP(ReduceStd)
+    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .OUTPUT(y1, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .OUTPUT(y2, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .ATTR(dim, ListInt, {})
+    .ATTR(unbiased, Bool, true)
+    .ATTR(keepdim, Bool, false)
+    .OP_END_FACTORY_REG(ReduceStd)
+
+/**
+* @brief Calculates the standard deviation of Tensors.
+
+* @par Inputs:
+* include:
+* @li x: A Tensor. Must be one of the following types: float16, float32. \n
+* @li mean: A Tensor. It's the mean of X. Must be one of the following types: float16, float32.
\n + + +* @par Attributes: +* Three Attributes, including: +* @li dim: An optional listint, Defaults to "None". \n +* @li unbiased: An optional bool. Defaults to "True". +* If "True", Use Bessel Correction. +* If "False", Do not use Bessel Correction. \n +* @li keepdim: An optional bool. Defaults to "False". +* If "True", Keep the original tensor dimension. +* If "False", Do not keep the original tensor dimension. \n + +* @par Outputs: +* @li y: A Tensor. It's the std of X. Has the same type as "x". + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator ReduceStdWithMean. +*/ +REG_OP(ReduceStdWithMean) + .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(mean, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16})) + .ATTR(dim, ListInt, {}) + .ATTR(unbiased, Bool, true) + .ATTR(keepdim, Bool, false) + .OP_END_FACTORY_REG(ReduceStdWithMean) } //namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_REDUCE_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/resource_variable_ops.h b/third_party/fwkacllib/inc/ops/resource_variable_ops.h index 1b60d42a..74ac83f8 100644 --- a/third_party/fwkacllib/inc/ops/resource_variable_ops.h +++ b/third_party/fwkacllib/inc/ops/resource_variable_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/rnn.h b/third_party/fwkacllib/inc/ops/rnn.h index 84723872..80546860 100644 --- a/third_party/fwkacllib/inc/ops/rnn.h +++ b/third_party/fwkacllib/inc/ops/rnn.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -33,6 +33,7 @@ namespace ge { *@li c:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. *@li w:A 4D Tensor. Must be one of the following types: float16. The format must be FRACTAL_Z. *@li b:A 1D Tensor. Must be one of the following types: float16. The format must be ND . \n +*@li mask:A 1D Tensor. Must be one of the following types: uint8. *@par Attributes: *@li keep_prob:An integer identifying the keep prob in the op. Default to 1. @@ -42,7 +43,6 @@ namespace ge { *@par Outputs: *seven outputs: -*@li mask:A 1D Tensor. Must be one of the following types: uint8. *@li ct:A 4D Tensor. Must be one of the following types: float16, float32. *@li ht:A 4D Tensor. Must be one of the following types: float16. *@li it:A 4D Tensor. Must be one of the following types: float16, float32. @@ -187,16 +187,16 @@ REG_OP(DynamicRNNGrad) *@brief: DynamicRNN calculation. *@par Inputs: *ten inputs: -*@li x:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. -*@li w:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM. -*@li b:A 1D Tensor. Must be one of the following types: float16, float32. The format must be ND. -*@li seq_length:A 1D Tensor. Must be one of the following types: int32. The format must be ND. -*@li init_h:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. -*@li init_c:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. -*@li wci:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM. -*@li wcf:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM. -*@li wco:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM. -*@li mask:A 1D Tensor. Must be one of the following types: uint8. 
The format must be ND . \n +*@li x:A required 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li w:A required 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM. +*@li b:A required 1D Tensor. Must be one of the following types: float16, float32. The format must be ND. +*@li seq_length:A optional Tensor. Only Support float16 in FRACTAL_NZ and int32 in ND. +*@li init_h:A optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li init_c:A optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li wci:A 4D optional Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM. +*@li wcf:A 4D optional Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM. +*@li wco:A 4D optional Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM. +*@li mask:A 1D optional Tensor. Must be one of the following types: uint8. The format must be ND . \n *@par Attributes: *@li cell_type:An string identifying the cell type in the op. Default to "LSTM". Only LSTM is currently supported. @@ -209,6 +209,7 @@ REG_OP(DynamicRNNGrad) *@li time_major:An bool identifying the time major in the op. Default to true. *@li activation:An string identifying the type of activation function in the op. Default to "tanh". Only tanh is currently supported. *@li forget_bias:An float identifying the forget bias in the op. Default to 0. +*@li gate_order:An string identifying the type of gate order in the op. Support "ijfo" and "ifjo". Default to "ijfo". *@li is_training:An bool identifying is training in the op. Default to true . \n *@par Outputs: @@ -221,12 +222,14 @@ REG_OP(DynamicRNNGrad) *@li f:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. 
*@li o:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. *@li tanhct:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@par Third-party framework compatibility: +* Compatible with the TF operator LSTM. */ REG_OP(DynamicRNN) .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) .INPUT(w, TensorType({DT_FLOAT16, DT_FLOAT})) .INPUT(b, TensorType({DT_FLOAT16, DT_FLOAT})) - .OPTIONAL_INPUT(seq_length, TensorType({DT_INT32})) + .OPTIONAL_INPUT(seq_length, TensorType({DT_INT32, DT_FLOAT16})) .OPTIONAL_INPUT(init_h, TensorType({DT_FLOAT16, DT_FLOAT})) .OPTIONAL_INPUT(init_c, TensorType({DT_FLOAT16, DT_FLOAT})) .OPTIONAL_INPUT(wci, TensorType({DT_FLOAT16, DT_FLOAT})) @@ -251,10 +254,238 @@ REG_OP(DynamicRNN) .ATTR(time_major, Bool, true) .ATTR(activation, String, "tanh") .ATTR(forget_bias, Float, 0.0) + .ATTR(gate_order, String, "ijfo") .ATTR(is_training, Bool, true) .OP_END_FACTORY_REG(DynamicRNN) /** +*@brief: DynamicRNNV2 calculation. +*@par Inputs: +*ten inputs: +*@li x:A required 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li weight_input:A required 4D Tensor. Must be one of the following types: float16, float32. +*The format must be FRACTAL_Z. +*@li weight_hidden:A required 4D Tensor. Must be one of the following types: float16, float32. +*The format must be FRACTAL_Z. +*@li b:A required 1D Tensor. Must be one of the following types: float16, float32. The format must be ND. +*@li seq_length:A optional 1D Tensor. Must be one of the following types: int32. The format must be ND. +*@li init_h:A optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li init_c:A optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li wci:A 4D optional Tensor. Must be one of the following types: float16, float32. 
The format must be FRACTAL_ZN_LSTM.
+*@li wcf:A 4D optional Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM.
+*@li wco:A 4D optional Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM.
+*@li mask:A 1D optional Tensor. Must be one of the following types: uint8. The format must be ND . \n
+
+*@par Attributes:
+*@li cell_type:An string identifying the cell type in the op. Default to "LSTM". Only LSTM is currently supported.
+*@li direction:An string identifying the direction in the op. Default to "UNIDIRECTIONAL".
+*Only UNIDIRECTIONAL is currently supported.
+*@li cell_depth:An integer identifying the cell depth in the op. Default to 1.
+*@li use_peephole:An bool identifying if use peephole in the op. Default to false.
+*@li keep_prob:An float identifying the keep prob in the op. Default to 1.
+*@li cell_clip:An float identifying the cell clip in the op. Default to -1.
+*@li num_proj:An integer identifying the num projection in the op. Default to 0.
+*@li time_major:An bool identifying the time major in the op. Default to true.
+*@li activation:An string identifying the type of activation function in the op. Default to "tanh".
+*Only tanh is currently supported.
+*@li recurrent_activation:An string identifying the type of activation function in the op. Default to "sigmoid".
+*Support "sigmoid" and "hard_sigmoid". In general, set "hard_sigmoid" for TF Keras LSTM.
+*@li forget_bias:An float identifying the forget bias in the op. Default to 0.
+*@li gate_order:An string identifying the type of gate order in the op. Support "ijfo" and "ifco". Default to "ijfo".
+*Set "ijfo" for TF operator LSTM, Set "ifco" for TF Keras LSTM.
+*@li stateful: An bool identifying the type of stateful in the op. Default to false. Only false is currently supported.
+*@li merge_mode: An string identifying the type of merge_mode in the op. Default to "concat".
+*Only "concat" is currently supported +*@li is_training:An bool identifying is training in the op. Default to true . \n + +*@par Outputs: +*eight outputs: +*@li y:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li output_h:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*Return the last output_h. +*@li output_c:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*Return the last output_c. +*@li i:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li j:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li f:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li o:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li tanhct:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@par Third-party framework compatibility: +* Compatible with the TF operator LSTM or TF keras operator LSTM. 
+*/ + +REG_OP(DynamicRNNV2) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(weight_input, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(weight_hidden, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(b, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(seq_length, TensorType({DT_INT32})) + .OPTIONAL_INPUT(init_h, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(init_c, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(wci, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(wcf, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(wco, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(mask, TensorType({DT_UINT8})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(output_h, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(output_c, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(i, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(j, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(f, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(o, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(tanhc, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(cell_type, String, "LSTM") + .ATTR(direction, String, "UNIDIRECTIONAL") + .ATTR(cell_depth, Int, 1) + .ATTR(use_peephole, Bool, false) + .ATTR(keep_prob, Float, 1.0) + .ATTR(cell_clip, Float, -1.0) + .ATTR(num_proj, Int, 0) + .ATTR(time_major, Bool, true) + .ATTR(activation, String, "tanh") + .ATTR(recurrent_activation, String, "sigmoid") + .ATTR(forget_bias, Float, 0.0) + .ATTR(gate_order, String, "ijfo") + .ATTR(stateful, Bool, false) + .ATTR(merge_mode, String, "concat") + .ATTR(is_training, Bool, true) + .OP_END_FACTORY_REG(DynamicRNNV2) + +/** +*@brief: DynamicRNNV3 calculation. +*@par Inputs: +*ten inputs: +*@li x:A required 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li w:A required 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li b:A required 1D Tensor. 
Must be one of the following types: float16, float32. The format must be ND. +*@li seq_length:A optional 1D Tensor. Must be one of the following types: int32. The format must be ND. +*@li init_h:A optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li init_c:A optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li wci:A 4D optional Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li wcf:A 4D optional Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li wco:A 4D optional Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li mask:A 1D optional Tensor. Must be one of the following types: uint8. The format must be ND . \n +*@li real_mask:A 4D optional Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li project:A 4D optional Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. + +*@par Attributes: +*@li cell_type:An string identifying the cell type in the op. Default to "LSTM". Only LSTM is currently supported. +*@li direction:An string identifying the direction in the op. Default to "UNIDIRECTIONAL". Only UNIDIRECTIONAL is currently supported. +*@li cell_depth:An integer identifying the cell depth in the op. Default to 1. +*@li use_peephole:An bool identifying if use peephole in the op. Default to false. +*@li keep_prob:An float identifying the keep prob in the op. Default to 1. +*@li cell_clip:An float identifying the cell clip in the op. Default to -1. +*@li num_proj:An integer identifying the num projection in the op. Default to 0. +*@li time_major:An bool identifying the time major in the op. Default to true. +*@li activation:An string identifying the type of activation function in the op. Default to "tanh". 
Only tanh is currently supported. +*@li forget_bias:An float identifying the forget bias in the op. Default to 0. +*@li is_training:An bool identifying is training in the op. Default to true . \n + +*@par Outputs: +*eight outputs: +*@li y:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li output_h:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li output_c:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li i:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li j:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li f:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li o:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li tanhct:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@par Third-party framework compatibility: +* Compatible with the TF operator LSTM. 
+*/ +REG_OP(DynamicRNNV3) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(w, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(b, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(seq_length, TensorType({DT_INT32})) + .OPTIONAL_INPUT(init_h, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(init_c, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(wci, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(wcf, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(wco, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(mask, TensorType({DT_UINT8})) + .OPTIONAL_INPUT(real_mask, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(project, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(output_h, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(output_c, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(i, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(j, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(f, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(o, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(tanhc, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(cell_type, String, "LSTM") + .ATTR(direction, String, "UNIDIRECTIONAL") + .ATTR(cell_depth, Int, 1) + .ATTR(use_peephole, Bool, false) + .ATTR(keep_prob, Float, 1.0) + .ATTR(cell_clip, Float, -1.0) + .ATTR(num_proj, Int, 0) + .ATTR(time_major, Bool, true) + .ATTR(activation, String, "tanh") + .ATTR(forget_bias, Float, 0.0) + .ATTR(is_training, Bool, true) + .OP_END_FACTORY_REG(DynamicRNNV3) + +/** +*@brief: DynamicLSTMV2 calculation. +*@par Inputs: +*ten inputs: +*@li x:A required 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li w:A required 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM. +*@li b:A required 1D Tensor. Must be one of the following types: float16, float32. The format must be ND. +*@li cont:A required 2D Tensor. 
Must be one of the following types: float16, float32. The format must be ND.
+*@li w_xc_x_static:A optional 2D Tensor. Must be one of the following types: float16, float32. The format must be ND.
+*@li h0:A optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li c0:A optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li wci:A optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM.
+*@li wcf:A optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM.
+*@li wco:A optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM.
+*@li mask:A optional 1D Tensor. Must be one of the following types: uint8. The format must be ND .
+
+*@par Attributes:
+*@li num_output:An integer identifying the num projection in the op. Default to 0.
+*@li expose_hidden:An bool identifying the expose_hidden in the op. Default to false.
+*@li need_output_last:An bool identifying the time major in the op. Default to false.
+*@li forget_bias:An float identifying the forget bias in the op. Default to 0.
+
+*@par Outputs:
+*five outputs:
+*@li y:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li output_h:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li output_c:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li last_output_h:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li last_output_c:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@par Third-party framework compatibility:
+* Compatible with the Caffe operator LSTM.
+*@par Restrictions: +* Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(DynamicLSTMV2) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(w, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(b, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(cont, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(w_xc_x_static, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(h0, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(c0, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(wci, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(wcf, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(wco, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(mask, TensorType({DT_UINT8})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(output_h, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(output_c, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(last_output_h, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(last_output_c, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(num_output, Int, 0) + .ATTR(expose_hidden, Bool, false) + .ATTR(need_output_last, Bool, false) + .ATTR(forget_bias, Float, 0.0) + .OP_END_FACTORY_REG(DynamicLSTMV2) + +/** *@brief: LSTMInputGrad calculation. *@par Inputs: *ten inputs: \n @@ -297,6 +528,60 @@ REG_OP(LSTMInputGrad) .OP_END_FACTORY_REG(LSTMInputGrad) + +/** +*@brief: Dynamic LSTM Cell grad calculation.Calculate the gradient of gates and cell state. +*@par Inputs: +*twelve inputs: +*@li init_c:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li c:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li dy:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li dh:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li dc:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. 
+*@li i:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li j:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li f:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li o:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li tanhct:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li mask:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li t_state:A Tensor. Must be one of the following types: int32. The format must be FRACTAL_NZ . \n
+
+*@par Attributes:
+*@li forget_bias:A float identifying the forget bias in the op. Default to 1.
+*@li activation:A string identifying the type of activation function in the op. Default to "tanh". Only tanh is currently supported . \n
+*@li direction:A string that marks the calculation sequence of the operator. Default to "Forward".
+*@li gate_order:A string marking the order of the output 4 gates. Default to "ijfo".
+
+*@par Outputs:
+*two outputs:
+*@li dgate:A 4D Tensor. Must be one of the following types: float16.
+*@li dct_1:A 4D Tensor. Must be one of the following types: float16, float32.
+
+*@par Restrictions:
+*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
+*/ +REG_OP(DynamicLSTMGradCell) + .INPUT(init_c, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(c, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(dy, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(dh, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(dc, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(i, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(j, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(f, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(o, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(tanhct, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(mask, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(t_state, TensorType({DT_INT32, DT_INT32})) + .OUTPUT(dgate, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(dct_1, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(forget_bias, Float, 1) + .ATTR(activation, String, "") + .ATTR(direction, String, "Forward") + .ATTR(gate_order, String, "ijfo") + .OP_END_FACTORY_REG(DynamicLSTMGradCell) + + /** *@brief: Basic LSTM Cell backward calculation.Calculate the gradient of input and hidden state. *@par Inputs: @@ -475,9 +760,9 @@ REG_OP(BasicRNNCell) .OP_END_FACTORY_REG(BasicRNNCell) /** -*@brief: DynamicGRU calculation. +*@brief DynamicGRU calculation. *@par Inputs: -*seven inputs: \n +*seven inputs: *@li x:Must be one of the following types: float16. The format must be FRACTAL_NZ. *@li w:Must be one of the following types: float16. The format must be FRACTAL_Z. *@li b:Must be one of the following types: float16, float32. The format must be ND. @@ -497,7 +782,7 @@ REG_OP(BasicRNNCell) *@li is_training:An bool identifying is training in the op. Default to true. *@par Outputs: -*five outputs: \n +*five outputs: *@li y:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. *@li output_h:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. *@li r:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. 
@@ -531,9 +816,9 @@ REG_OP(DynamicGRU) .OP_END_FACTORY_REG(DynamicGRU) /** -*@brief: DynamicGRUV2 calculation. +*@brief DynamicGRUV2 calculation. *@par Inputs: -*seven inputs: \n +*seven inputs: *@li x:Must be one of the following types: float16. The format must be FRACTAL_NZ. *@li weight_input:Must be one of the following types: float16. The format must be FRACTAL_Z. *@li weight_hidden:Must be one of the following types: float16. The format must be FRACTAL_Z. @@ -555,16 +840,13 @@ REG_OP(DynamicGRU) *@li is_training:An bool identifying is training in the op. Default to true. *@par Outputs: -*six outputs: \n +*six outputs: *@li y:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. *@li output_h:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. *@li update:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. *@li reset:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. *@li new:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. *@li hidden_new:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. - -*@par Restrictions: -*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. */ REG_OP(DynamicGRUV2) .INPUT(x, TensorType({DT_FLOAT16})) @@ -592,6 +874,68 @@ REG_OP(DynamicGRUV2) .ATTR(is_training, Bool, true) .OP_END_FACTORY_REG(DynamicGRUV2) + +/** +*@brief DynamicGRUV2Hidden calculation. +*@par Inputs: +*five inputs: +*@li x_weight_input:Must be one of the following types: float32. The format must be FRACTAL_NZ. +*@li weight_hidden:Must be one of the following types: float16. The format must be FRACTAL_Z. +*@li bias_hidden:Must be one of the following types: float16, float32. The format must be ND. +*@li seq_length:Must be one of the following types: int32. The format must be ND. +*@li init_h:Must be one of the following types: float16, float32. 
The format must be FRACTAL_NZ. + +*@par Attributes: +*@li direction:An string identifying the direction in the op. Default to "UNIDIRECTIONAL". +Only UNIDIRECTIONAL is currently supported. +*@li cell_depth:An integer identifying the cell depth in the op. Default to 1. +*@li keep_prob:An float identifying the keep prob in the op. Default to 1. +*@li cell_clip:An float identifying the cell clip in the op. Default to -1. +*@li num_proj:An integer identifying the num projection in the op. Default to 0. +*@li time_major:An bool identifying the time major in the op. Default to true. +*@li activation:An string identifying the type of activation function in the op. Default to "tanh". +Only tanh is currently supported. +*@li gate_order:An string identifying the gate order in weight and bias. Default to "zrh". "rzh" is another option. +*@li reset_after:An bool identifying whether to apply reset gate after matrix multiplication. Default to true. +*@li is_training:An bool identifying is training in the op. Default to true. + +*@par Outputs: +*six outputs: +*@li y:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li output_h:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li update:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li reset:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li new:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li hidden_new:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. 
+*/ +REG_OP(DynamicGRUV2Hidden) + .INPUT(x_weight_input, TensorType({DT_FLOAT32})) + .INPUT(weight_hidden, TensorType({DT_FLOAT16})) + .OPTIONAL_INPUT(bias_hidden, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(seq_length, TensorType({DT_INT32})) + .OPTIONAL_INPUT(init_h, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(output_h, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(update, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(reset, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(new, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(hidden_new, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(direction, String, "UNIDIRECTIONAL") + .ATTR(cell_depth, Int, 1) + .ATTR(keep_prob, Float, 1.0) + .ATTR(cell_clip, Float, -1.0) + .ATTR(num_proj, Int, 0) + .ATTR(time_major, Bool, true) + .ATTR(activation, String, "tanh") + .ATTR(gate_order, String, "zrh") + .ATTR(reset_after, Bool, true) + .ATTR(is_training, Bool, true) + .OP_END_FACTORY_REG(DynamicGRUV2Hidden) + + /** *@brief: DynamicGRUV2Grad calculation. *@par Inputs: @@ -618,7 +962,6 @@ REG_OP(DynamicGRUV2) *@li cell_clip:An float identifying the cell clip in the op. Default to -1. *@li num_proj:An integer identifying the num projection in the op. Default to 0. *@li time_major:An bool identifying the time major in the op. Default to true. -*@li bias_type:An string identifying the type of bias_type function in the op. Default to "double_bias". *@li gate_order:An string identifying the gate order in weight and bias. Default to "zrh". "rzh" is another option. *@li reset_after:An bool identifying whether to apply reset gate after matrix multiplication. Default to true. @@ -630,6 +973,9 @@ REG_OP(DynamicGRUV2) *@li db_hidden:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. *@li dx:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. *@li dh_prev:A 4D Tensor. 
Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. */ REG_OP(DynamicGRUV2Grad) .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) @@ -658,7 +1004,6 @@ REG_OP(DynamicGRUV2Grad) .ATTR(cell_clip, Float, -1.0) .ATTR(num_proj, Int, 0) .ATTR(time_major, Bool, true) - .ATTR(bias_type, String, "double_bias") .ATTR(gate_order, String, "zrh") .ATTR(reset_after, Bool, true) .OP_END_FACTORY_REG(DynamicGRUV2Grad) @@ -667,7 +1012,7 @@ REG_OP(DynamicGRUV2Grad) *@brief: GRUV2HiddenGrad calculation. *@par Inputs: *nine inputs: \n -*@li weight_hidden:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li dh_pre_t:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. *@li init_h:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. *@li h:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. *@li dy:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. @@ -678,6 +1023,7 @@ REG_OP(DynamicGRUV2Grad) *@li hidden_new:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. *@par Attributes: +*@li t_state:An Int identifying the current t state. Default to [0, 4]. *@li gate_order:An string identifying the gate order in weight and bias. Default to "zrh". "rzh" is another option. *@par Outputs: @@ -685,10 +1031,12 @@ REG_OP(DynamicGRUV2Grad) *@li dh_prev:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. *@li dgate_h:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. *@li dnt_x:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. 
+ +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. */ -REG_OP(GRUV2HiddenGrad) - .INPUT(weight_hidden, TensorType({DT_FLOAT16, DT_FLOAT})) - .INPUT(init_h, TensorType({DT_FLOAT16, DT_FLOAT})) +REG_OP(GRUV2HiddenGradCell) + .INPUT(dh_pre_t, TensorType({DT_FLOAT16, DT_FLOAT})) .INPUT(h, TensorType({DT_FLOAT16, DT_FLOAT})) .INPUT(dy, TensorType({DT_FLOAT16, DT_FLOAT})) .INPUT(dh, TensorType({DT_FLOAT16, DT_FLOAT})) @@ -699,8 +1047,197 @@ REG_OP(GRUV2HiddenGrad) .OUTPUT(dh_prev, TensorType({DT_FLOAT16, DT_FLOAT})) .OUTPUT(dgate_h, TensorType({DT_FLOAT16, DT_FLOAT})) .OUTPUT(dnt_x, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(t_state, Int, 0) .ATTR(gate_order, String, "zrh") - .OP_END_FACTORY_REG(GRUV2HiddenGrad) + .OP_END_FACTORY_REG(GRUV2HiddenGradCell) + +/** +* @brief Calculates the reversed outputs of the function "embedding". \n + +* @par Inputs: +* Two inputs, including: +* @li grad: A mutable Tensor of word grad. Must be one of the following types: +* float32. +* @li indices: A mutable word index Tensor of the int32 type.\n + +* @par Attributes: +* @li num_weights: An int attr which use to judge how many words in dict. \n + +* @li padding_idx: An int attr judge which word to fill zeros. Defaults to "-1". \n + +* @li scale_grad_by_freq: An optional bool. Defaults to "False". +* If "True", "grad_weight" will be scale by word_frequency. +* If "False", "grad_weight" will not be scale by word_frequency. \n + +* @par Outputs: +* @li grad_weight: A mutable output Tensor of new word grad has the same type as "grads". \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator EmbeddingDenseGrad. +*/ +REG_OP(EmbeddingDenseGrad) + .INPUT(grad, TensorType({ DT_FLOAT32 })) /* "First operand." */ + .INPUT(indices, TensorType({ DT_INT32 })) /* "Second operand." 
*/ + .OUTPUT(y, TensorType({ DT_FLOAT32 })) /* "Result, has same element type as two inputs" */ + .REQUIRED_ATTR(num_weights, Int) + .ATTR(padding_idx, Int, -1) + .ATTR(scale_grad_by_freq, Bool, false) + .OP_END_FACTORY_REG(EmbeddingDenseGrad) + +/** +*@brief CommonLSTM calculation. +*@par Inputs: +*eight inputs: \n +*@li x:Each time step is a 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li w:Each direction is a 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM. +*@li r:Each direction is a 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM. +*@li b:An optional input. Each direction is a 1D Tensor. Must be one of the following types: float16, float32. The format must be ND. +*@li sequence_lens:An optional input. A 1D Tensor.Must be one of the following types: int32. The format must be ND. +*@li initial_h:An optional input. Each direction is a 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li initial_c:An optional input. Each direction is a 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li p:An optional input. Each direction is a 1D Tensor.Must be one of the following types: float16, float32. The format must be ND. + +*@par Attributes: +*@li activation_alpha:Optional scaling values used by some activation functions. Empty is currently supported. +*@li activation_beta:Optional scaling values used by some activation functions. Empty is currently supported. +*@li activations:The list of activation functions. Empty is currently supported. +*@li clip:An float identifying the cell clip in the op. Default to -1. +*@li direction:Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward(default), reverse, or bidirectional. +*@li hidden_size:Number of neurons in the hidden layer. Reserved. 
+*@li input_forget:Couple the input and forget gates if 1. Reserved. + +*@par Outputs: +*three outputs: \n +*@li y:First dimension is time step, second dimension is direction, others is a 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li y_h:Each direction is a 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li y_c:Each direction is a 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*/ + +REG_OP(CommonLSTM) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(w, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(r, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(b, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(sequence_lens, TensorType({DT_INT32})) + .OPTIONAL_INPUT(initial_h, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(initial_c, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(p, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y_h, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y_c, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(activation_alpha, ListFloat, {}) + .ATTR(activation_beta, ListFloat, {}) + .ATTR(activations, ListString, {}) + .ATTR(clip, Float, -1.0) + .ATTR(direction, String, "forward") + .REQUIRED_ATTR(hidden_size, Int) + .ATTR(input_forget, Int, 0) + .OP_END_FACTORY_REG(CommonLSTM) + +/** + * @brief Calculate the mask. According to hidden_size and num_step, convert seq_length to mask. + * + * @par Inputs: + * @li seq_length: A 1D Tensor. Must be one of the following types: int32. Record the current length of each batch. [batch_size]. + * @li b: A 1D Tensor. Must be one of the following types: fp16/fp32. Record the hidden_size. [4 * hidden_size]. + * @li x: A 3D Tensor. Must be one of the following types: fp16/fp32. Record the num_step/batch_size/input_size. [num_step, batch_size, input_size]. 
+ *
+ * @par Outputs:
+ * seq_mask: A 3D Tensor. Must be one of the following types: fp16/fp32. with the shape of [num_step, batch_size, hidden_size]. And has the same type as "b" \n
+ *
+ * @par Restrictions:
+ * Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
+ */
+REG_OP(RnnGenMaskV2)
+    .INPUT(seq_length, TensorType({DT_INT32}))
+    .INPUT(b, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(seq_mask, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OP_END_FACTORY_REG(RnnGenMaskV2)
+
+/**
+* @brief Common GRU calculation.
+
+* @par Inputs:
+* Six inputs, including:
+* @li x: The input sequences packed (and potentially padded) into one 3D Tensor(float16). The format must be FRACTAL_NZ
+* @li w: The weight tensor for the gates is 3D Tensor(float16). The format must be FRACTAL_Z
+* @li r: The recurrence weight tensor is 3D Tensor(float16). The format must be FRACTAL_Z
+* @li b: The bias tensor for the gates. The format must be ND
+* @li sequence_lens: Optional tensor specifying lengths of sequences(int32). The format must be ND
+* @li init_h: Optional initial value of the hidden(float16,float32). The format must be FRACTAL_NZ
+
+* @par Attributes:
+* @li activation_alpha: Optional scaling values used by some activation functions. \n
+* @li activation_beta: Optional scaling values used by some activation functions. \n
+* @li activations: A list of 2 (or 4 if bidirectional) activation functions for update, reset, and hidden gates. \n
+* @li clip: Cell clip threshold. \n
+* @li direction: Specify if the RNN is forward, reverse, or bidirectional. \n
+* @li hidden_size: Number of neurons in the hidden layer. \n
+* @li linear_before_reset: When computing the output of the hidden gate, apply the linear transformation before multiplying by the output of the reset gate. \n
+
+* @par Outputs:
+* @li y: A Tensor that concats all the intermediate output values of the hidden(float16,float32).
The format must be FRACTAL_NZ +* @li y_h: The last output value of the hidden(float16,float32). The format must be FRACTAL_NZ +*/ +REG_OP(CommonGRU) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(w, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(r, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(b, TensorType({DT_FLOAT16, DT_FLOAT})) + .OPTIONAL_INPUT(sequence_lens, TensorType({DT_INT32})) + .OPTIONAL_INPUT(initial_h, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y_h, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(activation_alpha, ListFloat, {}) + .ATTR(activation_beta , ListFloat, {}) + .ATTR(activations , ListString, {}) + .ATTR(clip, Float, -1.0) + .ATTR(direction, String, "forward") + .REQUIRED_ATTR(hidden_size, Int) + .ATTR(linear_before_reset , Int, 0) + .OP_END_FACTORY_REG(CommonGRU) +/** +* @brief Calculates the reversed outputs of the function "embedding". \n + +* @par Inputs: +* Four inputs, including: +* @li weight: A mutable Tensor of word grad. Must be one of the following types: +* float32. +* @li indices: A mutable word index Tensor of the int32 type.\n +* @li offsets: A mutable word index Tensor of the int32 type.\n +* @li per_sample_weights: to indicate all weights should be taken to be 1. +* If specified, per_sample_weights must have exactly the same shape as input +* and is treated as having the same offsets, if those are not None. +* Only supported for mode='sum'..\n + +* @par Attributes: +* @li mode: An string attr which use "sum"``, ``"mean"`` or ``"max"``. Specifies the way to reduce the bag.. \n + +* @li scale_grad_by_freq: An optional bool. Defaults to "False". +* If "True", "grad_weight" will be scale by word_frequency. +* If "False", "grad_weight" will not be scale by word_frequency. \n +* @li sparse: if True, gradient w.r.t.attr weight matrix will be a sparse tensor. 
\n +* @li include_last_offset: if True, attr offsets has one additional element, where the last element +* is equivalent to the size of indices. This matches the CSR format.. \n + +* @par Outputs: +* @li grad_weight: A mutable output Tensor of new word grad has the same type as "grads". \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator EmbeddingBag. +*/ +REG_OP(EmbeddingBag) + .INPUT(weight, TensorType({ DT_FLOAT32 })) + .INPUT(indices, TensorType({ DT_INT32 })) + .OPTIONAL_INPUT(offsets, TensorType({DT_INT32})) + .OPTIONAL_INPUT(per_sample_weights, TensorType({DT_FLOAT32})) + .OUTPUT(y, TensorType({ DT_FLOAT32 })) + .ATTR(mode, String, "mean") + .ATTR(scale_grad_by_freq, Bool, false) + .ATTR(sparse, Bool, false) + .ATTR(include_last_offset, Bool, false) + .OP_END_FACTORY_REG(EmbeddingBag) } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_RNN_H_ diff --git a/third_party/fwkacllib/inc/ops/rpn_ops.h b/third_party/fwkacllib/inc/ops/rpn_ops.h index b7649a44..089af326 100644 --- a/third_party/fwkacllib/inc/ops/rpn_ops.h +++ b/third_party/fwkacllib/inc/ops/rpn_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/save_ops.h b/third_party/fwkacllib/inc/ops/save_ops.h index 0ce473b7..5ce6c2e0 100644 --- a/third_party/fwkacllib/inc/ops/save_ops.h +++ b/third_party/fwkacllib/inc/ops/save_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/third_party/fwkacllib/inc/ops/sdca_ops.h b/third_party/fwkacllib/inc/ops/sdca_ops.h index cbd9839d..34c6a268 100644 --- a/third_party/fwkacllib/inc/ops/sdca_ops.h +++ b/third_party/fwkacllib/inc/ops/sdca_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/selection_ops.h b/third_party/fwkacllib/inc/ops/selection_ops.h index 2c99e82e..1c26e033 100644 --- a/third_party/fwkacllib/inc/ops/selection_ops.h +++ b/third_party/fwkacllib/inc/ops/selection_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -240,6 +240,30 @@ REG_OP(GatherV2D) .OP_END_FACTORY_REG(GatherV2D) /** +*@Gathers values along an axis specified by dim . \n + +*@par Inputs: +*@li x: A Tensor. Must be one of the following types: float16, float32, int32, int64. +*@li index: A Tensor. Must be one of the following types: int64 . \n + +*@par Attributes: +* dim: the axis along which to index . \n + +*@par Outputs: +* y: A Tensor. Has the same type as "x" . \n + +*@par Third-party framework compatibility +*Compatible with the PyTorch operator Gather. +*/ + +REG_OP(GatherElements) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT64})) + .INPUT(index, TensorType({DT_INT64})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT64})) + .ATTR(dim, Int, 0) + .OP_END_FACTORY_REG(GatherElements) + +/** *@brief Extracts a strided slice of a tensor. Roughly speaking, this op extracts a slice of size (end-begin)/stride from the given input tensor. 
Starting at the location specified by begin the slice continues by @@ -275,8 +299,6 @@ REG_OP(GatherV2D) *@par Outputs: *y: A Tensor. Has the same type as "x" . \n -*@attention Constraints: - *@par Third-party framework compatibility * Compatible with the TensorFlow operator StridedSlice. */ @@ -327,8 +349,6 @@ REG_OP(StridedSlice) *@par Outputs: *y: A Tensor. Has the same type as "x" . \n -*@attention Constraints: - *@par Third-party framework compatibility * Compatible with the TensorFlow operator StridedSlice. @@ -385,8 +405,6 @@ REG_OP(StridedSliceD) *@par Outputs: *output: A Tensor. Has the same type as "dy" . \n -*@attention Constraints: - *@par Third-party framework compatibility * Compatible with the TensorFlow operator StridedSliceGradD. @@ -444,8 +462,6 @@ REG_OP(StridedSliceGradD) *@par Outputs: *output: A Tensor has the same type as "dy" . \n -*@attention Constraints: - *@par Third-party framework compatibility * Compatible with the TensorFlow operator StridedSliceGrad. */ @@ -487,6 +503,38 @@ REG_OP(UnsortedSegmentSum) .OP_END_FACTORY_REG(UnsortedSegmentSum) /** +*@brief Creates a one-dimensional tensor of size steps whose values are evenly spaced from start to +* end, inclusive, on a logarithmic scale with base base. \n + +*@par Inputs: +*One inputs, including: +* @li assist: A tensor. Must be one of the following types: +* float16, float32. \n + +* @par Attributes: +* @li start: An required float. Used to select the start. \n +* @li end: An required float. Used to select the end. \n +* @li steps: An optional int.Defaults to 100. \n +* @li base: An optional float.Defaults to 10.0. \n +* @li dtype: An optional int.Defaults to 1. \n + +*@par Outputs: +*y: A Tensor with the same type and shape of input_x's. \n + +*@par Third-party framework compatibility +*Compatible with the Pytorch operator logspaced. 
\n +*/ +REG_OP(LogSpaceD) + .INPUT(assist, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16})) + .REQUIRED_ATTR (start, Float) + .REQUIRED_ATTR (end, Float) + .ATTR(steps, Int, 100) + .ATTR(base, Float, 10.0) + .ATTR(dtype, Int, 1) + .OP_END_FACTORY_REG(LogSpaceD) + +/** *@brief Computes the sum along segments of a tensor . \n *@par Inputs: @@ -797,6 +845,34 @@ REG_OP(SliceD) .OP_END_FACTORY_REG(SliceD) /** +*@brief Extracts a slice from a tensor. +* This operation extracts a slice of size "size" from a tensor "x" +* starting at the location specified by "begin" . \n + +*@par Inputs: +*@li x: A Tensor. Must be one of the following types: +* float16, float32, double, int64, int32, uint8, uint16, uint32, uint64, int8, +* int16, complex64, complex128, qint8, quint8, qint16, quint16, qint32 . \n + +*@par Inputs: +*@li offsets: The starting location for the slice. + +*@par Attributes: +*@li size: The tensor shape . \n + +*@par Outputs: +*y: A Tensor. Has the same type as "x". The slice extracted from the tensor. +*@par Restrictions: +*Warning: THIS FUNCTION IS DEPRECATED. Please use Slice instead. +*/ +REG_OP(SliceDV2) + .INPUT(x, TensorType::BasicType()) + .INPUT(offsets, TensorType::IndexNumberType()) + .OUTPUT(y, TensorType::BasicType()) + .REQUIRED_ATTR(size, ListInt) + .OP_END_FACTORY_REG(SliceDV2) + +/** * @brief Finds values and indices of the "k" largest elements for the last * dimension . \n @@ -829,8 +905,8 @@ REG_OP(SliceD) * @li sorted = true * @li It's unstable sorted indices on the platform of Ascend310 -* @par Third-party framework compatibility -* @li Compatible with the TensorFlow operator TopK. +* @par Restrictions: +* Warning: THIS FUNCTION IS DEPRECATED. Please use TopKV2 instead. */ REG_OP(TopKD) .INPUT(x, TensorType::RealNumberType()) @@ -859,6 +935,44 @@ REG_OP(TopKD) * @li sorted: An optional bool. Defaults to true. * If true, the resulting "k" elements will be sorted by the values in descending * order. 
+* @li dim: An optional int. Defaults to -1. For reserved use. +* @li largest: An optional bool. Defaults to true. For reserved use. \n + +* @par Outputs: +* @li values: A Tensor, specifying the sorted data. Has the same type as +* "input". +* @li indices: A Tensor of type int32, specifying the indices of sorted data . \n + +* @see TopK() +* @par Third-party framework compatibility +* @li Compatible with the TensorFlow operator TopKV2. +*/ +REG_OP(TopKV2) + .INPUT(x, TensorType::RealNumberType()) + .INPUT(k, TensorType({DT_INT32})) + .OUTPUT(values, TensorType::RealNumberType()) + .OUTPUT(indices, TensorType({DT_INT32})) + .ATTR(sorted, Bool, true) + .ATTR(dim, Int, -1) + .ATTR(largest, Bool, true) + .OP_END_FACTORY_REG(TopKV2) + +/** +* @brief Finds values and indices of the "k" largest elements for the last +* dimension . \n + +* @par Inputs: +* Two inputs, including: +* @li x: A 1D or higher tensor of type BasicType, with the last dimension +* at least "k". +* @li k: A 0D Tensor of type int32. +* Number of top elements to look for along the last dimension (along each row +* for matrices) . \n + +* @par Attributes: +* @li sorted: An optional bool. Defaults to true. +* If true, the resulting "k" elements will be sorted by the values in descending +* order. * @li T: Indicator of indices type . \n * @par Outputs: @@ -876,15 +990,17 @@ REG_OP(TopK) .OUTPUT(values, TensorType::RealNumberType()) .OUTPUT(indices, TensorType({DT_INT32})) .ATTR(sorted, Bool, true) + .ATTR(largest, Bool, true) + .ATTR(dim, Int, -1) .OP_END_FACTORY_REG(TopK) /** *@brief Creates a new tensor by applying sparse "updates" to individual values or slices within a tensor (initially zero for numeric, empty for string) of the given "shape" according to "indices" . \n *@par Inputs: *Inputs including: -* @li indices: A required index tensor. Must be one of the following types: float32, float16, int32, int8, uint8. -* @li x: A required slice tensor. 
Must be one of the following types: float32, float16, int32, int8, uint8. -* @li shape: A required list of int32, specifying the output shape. +* @li indices: A required index tensor. Must be one of the following types: int32 or int64. +* @li x: A required slice tensor. Must be one of the following types: float32, float16, int32, int8, uint8... +* @li shape: A required list of int32 or int64, specifying the output shape. *@par Outputs: *y:A output Tensor with same datatype as "updates" . \n @@ -895,7 +1011,7 @@ REG_OP(TopK) * Compatible with the TensorFlow operator ScatterNd. */ REG_OP(ScatterNd) - .INPUT(indices, TensorType::BasicType()) + .INPUT(indices, TensorType::IndexNumberType()) .INPUT(x, TensorType::BasicType()) .INPUT(shape, TensorType::IndexNumberType()) .OUTPUT(y, TensorType::BasicType()) @@ -908,11 +1024,11 @@ REG_OP(ScatterNd) *@par Inputs: *Inputs including: * @li indices: A required index tensor. Must be one of the following types: - * float, float16, int32, int16. format:ND. + * int32 or int64. format:ND. * @li x: A required slice tensor. Must be one of the following types: - * float, float16, int32, int16. format:ND. + * float16, float, int32, int8, uint8. format:ND. *@par Attributes: -* @li shape: A required list of int32, specifying the output shape. +* @li shape: A required list of int32 or int64, specifying the output shape. *@par Outputs: *y: A Tensor. Has the same type as "x". format:ND . 
\n @@ -927,8 +1043,8 @@ REG_OP(ScatterNd) */ REG_OP(ScatterNdD) .INPUT(indices, TensorType::IndexNumberType()) - .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT16})) - .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT16})) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8})) .REQUIRED_ATTR(shape, ListInt) .OP_END_FACTORY_REG(ScatterNdD) @@ -1753,6 +1869,33 @@ REG_OP(Crop) .OP_END_FACTORY_REG(Crop) /** +*@brief Returns a namedtuple (values, indices) where values is the cumulative +* the cumulative minimum of elements of input in the dimension dim. +* And indices is the index location of each maximum value found in the dimension dim. \n + +*@par Inputs: +*One inputs, including: +* @li x: A tensor . Must be one of the following types: +* float16, float32, int32, uint32, int8, uint8. \n + +*@par Attributes: +* @li axis: Axis along which to cummin. \n + +*@par Outputs: +* y: A Tensor with the same type and shape of x's. \n +* indices: A Tensor with the int32 type and the same shape of x's. \n + +*@par Third-party framework compatibility +*Compatible with the Pytorch operator Cummin. \n +*/ +REG_OP(Cummin) + .INPUT(x, TensorType::BasicType()) + .OUTPUT(y, TensorType::BasicType()) + .OUTPUT(indices, TensorType::BasicType()) + .REQUIRED_ATTR(axis, Int) + .OP_END_FACTORY_REG(Cummin) + +/** *@brief Extends the input with copies of data along a specified dimension. For example: *(1) If x = [[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [11, 12]]], with shape (2, 3, 2); *(2) axis = 1; @@ -1921,6 +2064,249 @@ REG_OP(CumulativeLogsumexpD) .ATTR(exclusive, Bool, false) .ATTR(reverse, Bool, false) .OP_END_FACTORY_REG(CumulativeLogsumexpD) + +/** +* @brief Add updates to var according to axis and indices. + +* @par Inputs: +* Three inputs, including: +* @li var: A Tensor. Must be one of the following types: +* float16, float32, int16, int32, int8, uint8. 
+* @li indices: A Tensor of the indices, type should be int32.
+* @li updates: A Tensor of the same type as "var". \n
+
+* @par Attributes:
+* @li axis: A required int to specify the axis to perform indices add. \n
+
+* @par Outputs:
+* @li var: A Tensor. Same as input "var".
+
+* @par Third-party framework compatibility
+* Compatible with the Pytorch operator index_add_.
+*/
+REG_OP(InplaceIndexAdd)
+    .INPUT(var, TensorType({DT_INT16, DT_INT32, DT_INT8,
+                            DT_UINT8, DT_FLOAT32, DT_FLOAT16}))
+    .INPUT(indices, TensorType({DT_INT32}))
+    .INPUT(updates, TensorType({DT_INT16, DT_INT32, DT_INT8,
+                                DT_UINT8, DT_FLOAT32, DT_FLOAT16}))
+    .OUTPUT(var, TensorType({DT_INT16, DT_INT32, DT_INT8,
+                             DT_UINT8, DT_FLOAT32, DT_FLOAT16}))
+    .REQUIRED_ATTR(axis, Int)
+    .OP_END_FACTORY_REG(InplaceIndexAdd)
+
+/**
+* @brief Replace the value of X with value according to mask.
+* @par Inputs:
+* three inputs, including:
+* @li x: A Tensor of dtype is float16 or float32 or int64 or int32 or int8.
+* @li mask: A Tensor of dtype bool.
+* @li value: A Tensor of dtype float16 or float32 or int64 or int32 or int8.
+
+* @par Outputs:
+* @li y: A tensor. Must be one of the following dtypes:
+* float16, float32, int64, int32, int8.
+*/
+REG_OP(MaskedFill)
+    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT32, DT_INT64}))
+    .INPUT(mask, TensorType({DT_BOOL}))
+    .INPUT(value, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT32, DT_INT64}))
+    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT32, DT_INT64}))
+    .OP_END_FACTORY_REG(MaskedFill)
+
+/**
+* @brief Choose the value of X with value according to mask.
+
+* @par Inputs:
+* two inputs, including:
+* @li x: A Tensor of dtype is float16 or float32.
+* @li mask: A Tensor of dtype is bool. \n
+
+* @par Outputs:
+* @li y: A tensor with the same type as x. \n
+
+* @par Third-party framework compatibility
+* Compatible with the Numpy operator select. 
+* Replaces the pytorch operator masked_select in some scenarios.\n +*/ +REG_OP(MaskedSelectV2) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(mask, TensorType({DT_BOOL})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .OP_END_FACTORY_REG(MaskedSelectV2) + +/** +* @brief Slice a tensor at its last dim, e.x. a[..., begin:end:stride]. \n + +* @par Inputs: +* One inputs, including: +* @li x: A Tensor. Must be one of the following types: float16, float32, int16, int32. + +* @par Attributes: +* @li start: An attribute of type Int, start index of last dim. \n +* @li end: An attribute of type Int, end index of last dim. \n +* @li stride: An attribute of type Int, stride of slice. \n + +* @par Outputs: +* @li y: A Tensor. Has the same type as "x". \n + +* @par Third-party framework compatibility +* No compatibility +*/ +REG_OP(SliceLastDim) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_INT16, DT_INT32, DT_INT64})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_INT16, DT_INT32, DT_INT64})) + .REQUIRED_ATTR(start, Int) + .REQUIRED_ATTR(end, Int) + .ATTR(stride, Int, 1) + .OP_END_FACTORY_REG(SliceLastDim) + +/** +* @brief Extracts a strided slice of a tensor. Roughly speaking, this op \n +* extracts a slice of size (end-begin)/stride from the given input tensor. \n +* Starting at the location specified by begin the slice continues by \n +* adding stride to the index until all dimensions are not less than end. \n +* +* @par Inputs: +* Four inputs, including: +* @li x: A Tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8, \n +* complex64, int64, qint8, quint8, qint32, qint16, quint16, uint16, \n +* complex128, float16, uint32, uint64, complex64, complex128. \n +* @li begin: A Tensor of type int32 or int64, for the index of the first value to select. +* +* @li end: A Tensor of type int32 or int64, for the index of the last value to select. 
+* +* @li axes: A Tensor of type int32 or int64, indicate axis to be select. +* +* @li strides: A Tensor of type int32 or int64, for the increment. +* +* @par Attributes: +* @li begin_mask: A Tensor of type int32. \n +* A bitmask where a bit "i" being "1" means to ignore the begin \n +* value and instead use the largest interval possible. +* @li end_mask: A Tensor of type int32. \n +* Analogous to "begin_mask". +* @li ellipsis_mask: A Tensor of type int32. \n +* A bitmask where bit "i" being "1" means the "i"th position \n +* is actually an ellipsis. +* @li new_axis_mask: A Tensor of type int32. \n +* A bitmask where bit "i" being "1" means the "i"th \n +* specification creates a new shape 1 dimension. +* @li shrink_axis_mask: A Tensor of type int32. \n +* A bitmask where bit "i" implies that the "i"th \n +* specification should shrink the dimensionality. +* +* @par Outputs: +* y: A Tensor. Has the same type as "x". +* +* @attention Constraints: +* +* @par Third-party framework compatibility +* Compatible with the TensorFlow operator StridedSliceV2. +*/ +REG_OP(StridedSliceV2) + .INPUT(x, TensorType::BasicType()) + .INPUT(begin, TensorType::IndexNumberType()) + .INPUT(end, TensorType::IndexNumberType()) + .OPTIONAL_INPUT(axes, TensorType::IndexNumberType()) + .OPTIONAL_INPUT(strides, TensorType::IndexNumberType()) + .ATTR(begin_mask, Int, 0) + .ATTR(end_mask, Int, 0) + .ATTR(ellipsis_mask, Int, 0) + .ATTR(new_axis_mask, Int, 0) + .ATTR(shrink_axis_mask, Int, 0) + .OUTPUT(y, TensorType::BasicType()) + .OP_END_FACTORY_REG(StridedSliceV2) + +/** +*@brief Fills the elements of the input tensor with value val by selecting the indices in the order given in index. \n + +*@par Inputs: +*Three inputs, including: +* @li x: A tensor. Must be one of the following types: +* float16, float32, int32. \n +*@li assist1: A tensor. Must be one of the following types: +* float16, float32, int32. \n +*@li assist2: A tensor. 
Must be one of the following types: +* float16, float32, int32. \n + +* @par Attributes: +* @li dim: A required int. Used to select the dimension of this tensor. \n + +*@par Outputs: +*y: A Tensor with the same type and shape of input_x's. \n + +*@par Third-party framework compatibility +*Compatible with the Pytorch operator IndexFill. \n +*/ +REG_OP(IndexFillD) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) + .INPUT(assist1, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) + .INPUT(assist2, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) + .REQUIRED_ATTR(dim, Int) + .OP_END_FACTORY_REG(IndexFillD) + +/** +* @brief For each row r of this and for each column c, do (*this)(r, c) += src(j, c), \n +* where j ranges from indexes[r].first through indexes[r].second - 1. \n +* In general indexes must be >= 0 and < src.NumRows(); \n +* but to represent an empty range you may use the pair (-1, -1) or any pair of numbers (i, j) such that i >= j. \n + +* @par Inputs: +* Three inputs, including: +* @li x: A Tensor. Must be one of the following types: +* float16, float32. +* @li indices: A Tensor of the indices, type should be int32. +* @li src: A Tensor of the same type as "x". \n + +* @par Outputs: +* @li x: A Tensor. Same as input "x". + +* @par Third-party framework compatibility +* Compatible with the kaldi operator AddRowRanges. +*/ +REG_OP(AddRowRanges) + .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(src, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(indices, TensorType({DT_INT32})) + .OUTPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) + .OP_END_FACTORY_REG(AddRowRanges) + +/** +*@brief masked fill tensor along with one axis by range. +* boxes. It is a customized masked fill range operator . \n + +*@par Inputs: +* Four inputs, including: +*@li x: input tensor. A ND Tensor of float32/float16/int32/int8 with shapes +* 1-D (D,), 2-D(N, D), 3-D(N, C, D) +*@li start: masked fill start pos. 
A 3D Tensor of int32 with +* shape (num, N). "num" indicates the number of loop masked fill, and the value N +* indicates the batch of ND Tensor, if input x shape is 1-D, N = 1. \n +*@li end: masked fill end pos. A 3D Tensor of int32 with +* shape (num, N). "num" indicates the number of loop masked fill, and the value N +* indicates the batch of ND Tensor. \n +*@li value: masked fill value. A 2D Tensor of float32/float16/int32/int8 with +* shape (num,). "num" indicates the number of loop masked fill + +*@par Attributes: +*@li axis: axis with masked fill of int32. Defaults to -1. + +*@par Outputs: +*y: A ND Tensor of float32/float16/int32/int8 with shapes 1-D (D,), 2-D(N, D), 3-D(N, C, D) + +* @par Restrictions: +* Warning: input shape's length must not be bigger than 1024 * 1024 * 1024. +*/ +REG_OP(MaskedFillRange) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32})) + .INPUT(start, TensorType({DT_INT32})) + .INPUT(end, TensorType({DT_INT32})) + .INPUT(value, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32})) + .REQUIRED_ATTR(axis, Int) + .OP_END_FACTORY_REG(MaskedFillRange) } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_SELECTION_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/set_ops.h b/third_party/fwkacllib/inc/ops/set_ops.h index 1d02fa15..04e04f1b 100644 --- a/third_party/fwkacllib/inc/ops/set_ops.h +++ b/third_party/fwkacllib/inc/ops/set_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/third_party/fwkacllib/inc/ops/sparse_ops.h b/third_party/fwkacllib/inc/ops/sparse_ops.h index d7512790..a1fc9ee6 100644 --- a/third_party/fwkacllib/inc/ops/sparse_ops.h +++ b/third_party/fwkacllib/inc/ops/sparse_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -383,11 +383,11 @@ REG_OP(SparseFillEmptyRowsGrad) REG_OP(SparseTensorDenseMatMul) .INPUT(x1_indices, TensorType({DT_INT32, DT_INT64})) .INPUT(x1_values, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, \ - DT_COMPLEXT64, DT_COMPLEX128, DT_FLOAT16})) + DT_COMPLEXT64, DT_COMPLEX128, DT_FLOAT16, DT_INT64})) .INPUT(x1_shape, TensorType({DT_INT64})) - .INPUT(x2, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, DT_COMPLEXT64, \ + .INPUT(x2, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_COMPLEXT64, \ DT_COMPLEX128, DT_FLOAT16})) - .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, DT_COMPLEXT64, \ + .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_COMPLEXT64, \ DT_COMPLEX128, DT_FLOAT16})) .ATTR(adjoint_a, Bool, false) .ATTR(adjoint_b, Bool, false) diff --git a/third_party/fwkacllib/inc/ops/spectral_ops.h b/third_party/fwkacllib/inc/ops/spectral_ops.h index 64fa7814..34ccb398 100644 --- a/third_party/fwkacllib/inc/ops/spectral_ops.h +++ b/third_party/fwkacllib/inc/ops/spectral_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -27,6 +27,24 @@ namespace ge { /** +*@brief Computes the inverse 1-dimensional discrete Fourier transform over the +inner-most dimension of `x`. \n + +*@par Inputs: +*@li x: A Tensor. Must be the following types: complex64, complex128. 
\n + +*@par Outputs: +*@li y: A complex tensor of the same rank as `x`. \n + +*@par Third-party framework compatibility +* Compatible with TensorFlow IFFT operator. +*/ +REG_OP(IFFT) + .INPUT(x, TensorType({DT_COMPLEX64,DT_COMPLEX128})) + .OUTPUT(y, TensorType({DT_COMPLEX64,DT_COMPLEX128})) + .OP_END_FACTORY_REG(IFFT) + +/** *@brief Real-valued fast Fourier transform . \n *@par Inputs: @@ -47,6 +65,84 @@ REG_OP(RFFT) .OUTPUT(y, TensorType({DT_COMPLEX64})) .OP_END_FACTORY_REG(RFFT) +/** +*@brief Inverse real-valued fast Fourier transform. \n + +*@par Inputs: +*@li x: A complex64 tensor. +*@li fft_length: An int32 tensor of shape [1]. The FFT length. \n + +*@par Outputs: +*@li y: A float32 tensor of the same rank as `input`. The inner-most + dimension of `input` is replaced with the `fft_length` samples of its inverse + 1D Fourier transform. \n + +*@par Third-party framework compatibility +* Compatible with TensorFlow IRFFT operator. +*/ +REG_OP(IRFFT) + .INPUT(x, TensorType({DT_COMPLEX64})) + .INPUT(fft_length, TensorType({DT_INT32})) + .OUTPUT(y, TensorType({DT_FLOAT})) + .OP_END_FACTORY_REG(IRFFT) + + +/** +*@brief 2D fast Fourier transform. \n + +*@par Inputs: +*@li x: A complex64 tensor. + +*@par Outputs: +*@li y: A complex64 tensor of the same shape as `input`. The inner-most 2 + dimensions of `input` are replaced with their 2D Fourier transform. \n + +*@par Third-party framework compatibility +* Compatible with TensorFlow FFT2D operator. +*/ +REG_OP(FFT2D) + .INPUT(x, TensorType({DT_COMPLEX64, DT_COMPLEX128})) + .OUTPUT(y, TensorType({DT_COMPLEX64, DT_COMPLEX128})) + .OP_END_FACTORY_REG(FFT2D) + +/** +*@brief Calculate the one-dimensional discrete Fourier transform on the +innermost dimension of the input. \n + +*@par Inputs: +*@li x: A Tensor. Must be the following types: complex64, complex128. \n + +*@par Outputs: +*@li y: A complex tensor with the same shape as input. The innermost dimension +of the input is replaced by its 1-dimensional Fourier transform. 
\n + +*@par Third-party framework compatibility +* Compatible with TensorFlow FFT operator. +*/ +REG_OP(FFT) + .INPUT(x, TensorType({DT_COMPLEX64,DT_COMPLEX128})) + .OUTPUT(y, TensorType({DT_COMPLEX64,DT_COMPLEX128})) + .OP_END_FACTORY_REG(FFT) + +/** +*@brief Calculate the inverse 1-dimensional discrete Fourier transform on the +innermost dimension of the input. \n + +*@par Inputs: +*@li x: A Tensor. Must be the following types: complex64, complex128. \n + +*@par Outputs: +*@li y: A complex tensor with the same shape as input. The innermost dimension +of the input is replaced by its inverse two-dimensional Fourier transform. \n + +*@par Third-party framework compatibility +* Compatible with TensorFlow IFFT2D operator. +*/ +REG_OP(IFFT2D) + .INPUT(x, TensorType({DT_COMPLEX64,DT_COMPLEX128})) + .OUTPUT(y, TensorType({DT_COMPLEX64,DT_COMPLEX128})) + .OP_END_FACTORY_REG(IFFT2D) + } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_SPECTRAL_OPS_H_ \ No newline at end of file diff --git a/third_party/fwkacllib/inc/ops/split_combination_ops.h b/third_party/fwkacllib/inc/ops/split_combination_ops.h index efe4715d..fe25a46f 100644 --- a/third_party/fwkacllib/inc/ops/split_combination_ops.h +++ b/third_party/fwkacllib/inc/ops/split_combination_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -62,8 +62,8 @@ REG_OP(Split) *Must be one of the following types: float16, float32, int32, int8, int16, int64, uint8, uint16, uint32, uint64 *@par Attributes: -*@li split_dim: A required int8, int16, int32, or int64. Specifies the dimension along which to split. No default value. -*@li num_split: A required int8, int16, int32, or int64. Specifies the number of output tensors. No default value . \n +*@li split_dim: A required int32. 
Specifies the dimension along which to split. No default value. +*@li num_split: A required int32. Specifies the number of output tensors. No default value . \n *@par Outputs: *y:Dynamic output. A list of output tensors. Has the same type and format as "x" . \n @@ -94,12 +94,12 @@ REG_OP(SplitD) *@par Inputs: * Three inputs, including: *@li x: An ND Tensor. -*Must be one of the following types: -*@li size_splits: A list of int8, int16, int32, or int64. Specifies a list containing the sizes of each output tensor along the split dimension. -*@li split_dim: An int8, int16, int32, or int64. Specifies the dimension along which to split . \n +*Must be one of the types:float16, float32, double, int64, int32, uint8, uint16, uint32, uint64, int8, int16, complex64, complex128, qint8, quint8, qint16, quint16, qint32. +*@li size_splits: Must be one of the types:int32, int64. Specifies a list containing the sizes of each output tensor along the split dimension. +*@li split_dim: Must be the following type:int32. Specifies the dimension along which to split . \n *@par Attributes: -*num_split: A required int8, int16, int32, or int64. Specifies the number of output tensors. No default value . \n +*num_split: A required int32. Specifies the number of output tensors. No default value . \n *@par Outputs: *y: Dynamic output.A list of output tensors. Has the same type and format as "x" . \n @@ -129,9 +129,9 @@ REG_OP(SplitV) *Must be one of the following types: float16, float32, int32, int8, int16, int64, uint8, uint16, uint32, uint64 *@par Attributes: -*@li size_splits: A required list of int8, int16, int32, or int64. Specifies a list containing the sizes of each output tensor along the split dimension. -*@li split_dim: A required int8, int16, int32, or int64. Specifies the dimension along which to split. No default value. -*@li num_split: A required int8, int16, int32, or int64. Specifies the number of output tensors. No default value . \n +*@li size_splits: A required list of int32. 
Specifies a list containing the sizes of each output tensor along the split dimension. +*@li split_dim: A required int32. Specifies the dimension along which to split. No default value. +*@li num_split: A required int32. Specifies the number of output tensors. No default value . \n *@par Outputs: *y: Dynamic output.A list of output tensors. Has the same type and format as "x" . \n @@ -317,15 +317,15 @@ REG_OP(Concat) * int64, uint8, uint16, uint32, uint64, float16, float32, bool . It's a dynamic input. \n *@par Attributes: -*@li axis: A optional int, defaultvalue is 0. +*@li axis: A optional int, default value is 0. * Dimension along which to pack. The range is [-(R+1), R+1). *@li N: A required int. Number of tensors . \n *@par Outputs: *y: A Tensor. Has the same type as "x". + *@par Third-party framework compatibility -*Compatible with the TensorFlow operator Pack. -It's a dynamic output. +* Compatible with the TensorFlow operator Pack. */ REG_OP(Pack) .DYNAMIC_INPUT(x, TensorType::BasicType()) diff --git a/third_party/fwkacllib/inc/ops/state_ops.h b/third_party/fwkacllib/inc/ops/state_ops.h index db1f5353..3c8e32b6 100644 --- a/third_party/fwkacllib/inc/ops/state_ops.h +++ b/third_party/fwkacllib/inc/ops/state_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/third_party/fwkacllib/inc/ops/stateful_random_ops.h b/third_party/fwkacllib/inc/ops/stateful_random_ops.h index 366112d6..c2f65c6a 100644 --- a/third_party/fwkacllib/inc/ops/stateful_random_ops.h +++ b/third_party/fwkacllib/inc/ops/stateful_random_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/stateless_random_ops.h b/third_party/fwkacllib/inc/ops/stateless_random_ops.h index dad3c379..ff9daaa3 100644 --- a/third_party/fwkacllib/inc/ops/stateless_random_ops.h +++ b/third_party/fwkacllib/inc/ops/stateless_random_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/string_ops.h b/third_party/fwkacllib/inc/ops/string_ops.h index 4a88bc79..f9cc2549 100644 --- a/third_party/fwkacllib/inc/ops/string_ops.h +++ b/third_party/fwkacllib/inc/ops/string_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,6 +25,235 @@ #include "graph/operator_reg.h" namespace ge { +/** +*@brief Creates ngrams from ragged string data . \n + +*@par Inputs: +include: +*@li data:1-D.The values tensor of the ragged string tensor to make ngrams out of. +*@li data_splits:The splits tensor of the ragged string tensor to make ngrams out of . \n + +*@par Attributes: +* separator:The string to append between elements of the token. Use "" for no separator. 
+* ngram_widths:The sizes of the ngrams to create. +* left_pad:The string to use to pad the left side of the ngram sequence. Only used if pad_width != 0. +* right_pad:The string to use to pad the right side of the ngram sequence. Only used if pad_width != 0. +* pad_width:The number of padding elements to add to each side of each sequence. +* preserve_short_sequences: Preserve short sequences. \n + +*@par Outputs: +*@li ngrams:The values tensor of the output ngrams ragged tensor. +*@li ngrams_splits:The splits tensor of the output ngrams ragged tensor. \n + +*@see StringNGrams() + +*@par Third-party framework compatibility +*compatible with StringNGrams op of tensorflow + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(StringNGrams) + .INPUT(data, TensorType({DT_STRING})) + .INPUT(data_splits, TensorType({DT_INT32, DT_INT64})) + .OUTPUT(ngrams, TensorType({DT_STRING})) + .OUTPUT(ngrams_splits, TensorType({DT_INT32, DT_INT64})) + .REQUIRED_ATTR(separator, String) + .ATTR(ngram_widths, ListInt, {}) + .REQUIRED_ATTR(left_pad, String) + .REQUIRED_ATTR(right_pad, String) + .REQUIRED_ATTR(pad_width, Int) + .REQUIRED_ATTR(preserve_short_sequences, Bool) + .OP_END_FACTORY_REG(StringNGrams) + +/** +*@brief Decodes each string in `input` into a sequence of Unicode code points . \n + +*@par Inputs: +include: +*@li input:The text to be decoded. Can have any shape. Note that the output is flattened +to a vector of char values. \n + +*@par Attributes: +* input_encoding:Text encoding of the input strings. This is any of the encodings supported +by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`. +* errors:Error handling policy when there is invalid formatting found in the input. +The value of 'strict' will cause the operation to produce a InvalidArgument +error on any invalid input formatting. 
A value of 'replace' (the default) will +cause the operation to replace any invalid formatting in the input with the +`replacement_char` codepoint. A value of 'ignore' will cause the operation to +skip any invalid formatting in the input and produce no corresponding output +character. +* replacement_char:The replacement character codepoint to be used in place of any invalid +formatting in the input when `errors='replace'`. Any valid unicode codepoint may +be used. The default value is the default unicode replacement character is +0xFFFD or U+65533. +* replace_control_characters:Whether to replace the C0 control characters (00-1F) with the +`replacement_char`. Default is false. \n + +*@par Outputs: +*@li row_splits:A 1D tensor containing the row splits. +*@li char_values:A 1D tensor containing the decoded codepoints. +*@li char_to_byte_starts:A 1D int32 Tensor containing the byte index in the input string where each +character in `char_values` starts. \n + +*@see UnicodeDecodeWithOffsets() + +*@par Third-party framework compatibility +*compatible with UnicodeDecodeWithOffsets op of tensorflow + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(UnicodeDecodeWithOffsets) + .INPUT(input, TensorType({DT_STRING})) + .OUTPUT(row_splits, TensorType({DT_INT64})) + .OUTPUT(char_values, TensorType({DT_INT32})) + .OUTPUT(char_to_byte_starts, TensorType({DT_INT64})) + .REQUIRED_ATTR(input_encoding, String) + .ATTR(errors, String, "replace") + .ATTR(replacement_char, Int, 65533) + .ATTR(replace_control_characters, Bool, false) + .ATTR(Tsplits, Type, DT_INT64) + .OP_END_FACTORY_REG(UnicodeDecodeWithOffsets) + +/** +*@brief Decodes each string in `input` into a sequence of Unicode code points. \n + +*@par Inputs: +include: +*@li input:The text to be decoded. Can have any shape. Note that the output is flattened +to a vector of char values. \n + +*@par Attributes: +* input_encoding:Text encoding of the input strings. 
This is any of the encodings supported +by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`. +* errors:Error handling policy when there is invalid formatting found in the input. +The value of 'strict' will cause the operation to produce a InvalidArgument +error on any invalid input formatting. A value of 'replace' (the default) will +cause the operation to replace any invalid formatting in the input with the +`replacement_char` codepoint. A value of 'ignore' will cause the operation to +skip any invalid formatting in the input and produce no corresponding output +character. +* replacement_char:The replacement character codepoint to be used in place of any invalid +formatting in the input when `errors='replace'`. Any valid unicode codepoint may +be used. The default value is the default unicode replacement character is +0xFFFD or U+65533. +* replace_control_characters:Whether to replace the C0 control characters (00-1F) with the +`replacement_char`. Default is false. \n + +*@par Outputs: +*@li row_splits:A 1D tensor containing the row splits. +*@li char_values:A 1D tensor containing the decoded codepoints. \n + +*@see UnicodeDecode() + +*@par Third-party framework compatibility +*compatible with UnicodeDecode op of tensorflow + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(UnicodeDecode) + .INPUT(input, TensorType({DT_STRING})) + .OUTPUT(row_splits, TensorType({DT_INT64})) + .OUTPUT(char_values, TensorType({DT_INT32})) + .REQUIRED_ATTR(input_encoding, String) + .ATTR(errors, String, "replace") + .ATTR(replacement_char, Int, 65533) + .ATTR(replace_control_characters, Bool, false) + .ATTR(Tsplits, Type, DT_INT64) + .OP_END_FACTORY_REG(UnicodeDecode) + +/** +*@brief Transcode the input text from a source encoding to a destination encoding. \n + +*@par Inputs: +include: +*@li input:The text to be processed. Can have any shape. 
\n + +*@par Attributes: +* input_encoding:Text encoding of the input strings. This is any of the encodings supported +by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`. +* output_encoding:The unicode encoding to use in the output. Must be one of `"UTF-8", "UTF-16-BE", "UTF-32-BE"`. +Multi-byte encodings will be big-endian. +* errors:Error handling policy when there is invalid formatting found in the input. +The value of 'strict' will cause the operation to produce a InvalidArgument +error on any invalid input formatting. A value of 'replace' (the default) will +cause the operation to replace any invalid formatting in the input with the +`replacement_char` codepoint. A value of 'ignore' will cause the operation to +skip any invalid formatting in the input and produce no corresponding output +character. +* replacement_char:The replacement character codepoint to be used in place of any invalid +formatting in the input when `errors='replace'`. Any valid unicode codepoint may +be used. The default value is the default unicode replacement character is +0xFFFD or U+65533. +* replace_control_characters:Whether to replace the C0 control characters (00-1F) with the +`replacement_char`. Default is false. \n + +*@par Outputs: +*@li output:A string tensor containing unicode text encoded using `output_encoding`. \n + +*@see UnicodeTranscode() + +*@par Third-party framework compatibility +*compatible with UnicodeTranscode op of tensorflow + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(UnicodeTranscode) + .INPUT(input, TensorType({DT_STRING})) + .OUTPUT(output, TensorType({DT_STRING})) + .REQUIRED_ATTR(input_encoding, String) + .ATTR(output_encoding, String, "UTF-8") + .ATTR(errors, String, "replace") + .ATTR(replacement_char, Int, 65533) + .ATTR(replace_control_characters, Bool, false) + .OP_END_FACTORY_REG(UnicodeTranscode) + +/** +*@brief Encode a tensor of ints into unicode strings. 
\n + +*@par Inputs: +include: +*@li input_values:A 1D tensor containing the unicode codepoints that should be encoded. +*@li input_splits:A 1D tensor specifying how the unicode codepoints should be split into strings. \n + +*@par Attributes: +* output_encoding:The unicode encoding to use in the output. Must be one of `"UTF-8", "UTF-16-BE", "UTF-32-BE"`. +Multi-byte encodings will be big-endian. +* errors:Error handling policy when there is invalid formatting found in the input. +The value of 'strict' will cause the operation to produce a InvalidArgument +error on any invalid input formatting. A value of 'replace' (the default) will +cause the operation to replace any invalid formatting in the input with the +`replacement_char` codepoint. A value of 'ignore' will cause the operation to +skip any invalid formatting in the input and produce no corresponding output +character. +* replacement_char:The replacement character codepoint to be used in place of any invalid +formatting in the input when `errors='replace'`. Any valid unicode codepoint may +be used. The default value is the default unicode replacement character is +0xFFFD or U+65533. \n + +*@par Outputs: +*@li output:The 1-D Tensor of strings encoded from the provided unicode codepoints. \n + +*@see UnicodeEncode() + +*@par Third-party framework compatibility +*compatible with UnicodeEncode op of tensorflow + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(UnicodeEncode) + .INPUT(input_values, TensorType({DT_INT32})) + .INPUT(input_splits, TensorType({DT_INT32, DT_INT64})) + .OUTPUT(output, TensorType({DT_STRING})) + .ATTR(errors, String, "replace") + .ATTR(output_encoding, String, "UTF-8") + .ATTR(replacement_char, Int, 65533) + .OP_END_FACTORY_REG(UnicodeEncode) /** *@brief Split elements of input based on delimiter into a SparseTensor . 
\n @@ -62,6 +291,116 @@ REG_OP(StringSplit) .OP_END_FACTORY_REG(StringSplit) /** +*@brief Replaces the match of pattern in input with rewrite. \n + +*@par Inputs: +include: +*@li input:A Tensor of type string. The text to be processed. \n + +*@par Attributes: +*@li pattern:A string. The regular expression to match the input. +*@li rewrite:A string. The rewrite to be applied to the matched expression. +*@li replace_global:An optional bool. Defaults to True. If True, the replacement is global, +otherwise the replacement is done only on the first match. + +*@par output: +*@li output::A Tensor of type string. +*/ +REG_OP(StaticRegexReplace) + .INPUT(input, TensorType({DT_STRING})) + .OUTPUT(output, TensorType({DT_STRING})) + .ATTR(pattern, String, "") + .ATTR(rewrite, String, "") + .ATTR(replace_global, Bool, true) + .OP_END_FACTORY_REG(StaticRegexReplace) + +/** +*@brief The input is a string tensor of any shape. The pattern is the +*regular expression to be matched with every element of the input tensor. +*The boolean values (True or False) of the output tensor indicate +*if the input matches the regex pattern provided. + +*@par Inputs: +include: +*@li input:A Tensor of type string. The text to be processed. \n + +*@par Attributes: +*@li pattern:A string. The regular expression to match the input. + +*@par output: +*@li output::A bool tensor with the same shape as `input`. +*/ +REG_OP(StaticRegexFullMatch) + .INPUT(input, TensorType({DT_STRING})) + .OUTPUT(output, TensorType({DT_BOOL})) + .ATTR(pattern, String, "") + .OP_END_FACTORY_REG(StaticRegexFullMatch) + +/** +*@brief A Tensor of type string. The input to be joined. \n + +*@par Inputs: +include: +*@li input:A Tensor of type string. The text to be processed. +*@li segment_ids:A Tensor. Must be one of the following types: int32, int64. +*A tensor whose shape is a prefix of data.shape. Negative segment ids are not supported. +*@li num_segments:A Tensor. Must be one of the following types: int32, int64. A scalar. 
+ +*@par Attributes: +*@li separator:An optional string. Defaults to "". The separator to use when joining. + +*@par output: +*@li output::A Tensor of type string.. +*/ +REG_OP(UnsortedSegmentJoin) + .INPUT(input, TensorType({DT_STRING})) + .INPUT(segment_ids, TensorType({DT_INT32,DT_INT64})) + .INPUT(num_segments, TensorType({DT_INT32,DT_INT64})) + .OUTPUT(output, TensorType({DT_STRING})) + .ATTR(separator, String, "") + .OP_END_FACTORY_REG(UnsortedSegmentJoin) + +/** +*@brief Inputs to TensorFlow operations are outputs of another TensorFlow operation. +*This method is used to obtain a symbolic handle that represents the computation of the input. + +*@par Inputs: +include: +*@li input:A Tensor of type string. The text to be processed. + +*@par Attributes: +*@li encoding:An optional string. Defaults to "". + +*@par output: +*@li output::A Tensor of type string.. +*/ +REG_OP(StringLower) + .INPUT(input, TensorType({DT_STRING})) + .OUTPUT(output, TensorType({DT_STRING})) + .ATTR(encoding, String, "") + .OP_END_FACTORY_REG(StringLower) + +/** +*@brief Inputs to TensorFlow operations are outputs of another TensorFlow operation. +*This method is used to obtain a symbolic handle that represents the computation of the input. + +*@par Inputs: +include: +*@li input:A Tensor of type string. The text to be processed. + +*@par Attributes: +*@li encoding:An optional string. Defaults to "". + +*@par output: +*@li output::A Tensor of type string.. +*/ +REG_OP(StringUpper) + .INPUT(input, TensorType({DT_STRING})) + .OUTPUT(output, TensorType({DT_STRING})) + .ATTR(encoding, String, "") + .OP_END_FACTORY_REG(StringUpper) + +/** *@brief Split elements of source based on sep into a SparseTensor . 
\n *@par Inputs: @@ -488,7 +827,7 @@ include: */ REG_OP(AsString) .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_FLOAT, \ - DT_DOUBLE, DT_BOOL})) + DT_DOUBLE, DT_BOOL, DT_COMPLEX64, DT_COMPLEX128})) .OUTPUT(y, TensorType({DT_STRING})) .ATTR(precision, Int, -1) .ATTR(scientific, Bool, false) @@ -557,6 +896,45 @@ REG_OP(DecodeBase64) .INPUT(x, TensorType({DT_STRING})) .OUTPUT(y, TensorType({DT_STRING})) .OP_END_FACTORY_REG(DecodeBase64) + +/** +*@brief StringNormalization performs string operations for basic cleaning . \n + +*@par Inputs: +*@li input: only accepts [C] or [1, C] UTF-8 strings tensor . \n + +*@par Outputs: +*@li output: UTF-8 strings tensor after cleaning . \n + +*@par Attributes: +*@li stopwords : list of strings (default is empty). +*List of stop words. If not set, no word would be removed from input strings +tensor. + +*@li is_case_sensitive : bool (default is false). +*Boolean. Whether the identification of stop words in input strings tensor is +case-sensitive. Default is false. + +*@li case_change_action : string (default is "NONE"). +*string enum that cases output to be lowercased/uppercases/unchanged. Valid +values are "LOWER", "UPPER", "NONE". Default is "NONE". + +*@li local : string (default is "en_US"). +*Environment dependent string that denotes the locale according to which output +strings needs to be upper/lowercased.Default en_US or platform specific equivalent +as decided by the implementation . \n + +*@attention Constraints: +*@li input can be either a 1-D or 2-D tensor, the shape of 2-D tensor must be [1, C]. 
+*/ +REG_OP(StringNormalizer) + .INPUT(input, TensorType({DT_STRING})) + .OUTPUT(output, TensorType({DT_STRING})) + .ATTR(stopwords, ListString, {}) + .ATTR(is_case_sensitive, Bool, false) + .ATTR(case_change_action, String, "NONE") + .ATTR(local, String, "en_US") + .OP_END_FACTORY_REG(StringNormalizer) } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_STRING_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/swap_co_ops.h b/third_party/fwkacllib/inc/ops/swap_co_ops.h index a1bf4f8b..6e8eaac3 100644 --- a/third_party/fwkacllib/inc/ops/swap_co_ops.h +++ b/third_party/fwkacllib/inc/ops/swap_co_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/target_crop_and_resize.h b/third_party/fwkacllib/inc/ops/target_crop_and_resize.h index 9c61f2c9..9bef1d7b 100644 --- a/third_party/fwkacllib/inc/ops/target_crop_and_resize.h +++ b/third_party/fwkacllib/inc/ops/target_crop_and_resize.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/transformation_ops.h b/third_party/fwkacllib/inc/ops/transformation_ops.h index 64e18fc7..4a46e35f 100644 --- a/third_party/fwkacllib/inc/ops/transformation_ops.h +++ b/third_party/fwkacllib/inc/ops/transformation_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -130,28 +130,27 @@ REG_OP(Transpose) .OP_END_FACTORY_REG(Transpose) /** -*@brief Doing format_transfer for various data format only -support "NHWC/NCHW" to "NC1HWC0" and "NC1HWC0" to "NHWC/NCHW" -"NCHW" to "FRACTAL_Zn" or "FRACTAL_Zn" to "NCHW". -"HWCN" to "FRACTAL_Zn" or "FRACTAL_Zn" to "HWCN" . \n +*@brief Do format transfer for various data format. +* In general, the framework will insert it atomatically . \n *@par Inputs: -*src: A Tensor dtype of all types . \n +*src: A Tensor. For all branches can be types: float16, float32, int32, int8, bool. +* For branches without padding also can be types: int16, int64, uint8, uint16, uint32, uint64 . \n *@par Attributes: -*@li src_format: A string source data format, can be "NHWC", "NCHW", "FRACTAL_Zn" etc. -*@li dst_format: A string target data format, can be "NC1HWC0", "NCHW", "FRACTAL_Zn" etc. -*@li group: A required int32, default value is 1. \n +*@li src_format: A string source data format, can be "NHWC", "NCHW", "FRACTAL_Z" etc. +*@li dst_format: A string target data format, can be "NC1HWC0", "NCHW", "FRACTAL_Z" etc. +*@li group: A optional int32, default value is 1. \n *@par Outputs: -*dst: A Tensor dtype of all types. +*dst: A Tensor. Has the same type as "src". */ REG_OP(TransData) .INPUT(src, TensorType::BasicType()) .OUTPUT(dst, TensorType::BasicType()) .REQUIRED_ATTR(src_format, String) .REQUIRED_ATTR(dst_format, String) - .ATTR(group, Int, 1) + .ATTR(groups, Int, 1) .OP_END_FACTORY_REG(TransData) /** @@ -174,21 +173,27 @@ REG_OP(Permute) .OP_END_FACTORY_REG(Permute) /** -*@brief Flattens the inputs. Reserves axis 0 and flattens the input tensors -* along axis 1 . \n +*@brief Flattens the inputs tensor into a 2D matrix. If input tensor has shape (d_0, d_1,..., d_n), +* then the output will have shape (d_0 X d_1 ... d_(axis-1), d_axis X d_(axis + 1)...X d_n)\n *@par Inputs: -*One input: -*x: A multi-dimensional Tensor. 
Must be one of the following types: -* int8, uint8, int16, uint16, int32, uint32, int64,uint64, float16, float32 . \n +* One input: +* x: A multi-dimensional Tensor. Must be one of the following types: +* int8, uint8, int16, uint16, int32, uint32, int64,uint64, float16, float32. *@par Outputs: -*y: A 2D flattened Tensor (Reserves axis 0 and flattens the input tensors -* along axis 1). Must be one of the following data types: int8, uint8, int16, -* uint16, int32, uint32, int64,uint64, float16, float32 . \n +* y: A 2D flattened Tensor with the contents of the input tensor, with input dimensions up to axis flattened +* to the outer dimension of the output and remaining input dimensions flattened into the inner dimension of the output. +* Must be one of the following data types: int8, uint8, int16, uint16, int32, uint32, int64,uint64, float16, float32 . + +*@par Attributes: +* axis: A optional int32, default value is 1. Indicate up to which input dimensions (exclusive) should be flattened +* to the outer dimension of the output. The value for axis must be in the range [-r, r], where r is the rank of +* the input tensor. Negative value means counting dimensions from the back. When axis = 0, the shape of +* the output tensor is (1, (d_0 X d_1 ... d_n), where the shape of the input tensor is (d_0, d_1, ... d_n). *@par Third-party framework compatibility -* Compatible with TensorFlow operator Flatten. +* Compatible with TensorFlow / ONNX operator Flatten. */ REG_OP(Flatten) .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, @@ -197,6 +202,7 @@ REG_OP(Flatten) .OUTPUT(y, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT, DT_FLOAT16})) + .ATTR(axis, Int, 1) .OP_END_FACTORY_REG(Flatten) /** @@ -357,7 +363,7 @@ REG_OP(DepthToSpace) *@brief Permutes data into spatial data blocks and then prunes them . \n *@par Inputs: -*@li x: A 4D Tensor with format NHWC. +*@li x: A 4D Tensor with format. 
Must set the format, supported format list ["NCHW, NHWC"] *@li crops: A 1D list or tuple of int32 or int64 . \n *Must be one of the following types: float16, float32 @@ -418,12 +424,8 @@ REG_OP(BatchToSpace) * Warning: THIS FUNCTION IS DEPRECATED. Please use BatchToSpace instead. */ REG_OP(BatchToSpaceD) - .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8, - DT_UINT16, DT_UINT32, DT_UINT64, DT_INT8, DT_INT16, DT_COMPLEX64, - DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32})) - .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8, - DT_UINT16, DT_UINT32, DT_UINT64, DT_INT8, DT_INT16, DT_COMPLEX64, - DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32})) + .INPUT(x, TensorType::BasicType()) + .OUTPUT(y, TensorType::BasicType()) .REQUIRED_ATTR(block_size, Int) .REQUIRED_ATTR(crops, ListInt) .OP_END_FACTORY_REG(BatchToSpaceD) @@ -434,9 +436,10 @@ REG_OP(BatchToSpaceD) *@par Inputs: * Two inputs, including: -*@li x: An NHWC Tensor. Must be one of the following types: +*@li x: An 4D Tensor. Must be one of the following types: * float16, float32, double, int64, int32, uint8, uint16, uint32, uint64, int8, * int16, complex64, complex128, qint8, quint8, qint16, quint16, qint32. +* Must set the format, supported format list ["NCHW, NHWC"] *@li paddings: A 2D tensor of type int, specifying the input . \n *@par Attributes: @@ -518,7 +521,8 @@ REG_OP(Unpack) * @par Inputs: * x: A 4D Tensor with shape [batch, in_rows, in_cols, depth], Must be one of the * following types:float32, double, int32, uint8, int16, int8, int64, uint16, -* float16, uint32, uint64 +* float16, uint32, uint64. The inputs must have data_format with one of follows: +* NHWC, NCHW. * @par Attributes: * @li ksizes: A required list or tuple. The size of the sliding window for each @@ -533,7 +537,6 @@ REG_OP(Unpack) * This is equivalent to rate in dilated (a.k.a. Atrous) convolutions. 
* @li padding: A required string. The type of padding algorithm to use, support "SAME" or "VALID". \n -* @li data_format: A required string. The format of input, only supported NHWC. \n * @par Outputs: * y: A 4D Tensor with shape [batch, out_rows, out_cols, ksize_rows * @@ -554,7 +557,6 @@ REG_OP(ExtractImagePatches) .REQUIRED_ATTR(strides, ListInt) .REQUIRED_ATTR(rates, ListInt) .REQUIRED_ATTR(padding, String) - .ATTR(data_format, String, "NHWC") .OP_END_FACTORY_REG(ExtractImagePatches) /** @@ -563,6 +565,7 @@ REG_OP(ExtractImagePatches) * @par Inputs: * x: A 5D Tensor with shape [batch, in_planes, in_rows, in_cols, depth] . \n +* The inputs must have data_format with one of follows: NDHWC, NCDHW. \n * @par Attributes: * @li ksizes: A required list or tuple. The size of the sliding window for each @@ -571,7 +574,6 @@ REG_OP(ExtractImagePatches) * patches are in "x". Must be: [1, stride_planes, stride_rows, stride_cols, 1]. * @li padding: A required string. The type of padding algorithm to use , * support "SAME" or "VALID" . \n -* @li data_format: An optional string. The format of input, only supported NDHWC. \n * @par Outputs: * Output: A 5D Tensor with shape [batch, out_planes, out_rows, out_cols, ksize_planes * @@ -590,7 +592,6 @@ REG_OP(ExtractVolumePatches) .REQUIRED_ATTR(ksizes, ListInt) .REQUIRED_ATTR(strides, ListInt) .REQUIRED_ATTR(padding, String) - .ATTR(data_format, String, "NDHWC") .OP_END_FACTORY_REG(ExtractVolumePatches) /** @@ -717,6 +718,210 @@ REG_OP(CompressFcOp) .OUTPUT(compress_index, TensorType({DT_INT8})) .REQUIRED_ATTR(compress_parameters, ListInt) .OP_END_FACTORY_REG(CompressFcOp) + +/** +*@brief Performs Col2im for each batch entry. \n + +*@par Inputs: +*@li input_x: The Col Tensor. 5-D, shape: `(n, c1, kernel_h*kernel_w, ho*wo, c0)`. +where ho/wo is do = (output_d + 2*padding_d - dilation_d*(kernel_d - 1) - 1)//stride_d + 1 \n + +*@par Outputs: +*@li output_y: The img Tensor. 5-D, shape: `(n, c1, output_h, output_w, c0)`. 
\n
+
+*@par Attributes:
+*@li kernel_size: ListInt, value: `(kernel_h, kernel_w)`, the shape of the kernel in convolution.
+*@li dilation: ListInt, value: `(dilation_h, dilation_w)`, the dilation in convolution.
+*@li padding: ListInt, value: `(padding_h, padding_w)`, the padding in convolution.
+*@li stride: ListInt, value: `(stride_h, stride_w)`, the stride in convolution. \n
+
+*@par Third-party framework compatibility
+* Compatible with Pytorch col2im/im2col_backward operator.
+*/
+REG_OP(Col2im)
+    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .INPUT(output_size, TensorType({DT_INT32, DT_INT32}))
+    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .REQUIRED_ATTR(kernel_size, ListInt)
+    .REQUIRED_ATTR(dilation, ListInt)
+    .REQUIRED_ATTR(padding, ListInt)
+    .REQUIRED_ATTR(stride, ListInt)
+    .OP_END_FACTORY_REG(Col2im)
+
+/**
+* @brief Performs Im2col for each batch entry. \n
+
+* @par Inputs:
+* x: A 4D Tensor with shape [batch, in_rows, in_cols, depth]. Must be one of the
+* following types: float32, int8, float16. The inputs must have data_format with
+* one of the following: NHWC, NCHW.
+
+* @par Attributes:
+* @li ksizes: A required list or tuple. The size of the sliding window for each
+* dimension of images.
+* @li strides: An optional list or tuple. How far the centers of two consecutive
+* patches are in the images. Defaults to "{1}".
+* @li dilations: An optional list or tuple. Defaults to "{1}".
+* This is the input stride, specifying how far two consecutive patch
+* samples are in the input. Equivalent to extracting patches
+* with patch_sizes_eff = patch_sizes + (patch_sizes - 1) *
+* (dilations - 1), followed by subsampling them spatially by a factor of dilations.
+* This is equivalent to rate in dilated (a.k.a. Atrous) convolutions.
+* @li padding_mode: An optional String. The type of padding algorithm to use,
+* support "SAME", "VALID", "CALCULATED". Among the three modes, only the "CALCULATED"
+* means to use the pads below. Defaults to "CALCULATED". 
+* @li pads: A optional list or tuple. The pad distance. Defaults to "{0}". \n + +* @par Outputs: +* y: A 4D Tensor with shape [batch, out_rows, out_cols, ksize_rows * +* ksize_cols * depth] containing image patches with size ksize_rows x ksize_cols +* x depth vectorized in the "depth" dimension. Note "out_rows" and "out_cols" +* are the dimensions of the output patches . \n + +* @attention Constraints: +* "ksizes", "strides", "dilations" and "pads" are lists of integers . \n + +* @par Third-party framework compatibility +* Compatible with Pytorch Im2col operator. +*/ +REG_OP(Im2col) + .INPUT(x, TensorType::RealNumberType()) + .OUTPUT(y, TensorType::RealNumberType()) + .REQUIRED_ATTR(ksizes, ListInt) + .ATTR(strides, ListInt, {1}) + .ATTR(dilations, ListInt, {1}) + .ATTR(padding_mode, String, "CALCULATED") + .ATTR(pads, ListInt, {0}) + .OP_END_FACTORY_REG(Im2col) + +/** +*@brief Generates a 2D or 3D flow field (sampling grid), given a batch of affine +matrices theta. \n + +*@par Inputs: +*Input theta must be float16 or float, output_size must be int32 type.Inputs +include: +*@li theta: input batch of affine matrices with shape (N,2,3) for 2D or (N,3,4) +for 3D +*@li output_size: the target output image size. (N×C×H×W for 2D or N×C×D×H×W for +3D) Example: torch.Size((32, 3, 24, 24)) . \n + + +*@par Attributes: +*align_corners: if True, consider -1 and 1 to refer to the centers of the corner +pixels rather than the image corners.Refer to grid_sample() for a more complete +description. A grid generated by affine_grid() should be passed to grid_sample() +with the same setting for this option. Default: False \n + +*@par Outputs: +*@li y: A 2-D integer tensor of shape [M] representing the +selected indices from the boxes tensor, where M <= max_output_size. \n + +*@attention Constraints: +*Input theta must be float16 or float, output_size must be int32 type . \n + +*@par Third-party framework compatibility +*Compatible with Pytorch affine_grid operator. 
+*/ + +REG_OP(AffineGrid) + .INPUT(theta, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(output_size, TensorType({DT_INT32})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(align_corners, Bool, false) + .OP_END_FACTORY_REG(AffineGrid) + +/** +*@brief Make memory of a view be contiguous. \n + +*@par Inputs: +*Four inputs, including: +*@li x: The input tensor. +*@li size: The shape of output tensor. +*@li stride: The stride of output tensor. +*@li storage_offset: The offset in the underlying storage of the output tensor. \n + +*@par Outputs: +*y: A Tensor. Has the same type as "x" . \n + +*@par Third-party framework compatibility +*Compatible with the pytorch operator as_strided. +*/ +REG_OP(AsStrided) + .INPUT(x, TensorType::BasicType()) + .INPUT(size, TensorType::IndexNumberType()) + .INPUT(stride, TensorType::IndexNumberType()) + .INPUT(storage_offset, TensorType::IndexNumberType()) + .OUTPUT(y, TensorType::BasicType()) + .OP_END_FACTORY_REG(AsStrided) + +/** +*@brief This transform extracts n-grams from the input sequence and save them as a +vector. \n + +*@par Inputs: +*@li input: can be either a 1-D or 2-D tensor for n-gram extraction, It is ether string UTF-8 or int32/int64 . \n + +*@par Attributes: +*@li max_gram_length : int (required) +*Maximum n-gram length. If this value is 3, 3-grams will be used to generate the output . +*@li max_skip_count : int (required) +*Maximum number of items (integers/strings) to be skipped when constructing an n-gram from X. +If max_skip_count=1, min_gram_length=2, max_gram_length=3, this operator may generate 2-grams +with skip_count=0 and skip_count=1, and 3-grams with skip_count=0 and skip_count=1. +*@li min_gram_length : int (required) +*Minimum n-gram length. If this value is 2 and max_gram_length is 3, output may contain counts of +2-grams and 3-grams. +*@li mode : string (required) +*The weighting criteria. 
It can be one of "TF" (term frequency), "IDF" (inverse document frequency), +and "TFIDF" (the combination of TF and IDF). +*@li ngram_counts : list of ints (required) +*The starting indexes of 1-grams, 2-grams, and so on in pool. It is useful when determining the boundary +between two consecutive collections of n-grams. For example, if ngram_counts is [0, 17, 36], +the first index (zero-based) of 1-gram/2-gram/3-gram in pool are 0/17/36. This format is essentially identical +to CSR (or CSC) sparse matrix format, and we choose to use this due to its popularity. +*@li ngram_indexes : list of ints (required) +*list of int64s (type: AttributeProto::INTS). This list is parallel to the specified 'pool_*' attribute. The i-th element +in ngram_indexes indicate the coordinate of the i-th n-gram in the output tensor. +*@li pool_int64s : list of ints +*List of int64 n-grams learned from the training set. Either this or pool_strings attributes must be present but not both. +It's an 1-D tensor starting with the collections of all 1-grams and ending with the collections of n-grams. The i-th element +in pool stores the n-gram that should be mapped to coordinate ngram_indexes[i] in the output vector. +*@li pool_strings : list of strings +*List of strings n-grams learned from the training set. Either this or pool_int64s attributes must be present but not both. +It's an 1-D tensor starting with the collections of all 1-grams and ending with the collections of n-grams. The i-th element +in pool stores the n-gram that should be mapped to coordinate ngram_indexes[i] in the output vector. +*@li weights : list of floats +*list of floats. This attribute stores the weight of each n-gram in pool. The i-th element in weights is the weight of +the i-th n-gram in pool. Its length equals to the size of ngram_indexes. By default, weights is an all-one tensor.This attribute +is used when mode is "IDF" or "TFIDF" to scale the associated word counts. 
\n + +*@par Outputs: +*@li output: tensor(float) +*For 1-D input, output is the n-gram representation of that input. For 2-D input, the output is also a 2-D tensor +whose i-th row is the n-gram representation of the i-th input row. More specifically, if input shape is [C], the corresponding +output shape would be [max(ngram_indexes) + 1]. If input shape is [N, C], this operator produces a [N, max(ngram_indexes) + 1]-tensor. \n + +*@attention Constraints: +*@li input can be either a 1-D or 2-D tensor, shape is [C] or [N, C]. +*@li max(ngram_indexes) + 1 == len(weights), len(y) == len(weights). +*@li ngram_counts and pool(pool_int64s or pool_strings) must match. +*@li either pool_strings or pool_int64s attributes must be present but not both. +*/ + +REG_OP(TfidVectorizer) + .INPUT(input, TensorType({DT_INT32, DT_INT64, DT_STRING})) + .OUTPUT(output, TensorType({DT_FLOAT})) + .REQUIRED_ATTR(max_gram_length, Int) + .REQUIRED_ATTR(max_skip_count, Int) + .REQUIRED_ATTR(min_gram_length, Int) + .REQUIRED_ATTR(mode, String) + .REQUIRED_ATTR(ngram_counts, ListInt) + .REQUIRED_ATTR(ngram_indexes, ListInt) + .ATTR(pool_int64s, ListInt, {}) + .ATTR(pool_strings, ListString, {}) + .ATTR(weights, ListFloat, {}) + .OP_END_FACTORY_REG(TfidVectorizer) } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_TRANSFORMATION_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/warp_perspective_ops.h b/third_party/fwkacllib/inc/ops/warp_perspective_ops.h index e19cbd7c..8ef69d8b 100644 --- a/third_party/fwkacllib/inc/ops/warp_perspective_ops.h +++ b/third_party/fwkacllib/inc/ops/warp_perspective_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/third_party/fwkacllib/inc/runtime/event.h b/third_party/fwkacllib/inc/runtime/event.h index 57948c47..01f63705 100644 --- a/third_party/fwkacllib/inc/runtime/event.h +++ b/third_party/fwkacllib/inc/runtime/event.h @@ -41,6 +41,11 @@ typedef enum rtEventWaitStatus { #define RT_EVENT_DDSYNC 0x04U #define RT_EVENT_TIME_LINE 0x08U +#define RT_EVENT_DDSYNC_NS 0x01U +#define RT_EVENT_STREAM_MARK 0x02U +#define RT_EVENT_DDSYNC 0x04U +#define RT_EVENT_TIME_LINE 0x08U + /** * @ingroup dvrt_event * @brief create event instance diff --git a/third_party/fwkacllib/inc/runtime/rt.h b/third_party/fwkacllib/inc/runtime/rt.h index aa394eea..10f884f2 100644 --- a/third_party/fwkacllib/inc/runtime/rt.h +++ b/third_party/fwkacllib/inc/runtime/rt.h @@ -27,6 +27,7 @@ #include "mem.h" #include "rt_model.h" #include "stream.h" +#include "rt_stars.h" #include "rt_ffts.h" #endif // __CCE_RUNTIME_RT_H__ diff --git a/third_party/fwkacllib/inc/runtime/rt_stars.h b/third_party/fwkacllib/inc/runtime/rt_stars.h new file mode 100644 index 00000000..188656b1 --- /dev/null +++ b/third_party/fwkacllib/inc/runtime/rt_stars.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved. + * Description: + */ + +#ifndef __CCE_RUNTIME_STARS_H +#define __CCE_RUNTIME_STARS_H + +#include "base.h" + +#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE) +extern "C" { +#endif + +/** + * @ingroup rt_stars + * @brief launch stars task. + * used for send star sqe directly. + * @param [in] taskSqe stars task sqe + * @param [in] sqeLen stars task sqe length + * @param [in] stream associated stream + * @return RT_ERROR_NONE for ok, others failed + */ +RTS_API rtError_t rtStarsTaskLaunch(const void *taskSqe, uint32_t sqeLen, rtStream_t stream); + +/** + * @ingroup rt_stars + * @brief create cdq instance. 
+ * @param [in] batchNum batch number + * @param [in] batchSize batch size + * @param [in] queName cdq name + * @return RT_ERROR_NONE for ok, ACL_ERROR_RT_NO_CDQ_RESOURCE for no cdq resources + */ +RTS_API rtError_t rtCdqCreate(uint32_t batchNum, uint32_t batchSize, const char *queName); + +/** + * @ingroup rt_stars + * @brief destroy cdq instance. + * @param [in] queName cdq name + * @return RT_ERROR_NONE for ok, others failed + */ +RTS_API rtError_t rtCdqDestroy(const char *queName); + +/** + * @ingroup rt_stars + * @brief get free batch in the queue. + * @param [in] queName cdq name + * @param [in] timeout batch size + * @param [out] batchId batch index + * @return RT_ERROR_NONE for ok, ACL_ERROR_RT_WAIT_TIMEOUT for timeout + */ +RTS_API rtError_t rtCdqAllocBatch(const char *queName, int32_t timeout, uint32_t *batchId); + +/** + * @ingroup rt_stars + * @brief launch a write_cdqm task on the stream. + * When the task is executed, the data information will be inserted into the cdqe index position of the queue. + * @param [in] queName cdq name + * @param [in] cdqeIndex cdqe index + * @param [in] data cdqe infomation + * @param [in] dataSize data size + * @param [in] stream launch task on the stream + * @return RT_ERROR_NONE for ok, others failed + */ +RTS_API rtError_t rtCdqEnQueue(const char *queName, uint32_t cdqeIndex, void *data, uint32_t dataSize, + rtStream_t stream); + +/** + * @ingroup rt_stars + * @brief launch a write_cdqm task on the stream. + * When the task is executed, the data information will be inserted into the cdqe index position of the queue. 
+ * @param [in] queName cdq name
+ * @param [in] cdqeIndex cdqe index
+ * @param [in] prtAddr address of the cdqe information
+ * (the data is referenced by address, so no separate data size parameter is needed)
+ * @param [in] stream launch task on the stream
+ * @return RT_ERROR_NONE for ok, others failed
+ */
+RTS_API rtError_t rtCdqEnQueuePtrMode(const char *queName, uint32_t cdqeIndex, const void *prtAddr,
+    rtStream_t stream);
+
+#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+}
+#endif
+#endif // __CCE_RUNTIME_STARS_H
diff --git a/third_party/fwkacllib/inc/tdt/tsd_client.h b/third_party/fwkacllib/inc/tdt/tsd_client.h
index 665c8b82..36fc500e 100644
--- a/third_party/fwkacllib/inc/tdt/tsd_client.h
+++ b/third_party/fwkacllib/inc/tdt/tsd_client.h
@@ -107,88 +107,6 @@ TDT_LIB_EXPORT TDT_StatusT UpdateProfilingMode(const uint32_t logicDeviceId, con
 */
 TDT_LIB_EXPORT TDT_StatusT TsdSetMsprofReporterCallback(MsprofReporterCallback callback);
 
-/**
-* @ingroup CreateCmdParameterObj
-* @brief creat tsdclient func parameter obj.
-*
-* @par Function
-* creat tsdclient func parameter obj.
-*
-* @param type [IN] type tdt::TsdCmdType, tsd func type.
-* @param cmdParameterObj [IN] type void *, func parameter obj.
-* @retval TDT_OK Success
-* @retval TDT_INTERFACE_NOT_SUPPORT
-*
-* @par Dependency
-* @li libtsdclient.so: Library to which the interface belongs.
-* @li data_common.h: Header file where tdt::TsdCmdType and tdt::InputItem defined.
-* @li status.h: Header file where 'TDT_StatusT' defined
-*/
-TDT_StatusT CreateCmdParameterObj(tdt::TsdCmdType type, void **cmdParameterObj);
-
-/**
-* @ingroup SetCmdParameterObjAttribute
-* @brief set cmdParameterObj input value.
-*
-* @par Function
-* set cmdParameterObj input value.
-*
-* @param type [IN] type tdt::TsdCmdType, tsd func type.
-* @param cmdParameterObj [IN] type void *, func parameter obj.
-* @param itemType [IN] type tdt::InputItem, func input type.
-* @param valuePtr [IN] type const void *, input value.
-* @param valueLength [IN] type int, input value length. 
-* @retval TDT_OK Success -* @retval TDT_INTERFACE_NOT_SUPPORT -* -* @par Dependency -* @li libtsdclient.so: Library to which the interface belongs. -* @li data_common.h: Header file where tdt::TsdCmdType and tdt::InputItem defined. -* @li status.h: Header file where 'TDT_StatusT' defined -*/ -TDT_StatusT SetCmdParameterObjAttribute(tdt::TsdCmdType type, void *cmdParameterObj, tdt::InputItem itemType, const void *valuePtr, int valueLength); - -/** -* @ingroup GetCmdParameterObjAttribute -* @brief set cmdParameterObj input value. -* -* @par Function -* set cmdParameterObj input value. -* -* @param type [IN] type tdt::TsdCmdType, tsd func type. -* @param cmdParameterObj [IN] type void *, func parameter obj. -* @param itemType [IN] type tdt::InputItem, func input type. -* @param valuePtr [IN] type const void *, input value. -* @param valueLength [IN] type int, input value length. -* @retval TDT_OK Success -* @retval TDT_INTERFACE_NOT_SUPPORT -* -* @par Dependency -* @li libtsdclient.so: Library to which the interface belongs. -* @li data_common.h: Header file where tdt::TsdCmdType and tdt::InputItem defined. -* @li status.h: Header file where 'TDT_StatusT' defined -*/ -TDT_StatusT GetCmdParameterObjAttribute(tdt::TsdCmdType type, void *cmdParameterObj, tdt::InputItem itemType, void *valuePtr, int &valueLength); - -/** -* @ingroup TsdClientCmd -* @brief creat tsdclient func parameter obj. -* -* @par Function -* creat tsdclient func parameter obj. -* -* @param type [IN] type tdt::TsdCmdType, tsd func type. -* @param cmdParameterObj [IN] type void *, func parameter obj. -* @retval TDT_OK Success -* @retval TDT_INTERFACE_NOT_SUPPORT -* -* @par Dependency -* @li libtsdclient.so: Library to which the interface belongs. -* @li data_common.h: Header file where tdt::TsdCmdType and tdt::InputItem defined. 
-* @li status.h: Header file where 'TDT_StatusT' defined -*/ -TDT_StatusT TsdClientCmd(tdt::TsdCmdType cmd, void *cmdParameterObj); - #ifdef __cplusplus } #endif // __cplusplus diff --git a/third_party/fwkacllib/inc/toolchain/adx_datadump_server.h b/third_party/fwkacllib/inc/toolchain/adx_datadump_server.h index a1c39a51..67adecd9 100644 --- a/third_party/fwkacllib/inc/toolchain/adx_datadump_server.h +++ b/third_party/fwkacllib/inc/toolchain/adx_datadump_server.h @@ -1,12 +1,18 @@ /** -* @file adx_datadump_server.h -* -* Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. -* -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -*/ + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ #ifndef ADX_DATADUMP_SERVER_H #define ADX_DATADUMP_SERVER_H diff --git a/third_party/fwkacllib/inc/toolchain/prof_acl_api.h b/third_party/fwkacllib/inc/toolchain/prof_acl_api.h index c8715041..07b32149 100644 --- a/third_party/fwkacllib/inc/toolchain/prof_acl_api.h +++ b/third_party/fwkacllib/inc/toolchain/prof_acl_api.h @@ -14,151 +14,99 @@ * limitations under the License. 
*/ -#ifndef MSPROF_ENGINE_PROF_ACL_API_H_ -#define MSPROF_ENGINE_PROF_ACL_API_H_ - -#define MSVP_MAX_DEV_NUM 64 -#define MSVP_PROF_API __attribute__((visibility("default"))) +#ifndef MSPROFILER_API_PROF_ACL_API_H_ +#define MSPROFILER_API_PROF_ACL_API_H_ // DataTypeConfig -#define PROF_ACL_API 0x0001 -#define PROF_TASK_TIME 0x0002 -#define PROF_AICORE_METRICS 0x0004 -#define PROF_AICPU_TRACE 0x0008 -#define PROF_MODEL_EXECUTE 0x0010 -#define PROF_RUNTIME_API 0x0020 -#define PROF_RUNTIME_TRACE 0x0040 -#define PROF_SCHEDULE_TIMELINE 0x0080 -#define PROF_SCHEDULE_TRACE 0x0100 -#define PROF_AIVECTORCORE_METRICS 0x0200 -#define PROF_SUBTASK_TIME 0x0400 - -#define PROF_TRAINING_TRACE 0x0800 -#define PROF_HCCL_TRACE 0x1000 -#define PROF_DATA_PROCESS 0x2000 -#define PROF_TASK_TRACE 0x3842 +#define PROF_ACL_API 0x00000001 +#define PROF_TASK_TIME 0x00000002 +#define PROF_AICORE_METRICS 0x00000004 +#define PROF_AICPU_TRACE 0x00000008 +#define PROF_MODEL_EXECUTE 0x00000010 +#define PROF_RUNTIME_API 0x00000020 +#define PROF_RUNTIME_TRACE 0x00000040 +#define PROF_SCHEDULE_TIMELINE 0x00000080 +#define PROF_SCHEDULE_TRACE 0x00000100 +#define PROF_AIVECTORCORE_METRICS 0x00000200 +#define PROF_SUBTASK_TIME 0x00000400 + +#define PROF_TRAINING_TRACE 0x00000800 +#define PROF_HCCL_TRACE 0x00001000 + +#define PROF_TASK_TRACE 0x00001852 + +// system profilinig switch +#define PROF_CPU 0x00010000 +#define PROF_HARDWARE_MEMORY 0x00020000 +#define PROF_IO 0x00040000 +#define PROF_INTER_CONNECTION 0x00080000 +#define PROF_DVPP 0x00100000 +#define PROF_SYS_AICORE_SAMPLE 0x00200000 +#define PROF_AIVECTORCORE_SAMPLE 0x00400000 #define PROF_MODEL_LOAD 0x8000000000000000 // DataTypeConfig MASK -#define PROF_ACL_API_MASK 0x0001 -#define PROF_TASK_TIME_MASK 0x0002 -#define PROF_AICORE_METRICS_MASK 0x0004 -#define PROF_AICPU_TRACE_MASK 0x0008 -#define PROF_MODEL_EXECUTE_MASK 0x0010 -#define PROF_RUNTIME_API_MASK 0x0020 -#define PROF_RUNTIME_TRACE_MASK 0x0040 -#define PROF_SCHEDULE_TIMELINE_MASK 0x0080 
-#define PROF_SCHEDULE_TRACE_MASK 0x0100 -#define PROF_AIVECTORCORE_METRICS_MASK 0x0200 -#define PROF_SUBTASK_TIME_MASK 0x0400 - -#define PROF_TRAINING_TRACE_MASK 0x0800 -#define PROF_HCCL_TRACE_MASK 0x1000 -#define PROF_DATA_PROCESS_MASK 0x2000 +#define PROF_ACL_API_MASK 0x00000001 +#define PROF_TASK_TIME_MASK 0x00000002 +#define PROF_AICORE_METRICS_MASK 0x00000004 +#define PROF_AICPU_TRACE_MASK 0x00000008 +#define PROF_MODEL_EXECUTE_MASK 0x00000010 +#define PROF_RUNTIME_API_MASK 0x00000020 +#define PROF_RUNTIME_TRACE_MASK 0x00000040 +#define PROF_SCHEDULE_TIMELINE_MASK 0x00000080 +#define PROF_SCHEDULE_TRACE_MASK 0x00000100 +#define PROF_AIVECTORCORE_METRICS_MASK 0x00000200 +#define PROF_SUBTASK_TIME_MASK 0x00000400 + +#define PROF_TRAINING_TRACE_MASK 0x00000800 +#define PROF_HCCL_TRACE_MASK 0x00001000 + +// system profilinig mask +#define PROF_CPU_MASK 0x00010000 +#define PROF_HARDWARE_MEMORY_MASK 0x00020000 +#define PROF_IO_MASK 0x00040000 +#define PROF_INTER_CONNECTION_MASK 0x00080000 +#define PROF_DVPP_MASK 0x00100000 +#define PROF_SYS_AICORE_SAMPLE_MASK 0x00200000 +#define PROF_AIVECTORCORE_SAMPLE_MASK 0x00400000 #define PROF_MODEL_LOAD_MASK 0x8000000000000000 -#include -#include - -/** - * @name ProrErrorCode - * @brief error code enum of prof_acl_apis - */ -enum ProfErrorCode { - PROF_ERROR_NONE = 0, // ok - PROF_ERROR_PARAM_INVALID, // param invalid, for example nullptr - PROF_ERROR_REPEAT_INIT, // profiling has already been inited - PROF_ERROR_CONFIG_INVALID, // config invalid, for example invalid json string - PROF_ERROR_DIR_NO_ACCESS, // dir is not accessable - PROF_ERROR_FAILURE, // failed to init or start profiling - PROF_ERROR_NOT_INITED, // profiling has not been inited - PROF_ERROR_DEVICE_INVALID, // device id invalid - PROF_ERROR_UNSUPPORTED, // unsupported data type or ai core metrics - PROF_ERROR_REPEAT_START, // profiilng has already been started - PROF_ERROR_NOT_STARTED, // profiling has not been started -}; - -/** - * @brief transfer 
profiling config in acl.json to sample config - * @param aclCfg [IN] profiling json string from acl.json as {"switch":"on", "result_path":"/home",...} - * @param sampleCfg [OUT] json string for GE as {"startCfg":[{"deviceID":"all","jobID":"1234",...}]} - * @return ProfErrorCode - */ -MSVP_PROF_API int32_t ProfAclCfgToSampleCfg(const std::string &aclCfg, std::string &sampleCfg); +#ifndef OS_TYPE +#define OS_TYPE 0 +#endif // OS_TYPE -/** - * @name ProfInit - * @brief init profiling - * @param profInitCfg [IN] config of init profiling of json format - * @return ProfErrorCode - */ -MSVP_PROF_API int32_t ProfInit(const std::string &profInitCfg); - -/** - * @name ProfAicoreMetrics - * @brief aicore metrics enum - */ -enum ProfAicoreMetrics { - PROF_AICORE_ARITHMATIC_THROUGHPUT = 0, - PROF_AICORE_PIPELINE = 1, - PROF_AICORE_SYNCHRONIZATION = 2, - PROF_AICORE_MEMORY = 3, - PROF_AICORE_INTERNAL_MEMORY = 4, - PROF_AICORE_STALL = 5, - PROF_AICORE_EVENT = 255 -}; +#if (OS_TYPE != LINUX) +#define MSVP_PROF_API __declspec(dllexport) +#else +#define MSVP_PROF_API __attribute__((visibility("default"))) +#endif -/** - * @name ProfConfig - * @brief struct of ProfStart - */ -struct ProfConfig { - uint32_t devNums; // length of device id list - uint32_t devIdList[MSVP_MAX_DEV_NUM]; // physical device id list - ProfAicoreMetrics aicoreMetrics; // aicore metric - uint64_t dataTypeConfig; // data type to start profiling -}; +#include +namespace Msprofiler { +namespace Api { /** - * @name ProfStartProfiling - * @brief start profiling - * @param profStartCfg [IN] config to start profiling - * @return ProfErrorCode + * @name ProfGetOpExecutionTime + * @brief get op execution time of specific part of data + * @param data [IN] data read from pipe + * @param len [IN] data length + * @param index [IN] index of part(op) + * @return op execution time (us) */ -MSVP_PROF_API int32_t ProfStartProfiling(const ProfConfig *profStartCfg); +MSVP_PROF_API uint64_t ProfGetOpExecutionTime(const void *data, 
uint32_t len, uint32_t index); +} +} -/** - * @name ProfStopConfig - * @brief struct of ProfStop - */ -struct ProfStopConfig { - uint64_t padding; -}; +#ifdef __cplusplus +extern "C" { +#endif -/** - * @name ProfStopProfiling - * @brief stop profiling - * @param profStopCfg [IN] config to stop profiling - * @return ProfErrorCode - */ -MSVP_PROF_API int32_t ProfStopProfiling(const ProfConfig *profStopCfg); - -/** - * @name ProfFinalize - * @brief finalize profiling task - * @return ProfErrorCode - */ -MSVP_PROF_API int32_t ProfFinalize(); +MSVP_PROF_API uint64_t ProfGetOpExecutionTime(const void *data, uint32_t len, uint32_t index); -/** - * @name ProfGetDataTypeConfig - * @brief get dataTypeConfig started with of one device - * @param deviceId [IN] deviceId to get dataTypeConfig - * @param dataTypeConfig [OUT] result get - * @return ProfErrorCode - */ -MSVP_PROF_API int32_t ProfGetDataTypeConfig(uint32_t deviceId, uint64_t &dataTypeConfig); +#ifdef __cplusplus +} +#endif -#endif // MSPROF_ENGINE_PROF_ACL_API_H_ +#endif // MSPROFILER_API_PROF_ACL_API_H_ diff --git a/third_party/fwkacllib/inc/toolchain/prof_mgr_core.h b/third_party/fwkacllib/inc/toolchain/prof_mgr_core.h index 4f013eef..f8cb1b22 100644 --- a/third_party/fwkacllib/inc/toolchain/prof_mgr_core.h +++ b/third_party/fwkacllib/inc/toolchain/prof_mgr_core.h @@ -16,7 +16,16 @@ #ifndef MSPROF_ENGINE_PROF_MGR_CORE_H_ #define MSPROF_ENGINE_PROF_MGR_CORE_H_ +#ifndef OS_TYPE +#define OS_TYPE 0 +#endif // OS_TYPE + +#if (OS_TYPE != LINUX) +#define MSVP_PROF_API __declspec(dllexport) +#else #define MSVP_PROF_API __attribute__((visibility("default"))) +#endif + #include #include diff --git a/third_party/fwkacllib/inc/toolchain/prof_reporter.h b/third_party/fwkacllib/inc/toolchain/prof_reporter.h index ff91351b..d5ed7569 100644 --- a/third_party/fwkacllib/inc/toolchain/prof_reporter.h +++ b/third_party/fwkacllib/inc/toolchain/prof_reporter.h @@ -41,42 +41,44 @@ namespace Engine { * the Reporter class .used to send 
data to profiling */ class MSVP_PROF_API Reporter { - public: - virtual ~Reporter() {} +public: + virtual ~Reporter() {} - public: - /** - * @ingroup reporter - * @name : Report - * @brief : API of libmsprof, report data to libmsprof, it's a non-blocking function \n - The data will be firstly appended to cache, if the cache is full, data will be ignored - * @param data [IN] const ReporterData * the data send to libmsporf - * @retval PROFILING_SUCCESS 0 (success) - * @retval PROFILING_FAILED -1 (failed) - * - * @par depend: - * @li libmsprof - * @li prof_reporter.h - * @since c60 - * @see Flush - */ - virtual int Report(const ReporterData *data) = 0; +public: + /** + * @ingroup reporter + * @name : Report + * @brief : API of libmsprof, report data to libmsprof, it's a non-blocking function \n + The data will be firstly appended to cache, if the cache is full, data will be ignored + * @param data [IN] const ReporterData * the data send to libmsporf + * @retval PROFILING_SUCCESS 0 (success) + * @retval PROFILING_FAILED -1 (failed) + * + * @par depend: + * @li libmsprof + * @li prof_reporter.h + * @since c60 + * @see Flush + */ + virtual int Report(const ReporterData *data) = 0; - /** - * @ingroup reporter - * @name : Flush - * @brief : API of libmsprof, notify libmsprof send data over, it's a blocking function \n - The all datas of cache will be write to file or send to host - * @retval PROFILING_SUCCESS 0 (success) - * @retval PROFILING_FAILED -1 (failed) - * - * @par depend: - * @li libmsprof - * @li prof_reporter.h - * @since c60 - * @see ProfMgrStop - */ - virtual int Flush() = 0; + /** + * @ingroup reporter + * @name : Flush + * @brief : API of libmsprof, notify libmsprof send data over, it's a blocking function \n + The all datas of cache will be write to file or send to host + * @retval PROFILING_SUCCESS 0 (success) + * @retval PROFILING_FAILED -1 (failed) + * + * @par depend: + * @li libmsprof + * @li prof_reporter.h + * @since c60 + * @see ProfMgrStop + */ + 
virtual int Flush() = 0; + + virtual uint32_t GetReportDataMaxLen() = 0; }; } // namespace Engine diff --git a/third_party/prebuild/aarch64/libalog.so b/third_party/prebuild/aarch64/libalog.so index e041ad7e1e1f697da1328537689b7b300ae292dd..65aefa59a84fb99b46d0674ef6870f62e282ec82 100755 GIT binary patch delta 96075 zcmV+W{{#TA)eV5)4UiZT5S0J`00000KmY&$00000KxvUGHh&N|0ssI2000m-0ssI2 z000000RR9100031000I6007wW0ssI2007wW0{{R3007wW0{{R3003AN00000001C9 z00000000000RR9100062000I60002;0ssI20002;0{{R30002;0{{R3003|T00000 z003|T000000FmEGBFN|g000000LbV8000000LbV80000002B%U0000002B(7N&!wH z*zp1Y00000*zp4Z00000*zp4Z00000C=LJs00000C=Qdl0UjL%uMpmq1tlp06T6Gy zOaimxAM(%u000070RRAbll1`~4Cg7tk{|$wXn-g{%#$DjDSwDU5`nk?8MPq-g9AVg zAZRF9S5P_`II{#Z03fG;f&?Q95HJv6P(&f5hM<@v01g140q%+82^dfip#ZRD1K0xy zKnNf~K}-Tc004+V7yvN1fB;GW01kx^0776E0pm(s0t$dnM1TSSln4?w5fC6|P{|7c zP+`5$0T6Np34aC^8884=2mzG@x&R1GXHx*w00;;YMAiThpvXxikpch>0H6RVK7t5{ zPy~QLL^2Rj0IntigD`mj00000004Rb00000004df004gg00000004jh004sk004vl z004&o004*p004~u00000000000052v00000005Ez0Dl0K0001%0001(0001*0001- z00000000000001;000000001>0001^0001`0001|0001}00000000000002100022 z0000000025000270002A0002C0002E0000000000000000002H0002I000000002L z0002N0Dk}g!TV!Z00000&Hw-a(EtDd)Bpeg z)&Kwi*8l(j+5i9m-2eap-v9sr;Q#;t;s5{u;{X5vHq)$>;M1&?f?J)?*IS*@Bjb+@&Et;^8f$<^&|iQ000000QUd@000000Qdj^0Qmp_ z000000Q&#{000000R59;0TX}z0000000030000000RR920RR930RR940RR9100006 z0RR980RR9A0RR910000B0RR9G0RR9H0RR9I0RR9J0RR9L0RR910000M0RR9O0RR9Q z0RR9R0RR910000R0RR910000V0RR9Y0RR910000Y0RR910000Z0RVph00000B>?~c zC;+{&LjeE) zL;(N*MF9W+Mgaf-NCAHU07?M>00000080S?00000089Y@00000000000000008jw{ z08s$|0000008;?~08{}009XM409ye70000009*k80AB$B0AT?D000000Am3F0A>LI z0B8XK000000BZpN0B!*Q00000000000B->R000000CNEV0Cflf00000004HA0R|I) zcL4wZcmV(a00000c>w?b00000dI10cdjS9dd;tIeeE|Rf0000000000egOag00000 zfB^si00000fdK#jf&l;kg8={lgaH5mg#iEn00000hyefq00000iU9xsi~#@ujR61v zj{yJxkO2Sy00000kpTbzk^uk!0001g0Fwa#0F(g$0F?m%0G0s&00000000000G9y( 
z000000GI&)0GR;*0Ga^+0Gj~-000000G$B<000000G0H6T?000000HOf^ z0Hgr`0Hpx{0Hy%|0H^@~0I2~00IC510IUH30ImT50I>l80I~r90000000007006TA z006X;83$JcBvZUdlW+$V1c>F(dXt0)NF0@Sfx`+e8iIn5-_~7Aw-AYSR;k^m zCucCp)s-j|-uQ`YDrxVD4Rj(|>OmP^-kcmQlW+*E45_B3>|+HGRoOD@F_Rz(co=LU zhutlHk|t}yr3F=Hegfz#uP52(Wvi2*34jFq=y2ncAPS5GFu!V6ldKA81UY7Nk&_e) zSQG?nO&N)VB#>*Kw!ATRZB0E0xjmDB3m^n9`#Z;zpbJcsZ~+7c0{{sCz<-nS3nl?% zmys9)7XjUqF$@<05|@z}0~Y}llOYWk0d$ig4Hp3wm(d*q7Xi_eu?-gi6_b$+7XkZ| zF$@=z;0+i7_LCtE7XgEpktYKe0e6$J4Hp3vlQ9ez0dtd44Hp3elW`Fj0SuFI5f=gC zld%mK0R)o)5*Gm$lkp4}0r!(Z6c+*8lTi&90S%KO4Hp4&m(d*q7X{b=000q_VF42X zEtk<90~Z0AlQ9ez0l1gZ9Rn8urjtPw7XikT(GC{@Hj{A?7XhD>0S^}eC6fUU7Xjpx z@eCIMER$gj7XiMLkqj3BWS4<10~Y~?lTi&90X>sp3>TBY4;TS>mys9)7Xf{fVGI`m zV3Sb|7Xf~kktYKe0eF+K4HuJ85Eun#8vpN{H zlR*?00Yj6~5f_sv6c_=!lfeuZ0br9c3>N_;mmw$v7XhA=K@=AO=aXR!7Xj*%Q4kja z_>)l$7XdAokr)FP0f>_!4Hp44ld%mK0S=duCj%D&@RI=&7Xeh0(GeE`QN{blQ9ez0jZNA z4Hp60lfeuZ0T7eX5f=g4ld%mK0l$+W4Hp4-lVJ=O0dben9Rn8uxtGx$0~Y~JlVJ=O z0VN{LlTi&90Zo%34HuJ0 z6&L~glhF|u0VISY@5dZ)Z01E)P76AYN00004000000000(5C8xY01E(+5&-}J0000400000 z0000`0ssIK01E)Hod5s;0000$0ssI20002m82|th01E&Ja{&MV0001(000000000a z7ytkg01E(+UjYCB0001#0RR910000d0RR9J01JNrD4PHP000003;+NC00000VFLgF z5&#PT@b&-z00000Bme*a00000w*mkF5&#PT(4qhU00000`~d&}00000nhXE{5dahb z7ytwS000001ONa400000E*Ag*5&#PT2vPw600000U;zLC00000!T|sP5&#PT@S6Yt z004gg0KfqN000000D%ku01^NT0D$!X000000PFw&0000004Wgw01^NT05BE-00000 z05AXm000000HFc^01^NT0MPdU0000004x9i000000E`&`01^NT05EU?000000Ehqp z000000K^mk01^NT0MIM}000000Pp|+004gg0049u000sI3jok>0RR91003YB00000 z005B+000sI3jiSH00000001xp000000021{000sI3jio#0RR91000aC00000004Xs z000sI3jhER0RR91000C500000006BS000sI3jpYG0RR91000O900000006WA004gy z01E&Bng9R*00008000000002A1pojN01E(s_W%F@0000e000000002?0ssIK01E&J z`2YX_000130ssI20000=3;+NU01E(U6afGL0000O00000000012><{R01E)X836zQ z000040RR910002L761Se01E){NdbQV00000*Z}|l00000eiQ%z5&#PTKs^Bf00000 zWB>pF00000N&^4@5&#PTn5O^$00000xBvhE00000ehmNs5&#PT$P)no00000C;$Ke z00000jtc+)5&#PTc00000paFjX00000 
z0KWqO01^NT0KljK000000GI#(000000NWA(01^NT0Kk?2000000H^@~000000QMOG z01^NT0HAXL000000OSDx0000007D1>01^NT09eZa0000006YKy0000003i?n01^NT z0Duw!0000000aO4000000CIm9000sI3jlys0RR91002w@00000007|=000sI3jla0 z0RR91001}v00000005={000sI3jnYU0RR91001Ze00000005Z*000sI3jnB_00000 z001-q00000003nG000sI3jo*+0RR91001Ze00000006-U000sI3jlw>$p8QV0001N z000000000g1ONaM01E&>tN;K20000K000000000n3jhET01E)f>;M1&0000m00000 z000265dZ)Z01E(^7XbhO00004000000001u5C8xY01E(E6afGL0000G000000001N z0ssIK01E){p8x;=004ggd;tIe00000G6w(v5&#PTIK=<}000000096100000j|l(( z5&#PT5Y+$x00000^Z@_>00000F$e$v5&#PTP|E-S000001ONa400000kp=(&5&#PT zh!z0=00000FaQ7m00000@e=?55&#PTASwX>000006afGL004gg0Pzq201^NT0ALjX z0000006+i$000000MrHm01^NT0C>Rw000000Pp|+000000Qv_201^NT0BFhp00000 z0Mq~g0000005cE(01^NT0EiL+0000000aO4000000K*Od01^NT05A>#0000004Mh($0002g000000001O1^@sO01E)v z6#)PM0000S000000000Q2LJ#P01E&}!~g&Q0002+00000000035&!@a01E)f7y$qP z00004004gg00000dV z01^NT08rEb0000008jt`000000F@E|01^NT09Y;o000000Ehqp0000004f>)01^NT z0Jwj40RR91007_s00000004CW000sI3jm-O0RR91000;O00000007Do000sI3jjzW z0RR91006WA00000003|k000sI3jo+U0RR910000100000008L`000sI3jmlH0RR91 z001xm00000003tP000sI3jnak00000004gk000000002$5&!@a01E*4f&l;k0002Q z00000000113jhES02Khx7X$zR00008000000000o82|th01E(6Zvg-R0000K00000 z0001>82|th01E)PZ~*`S0000m0RR910001x2LJ#P01E&($N&HU0001p00000004gg zMF#)?5&#PTIK}_~00000cmV(a00000tQG(O5&#PT_(K5z00000KmY&$00000^$7p~ z5&#PT(A)q3000007ytkO00000(h~px5&#PT=q~{P00000d;tIe00000Hq)$0001300000000188UO$i01E(kcmV(a0000u000000000!0RR9J01E&( zn*aa+0000q000000002z8UO$i01E)J$I~5&#PTz%2m)00000 zlmGw#00000I}rc?5&#PTU={%Y00000C;$Ke00000wGIFP5&#PT$ld?|000003;+NC z00000TMPgI5&#PTsPTUQ000000DJ)e000000L>Zz01^NT0PubR000000Q3O>00000 z0Q3a_01^NT0BE@Y000000IUE2000000I?1L01^NT04NRt0000000;m8000000NfP- z01^NT0BAn}0000006+i$000000R0dE01^NT0H75C0000004RR|00000006lV000sI z3jn|r0RR91003+N00000002-9000sI3jlZx0RR91000C400000002D{000sI3jpwf z0RR91000C400000001Nw000sI3jnA;0RR91002M$00000007$z000sI3jnbA00000 
z003A300000004g^3;+NU01E)<5&-}J0000;000000002z2LJ#P01E&Z_W%F@0000e z000000002K5&!@a01E)vG64Vp0002E0{{R30002e761Se01E)fO921?0000)0{{R3 z000293jhET01E*4>;M1&000000RR91000256#xJd01JNrXh8u000000KmY&$00000 zdKLfx5&#PTs6qh%00000C;$Ke000006%qgd5&#PT&=>C8xG000001QY-O z5&#PT*gF9L00000WB>pF000000RaF25&#PTP!|CJ000001ONa400000RSf_D5&#PT zfD{1$004gg01yBG000000DKq#01^NT09auG000000CWKW000000HOl`01^NT0O-yD z0000002BZK0000008hi01^NT0QeOF0000005|{u00000 z0D2k#01^NT0N{H80000001N;C0000007Di401^NT0BAq~0000006+i$000000Jj?~c z0002=0RR910000E6951b01E&BBLM&a0002!000000000d6951b01E)}K01^NT z0I&c70000003-nb0000001_4e01^NT0O&sf0000006+i$0000003Zke01^NT07%OK z0000000aO40000005S;x01^NT065eD0000001N;C000000RIF401^NT0C>;<00000 z00062000000GAX101|%y3jnA+0RR91002M$00000002M<000pH6#%#g1ONa4000mW z00000004v)000sI3jpXq0RR91002M$00000007?#000sI3jpxw00000008U&00000 z003?f000sI3jmN40RR91001}u00000004>-000sI3jmNd0RVph0000)1ONa40002% z1^@sO01E(U!T;M1&0000W004gg00000ody5^5&#PTIKTh^00000Pyhe`00000P6_}35&#PTc;Em4 z00000m;nF)00000f(ift5&#PTnBo8c00000gaQBn000003;_TD5&#PTSQh~R00000 zKmY&$00000S_%LF5&#PTI0OLz00000FaiJo00000gam&801^NT08p*~000000Mr2h z00000009aB01^NT0O;HR0000005kyr000000E_|v01^NT0C1rI0000009*k800000 z07()601*Hc0Jts$0000001yBG000000Gb*A01^NT0PuSO000000FVFx000000PYn4 z01^NT04RSz0RR91002M$00000008h0000sI3ji1r0RR91001BW00000007w#000sI z3jjzJ0RR91000;O00000003MB000sI3jpw}00000000C40000000069000sI3jkQd z00000008g+00000007(t000sI3jiRw00000004hT000000001B6951b01E&}D**ri z0002+000000001^1pojN01E)9_W%F@0000e000000002V5C8xY01E&x6#)PM0000O z00000000236aWAc01E)v00000oB#j-000001Qq}Q5&#PTs6hb$00000KmY&$00000o)`cC z5&#PTNMiv200000v;Y7A00000sR;l85&(Y-00`Fr0000005k#s000000I&!E01^NT z02mYj0000000;m8000000Gbs501^NT05CEE000000Hgo_0000005T2$01*Hc0N57< z0000008sz{000000L=mb01^NT09X?N0000005|{u0000006Pr;01^NT0AT(A004gg z005W(00000005;B000sI3jmN50RR91001xm00000004ss000pH6##e#1ONa4000O8 z00000004>u000sI3jlbi00000002Y)00000006rf000sI3jhda0RR91004vl00000 z000ya000sI3jlBu0RR91000aC004gg0000B4FCWV01E&(`~Uy|0000q0RR910000@ 
z6951b01E(UF#!Mo0002U000000001G3;+NU01E&p^8f$<000130ssI20000R82|th z01E)vZUF!Q0001Z000000000I6#xJd01E(UJ^=s#0000$000000000B1pt2l5&#PT zn6dx>000001ONa400000We@-W5&#PTpb`N900000NB{r;00000F#`Yq5&#PT@TLF& z00000r~m)}00000!WjSn5&#PT@NxkF000006aWAK00000fENG&5dakc_$~wh00000 z0096100000H5LE>5&#PT=s|x00000006+i$000000I>l801^NT0N|Sd0000001N;C z000000P76^01^NT0B8mQ0000009XJ3000000OSM!01^NT09XeB0000001N;C00000 z06YW$01^NT09dR5000000F(d#000000Pza|01^NT0B8pR0000001STs00000004y( z000sI3jnYv0RR91005`}00000003VN000sI3jp~300000005i-00000007Sk000sI z3jjdb00000007(o00000008z4000sI3jn|d0RR91005W(00000008U^000sI3jkpI z00000007hg00000004ih7XSbf01E)vTmb+80000?000000001v7XSbf01E)nRsjG2 z0001B0ssI20000_6#xJd01E)9Kmh;%0000$000000000L8UO$i01E(!bpZeX0000i z0RR910001+5dZ)Z01E&}7XbhO0000800000000237ytkg01JNr0Av9G000002m$~A z00000a0CDV5&#PT_^bc`00000SOEY4000001O@;A5&#PT=#>Bf00000)Bpeg00000 zrxE}F5&#PTfO-J{00000U;zLC000002Mz!L5dakcFc<^?00000K>z>%00000K?(o> z5&#PTAQS-r004gg05|{u000000D}eq01^NT0PqX}000000DJ%d000000HO#001^NT z0LU5v000000I&c6000000Cfid01^NT0Jz5h000000Du4h000000GJOKaz0000a000000000l z4*&oX01E(E4*>uG0001_000000000O3;+NU01E)9?*IS*0002|0RR910000b4gdfV z02Kgu7z6+S000080RR910001G7ytkg01E&}VF7;t000003;+NC00000$_oGh5&#PT z;1>Y^00000v;Y7A00000%ohLv5&#PTC|v;n00000ga7~l00000Y!v_i5&#PTC_VuI z00000KmY&$00000RR9105&#PTK$!pl00000U;qFB00000LJI%@5&#PT*a-mu00000 zU;=*t0000002>Pc01^NT00`><0000002BZK0000002TxQ01^NT02t2z0000009*k8 z0000009ghA01^NT0LT>q0000001yBG000000A^NT0KgUj0000006+i$00000 z08t4501^NT07%pT0000000aO40000005N|B000sI3jm<900000005`}00000008R& z000sI3jjd(00000000aC00000006uO000sI3jm0~00000008g+00000005^9000pH z6aX**1ONa4007_v00000000~c000sI3jiSA00000005)_00000000*b000pH6###L z83X_T000000RR910001Q2mk;Q01E)1%m4rY0001Z0RR910002g6aWAc01E)9J^=s# z0000$000000001F82|th01E)9Zvg-R0000a000000000G7ytkg01E(sUjYCB0000C z000000001a5dZ)Z01E&B7XbhO004ggKmY&$00000>j(e<5&#PTc+vm>000002mt^9 z00000Xchnf5&#PTctZgI00000d;kCd00000aS{Lk5&#PTFdYE^00000@h~Us^FSMr?I+XhCprOmAmUb^u>rb97;2YhPn%YhPz|VRB>uV{C6@ 
zYhPz&baZKLWdKxZZDmAhW@Z3XZ*OczWpq$!WB^oeZ){U+Wo2-a5j7N(&NI7{rZjDn zBsDM}Ute%?Z)a{{ZDjyoS}`?DZ)Zbva%@R%V@z*nX=8Sie>Id9IZ1A5bVYV$Zgfm# zc4cfrc|~q^c9Rb_wWo$xuMQ(Pp@;MCwvk5zX1(TdWF0=GN!2vu} zZ*OcpY*T1WVRQgpY*T1$M05aiXl-P4090>p zY*T1$Lv(Dj$3pA@U1wi#OmAmGb!GrV>)Wo%_( zb7hkcMH?tWb!Jy`X>?@(OmAmKY;|*JLt$)eVqs%z08nyoV`X!5OmAmNZfSIrYegk3 zNp5L$OmAmKY;|*JRB3HxZ*_D+c|~q^07PtWXHsQwZ*p`=ZfSIBVQgu7Ws^rmC6kav zP=7>nc0+7wWo~p*Wo%_(b7cTTWp+?(Z)Z|vV|G(?VRCc;L~?dhWpi_BZ*EC$X>MtB07PZfSG?Pz-W!V`X!5OmAmKa%GeLMJyUjZ((Faa%pyD zazt!wMs;pubZKvHa{yItY_m*8*a1vTZ((Fob#8QNZDmAkYyeDeVPr&XY({l%V^e8v zXKZBvRc>r=VPsNuZggpFWkhUj08DRTWJGLiMs;pu0Ay@$aAj@)WNc$>Z*yg{cSxiJ z9dL4QXL4a}UukZ3Z)0I}X>V>{b7*gJbYE^^ZDo@hP0#@alfg|BlRi!clLAgWlRQos zlRQoivtUkI0+Vn~9+SFI3V#9{000mG001BW005VfmIeR-1pu`h00000000310RV&> z000mG001BW005VfmIeR-1OT-g00000000310RWmC000mG001BW005VfmIeR-0|2!f z00000000310RTT6000mG00000005VfmIeR-0syre00000007wW11A6g000031ONa4 z0000unE(I)0002+@dE$=000031ONa40002+n3HNzE`M;Q0RR91001xm1ONa400097 z00000004NV0RR91002M$1ONa40009700000004ld0RR91002+`1ONa40009700000 z004-l0RR91003YB1ONa40009700000005At0RR91003|R1ONa40009700000005Y# z0RR910Dl0000aO4000031ONa40001>rU3u|0000W0R#X5000031ONa40001}rU3u| z0000m0R#X5000031ONa400026rU3u|0000$0R#X5000031ONa40002ErU3u|0000` z0R#X5000031ONa40002MrU3u|0001B0R#X50Dk}g0|Wp700000$ff}R00000Z~+7W z000000|Wp700000(53+Z00000fB^&m000000|Wp700000*rovh00000kO2e$00000 z0|Wp700000=%xVx00000paBE`000000|Wp700000@TLI(00000umJ=B000000|Wp7 z0Dk}g0QjZ>000000KfqR0000000RU700000005@}000000MG#h0000000RU700000 z00^f6000000N?=x0000000RU70000001&4E000000Pq0>0000000RU70000003fFU z00000000660000000RU70000005GQk0Dk}g000mI1ONa40009700000002Oz0RR91 z001BY1ONa40009700000002m*0RR91001xo1ONa40009700000003C00RR91002M& z1ONa40009700000003yG0RR91002+|1ONa40009700000003~O0RR91003YD1b+Yk z000031ONa40001hrvU%}0001R0t5g6000031ONa40001prvU%}0001h0t5g600003 z1ONa40001xrvU%}0001x0t5g6000031ONa40001(rvU%}0001>0t5g6000031ONa4 z0001>rvU%}000260t5g6000031b+Yk00000sHXt{00000zybsS000000|Wp700000 
zxTgUC00000&;kSi000000|Wp700000z^4HK00000-~t2y000000|Wp700000(5C?a z00000@B#z?000000|Wp700000*rx#i0000000RU7000000|Wp700000;D4t900000 z01yKN0000000RU7000000O+Ry0000003ZVd0000000RU7000000Qjc?0000005Agt z0000000RU70000000^i70000006+r-0000000RU70000001&7F0000008j%200000 z00RU70000002rtN000000DoWu1ONa40009700000001DU0RR91003|U1ONa400097 z00000001bc0RR91004jk1ONa40009700000001zk0RR910058!1ONa40009700000 z0020s0RR91005u^1ONa40009700000002O!0RR91006K91ONa40Dk}j1ONa40000; zr~v=~0002M0|Wp7000031ONa40000`r~v=~0002c0|Wp7000031ONa400013r~v=~ z0002s0|Wp7000031ONa40001Br~v=~0002+0|Wp7000031ONa40001Jr~v=~00000 z1Oxy8000031ONa40Dk}gaHs(Q000005CjAO000000|Wp700000c&GsY00000AOr*e z000000|Wp700000fT#fg00000Fa!hu000000|Wp700000h^PSo00000Km-H;00000 z0|Wp700000n5Y2&00000Py_@3000000|Wp700000pr`==0Dk}g0AK_J0000000RU7 z000000H~+|000000B{5Z0000000RU7000000I;Y5000000DuGp0000000RU700000 z0Jx|D000000FVR(0000000RU7000000KljL000000H6c}0000000RU7000000LZ8T z000000I&oE0Dk}g0009700000007Xa0RR91006)Q1ONa40009700000007{q0RR91 z007Vg1ONa40009700000008Ky0RR91007_w1ONa40009700000008i)0RR91008g= z1ONa40009700000008)?0RR91000mL1ONa4000970Dk}g00000#{mEU0000W1q1*9 z000031ONa40000O#{mEU0000m1q1*9000031ONa40000m#{mEU0000$1q1*900003 z1ONa40000;#{mEU0000`1q1*9000031ONa40001B#{mEU0001B1q1*9000031ONa4 z0001Z$A19;00000a0LVa000000|Wp700000kjDW400000fCU5q000000|Wp700000 zsK)^S00000kOc$)000000|Wp700000xW@qi00000palc~000000|Wp700000$j1Qy z00000umuDF000000|Wp700000*vA0?00000z<&h<0000000RU7000000Px2F00000 z0MG>l0000000RU70000000_td000000N@1#0000000RU70000004T@-000000PqC_ z0000000RU70000006@qA00000000IA0000000RU70000009eQY0000001yTQ00000 z0Dl7n00000003~v0RR91001Bc1ONa40009700000004l<0RR91007|e0{{R300032 z004dg0000000000008Lm0{{R300032005c*0000000000008*$0{{R300032006oG z0000000000000>O0{{R3000350058y0Fw_}36uC)3>wq`0000000000001ce0{{R3 z00035001)qlO9?YlT2C+4mke<00000009I503897PFfa|d|C_+NdE%>000000R#X5 zMFEq3S{9S6S_}?Y{{sL3000011ONc30h6v;78yzb0000000000003zJ0{{R300035 
z005zr-C7q7YXJZN0000000000c#|<(7n2xU3>km{0000000000004;p0{{R300035 z006_2JzEzU76AYN0000000000nEwL+000000R#X5i<50z7aB(Z0000000000005}} z0{{R300035005r>lb%}^8I=J500000000000J#4H00000009I509upHTNfG600000 z000000002U{{sL3000011ONbMll@y48fyRm00000000000NDQn00000009I50CEA7 zE?gEFZvX%Q0000000000=>G!%000000R#X5bCX?M7k|nC0000000000008*^0{{R3 z00035004FX0000000000004;e0{{R300066000930000000000005Bm0{{R300066 z000C40000000000005Zu0{{R300066000F50000000000005x$0{{R300066004jh z000000Dk}g0001}^aB6@000021ONcS000000000000026^aB6@000021ONa`0RR91 z000000002E^aB6@000021ONaA00000000000002M^aB6@000021ONaA0RR9100000 z0002U^aB6@000021ONce00000000000002c^nU{Y000000t5g6SOEY40000000000 z*z^Mc000000t5g6Zvg-R0000000000;Pe9k000000t5g62LJ#70000000000==1{s z000000t5g6mH+?%0000000000@bm)!000000t5g6+5i9m0000000000`1At+00000 z0yG2w00;m80000000000008v^0000000IO600031000000000000{L10000000IO6 z0FIN5To)P%00000000000000O^#cF^000021ONb=0h7L577ZH#0000000000001bH zFpjo0000000000002n!0{{R300066 z003qIlTKY04SE0o000000000009cd3UKf*yT?`ox00000000000001J^#cF^00002 z1ONc2ldWAB8XN%t00000000000C@ES0000000IO600RM&-dz?N4*&oF0000000000 zi1h;i000000t5g6mjIIvUKSb<00000000000001(^#cF^000021ONag0h2yn78zp! 
z0000000000005}<0{{R300066004rMZC)1|5dZ)H0000000000xb*`7000000t5g6 z5&)B)UKSb?00000000000002U^#cF^000021ONa*0h7*N78wx%0000000000007wa z0{{R300066006O*{azOtTmb+80000000000==B2t000000t5g66q7Aq7a0`*00000 z00000008*)0{{R300066000(~U0)Xs2LS*800000000002$RuZ7n7J@3>qB)00000 z00000000>F0{{R300066003VBlfGXT8K?mO000000000004VkY0000000IO602!0* zUl$py00000000000000u_5%O_000021ONb*lO13elWbuO4M_F_0000000IO6034G| zU>6xw0RR910000000013_5%O_000021ONaXlYL+p8598k00000000000BH6D00000 z00IO60LzoDU>6yE0RR91000000001Z_5%O_000021ONaYligq!8lwOJ0000000000 z0EqSj0000000IO603QI84q+A=Z~*`S0000000000nDzqz000000t5g6O#zcWVHO$l z00000000000001}_5%O_000021ONaalWk!a8X*7x00000000000J!!80000000IO6 z0N4PNo?#XmA^-pY0000000000$o2yO000000t5g6egKosVHO&R00000000000002k z_5%O_000021ONad0F(Y<78xV}0000000000008Lr0{{R300066001SEEn*iLP5}S_ z0000000000`1S(;000000t5g6ER$Vg7a7C=0000000000000R00{{R300066001VF zjbay*++z$5821AJ000000t5g6T>+E6ViuDOWDEf)lW}7gllWo`8O;Cy0000000000 z066yp0000000IO60O6AzV;31H0RR91000000000;_X7X`000021ONc^lTBk68k7M5 z000000000009f|}0000000IO604V^Ieq$CIRRI710000000000X!ipE000000t5g6 z5&@I0V-}PAWDE{?_X7X`000021ONaz0h8Wi7LzPx3=W9*0{{R300066001rllMZAS z8khh800000000000GRg!0000000IO60H6VrK4cadcL4wZ0000000000sP_W^00000 z0t5g6gp+M#7aFqx0000000000006l60{{R300066004>rlb&Q28Y}<+0000000000 z0Lb?P0000000IO60H*+x&SVxDwg3PC0000000000*!Kef000000t5g6EtCCZ7a5%a z0000000000008Ls0{{R300066001tNEoB!P(f|Me0000000000`1b<<000000t5g6 ztpSr>Wfl!D000000000000008lW}GjlbB@;8bbj9000000000002ufK0000000IO6 z05<`XzGW5-IspIx0000000000D3j4<7nAs93=NF|00000000000020X0cRJJC}s>A zGynhq0000000000NcaN)000000t5g6odANN0RR910000000013_yYg{00002 z1ONcIlYM3w3;+NC0000000000Xp@m=7n8VV3>r580000000000004OS0{{R300066 z000C5lip?)8aMy|00000000000EqYl0000000IO602%?44rdk(#Q*>R0000000000 zn3ExB7n4Y53>oDB0000000000005}?0{{R30006600735ZD$u6d;tIe0000000000 zxcCDA000000t5g67Xg!=XBG{|00000000000002Uld)wNlh|hr8O#6x0000000000 z0ND5g0000000IO60E(0SXBQbe00000000000002!_yYg{000021ONb(lPzc$89)I5 
z00000000000QmR=0000000IO60Hu>%Xcrkg000000000000008`2zp|000021ONbz zlZ|K>8bSd8000000000002uiL0000000IO6009A$zGxPc+-VFBDER{b000000t5g6 z=m3-MXcigY00000000000000u`2zp|000021ONa&lO1Un8b1I4000000000007&@* z0000000IO60IC3!PH7eyEdc-k0000000000Sos40000000t5g6@soXN7n5vi3=U}d z0{{R300066001EYldfqNlbmV{4tV(k0000000IO603HF8-f0$-%xVk{i1`Bm00000 z0t5g669JPBY8Dx)0RR91000000001(`2zp|000021ONa-lRat|lN@Ob4XF7800000 z00IO60G^X=Y8R7SYYYvz`2zp|000021ONc000000t5g6-jiKx7a2$Z z0000000000000R30{{R300066001YGjcXSg2>}2A000000000082SSM000000t5g6 zg8`GiYZeXF00000000000000elL2iPllW^48Il120000000000066*s0000000IO6 z0F{#+Y!@2r00000000000000;`U3y}000021ONaE0h3N_78#)c0000000000003C} z0{{R300066005PfeQXz#Y;6n)JZWbBV00000000000002^`U3y}000021ONb1lU;5X8ngfa00000 z0000000{d70000000IO605JiRj&2qjj{yJx000000000082bYN000000t5g62mzD6 zZWbAA0RR91000000000e`vU*~000021ONaulkIL785jWo0000000000066;t00000 z00IO608^74ZxI000000t5g6p8%7tZx$JI0RR9100000 z0001Z`vU*~000021ONa=lihC@8GZo(00000000000Eqho0000000IO60A-U6a2Fbj z00000000000001(`vU*~000021ONb30Fyp&78#QP0000000000005}_0{{R300066 z008onZEzPFi2(or0000000000xcdVD000000t5g6{Q#4ma26Vj00000000000002U z`vU*~000021ONb40F%ye78>mU0000000000007wg0{{R300066003zLlm2iPlU#8O z4e0v=0000000IO609TVOaTgj`00000000000002^`vU*~000021ONa90h3;F78#iU z0000000000000R50{{R300066003E&jd2$mS^xk5000000000082keO000000t5g6 zQ2~>_aTXa{00000000000000e`~v_0000021ONb>lkIUA4fy~70000000000063FD zau<^*ats<>00000000000000;`~v_0000021ONbd0h3O077ddC0000000000003B% z(Q+4)h;j@Wy8r+H0000000000X#4{J000000t5g6>65K;7aCsx0000000000004OW z0{{R300066004ObliqR`8jt`000000000000Eqkp0000000IO608#;y4s#Y6=Kufz z0000000000nEV3(000000t5g6q?0{!7a9x!0000000000005}`0{{R300066008v> zlWubs8h-!)00000000000J!`E0000000IO609OH%o^uu&s{sH20000000000$ovBU z000000t5g6VE~iPa~2J-00000000000002klTmaRlL&MS8a)9300000000000OpF0000000000`1}I^000000t5g6vXfnO7aHOK00000 z00000000R60{{R300066007_sla6#28D#(f000000000002uuP0000000IO60HTw< zbQc+B00000000000000e{R031000021ONbhlkIdD84Upd0000000000066^v00000 
z00IO60QQp|br%}k00000000000000;{R031000021ONa&0h3O378xS}0000000000 z003D10{{R300066003Zm=y0000000000007AS z0{{R300066003!|&2|?VY5)KL0000000000*!=?l000000t5g6o0I)^7a9-&00000 z00000008Ly0{{R300066003+NlP-4_8V&&f00000000000Qmg_0000000IO60KWi} zUUwE6<^TWy00000000002>t^A000000t5g6qXCnScNUXOcnl2~{sRC2000021ONd0 zlf8Er8T$liYX=4oLn30000000IO60Br%2PIwlR40#LzSd)=?7n6v13>h*30000000000 z003zI0{{R300066002jmt#}t2a{vGU0000000000c>V(b000000t5g6b^(*#corIT z00000000000001p{sRC2000021ONc90h11S78#8I0000000000005Z&0{{R300066 z008%sJ$V-mumJ!70000000000sFT5Y7n68-3>tL+0000000000006lD0{{R300066 z005-{lb(4N8JGb800000000000LcCW0000000IO607R3`c^4T10RR91000000002k z{sRC2000021ONbcll^%Yll*xM4e0&@0000000IO60Ckstye*02Ep z@DY|m`4FNMkstye*02Ep@DZ9q`4GYskstye*02Ep@DZMWLirHl6p`4A!)kstye*02Ep z@DU0_`4B=Fkstye*02Ep@DUC}`4D0lkstye*02Ep@DUP2`4EB_kstye*02Ep@DUb6 z`4FNQkstye*02Ep@DUnA`4GYwkstye*02G80PqnWL-`Qm7?B_XAJ(t|0PqnaL-`N_ z8Id3YAJ(t|0PqneL-`OQ8Id3YAJ(t|0PqniL-`Ow8Id3YAJ(t|0PqnmL-`P58Id3Y zAJ(t|0PqnqL-`Pb8Id3YAJ(t|0PqnuL-`P*8Id3YAJ(t|0PqnyL-`QG8Id3YAJ(vc z0RZq3K12Bs;u(=30w30}0RZq3LPPlw0veGZ0w30}0RZq3Mnm}!A{vn(0w30}0RZq3 zN<;Y&LK=}E0w30}0RZq3PDA++Vj7Vk0w30}0RZq3QbYL=f*O$^0w30}0RZq3Rzvv^ zq8gDP0w30}0RZq3T0{8|!Wxkv0w30YumJ$@5ne<25aJqL-`Qm9FZUbAJ(t|0Pqo_L-`N_9g!dc zAJ(t|0Pqo}L-`OQ9g!dcAJ(t|0Pqp2L-`Ow9g!dcAJ(t|0Pqp6L-`P59g!dcAJ(t| z0PqpAL-`Pb9g!dcAJ(t|0PqpEL-`P*9g!dcAJ(t|0PqpIL-`QG9g!e60w30}0RZq3 zzC-yC;vJD70w30}0RZq3!bABG0v?ed0w30}0RZq3#zXlKA|8<-0w30}0RZr`^ndRO zf6x&|M)?pTGLawxAJ(t|0MHRiM)?pzGLawxAJ(t|0MHRmM)?q8GLawxAJ(t|0MHRq zM)?qeGLawxAJ(t|0MHRuM)?q;GLawxAJ(t|0MHRyM)?rJGLawxAJ(t|0MHR$M)?rp zGLawxAJ(t|0MHR)M)?o|Gm#(yAJ(t|e*n-CW=8oCA~TU70w30}0RYeuYDW1GLNk#d z0w30}0RYeuZbtbKVl$B-0w30}0RYeuaz^H%5W+N(AOauOumJ$j5uQf*5aKkE zAOauOumJ$j5u!%<5CS!kAOauOf3N`n&=ICa`4A#Ckstye*02Ep&=IOe`4B=ikstye z*02Ep&=Iai`4D0?kstye*02Ep&=Imm`4ECNkstye*02Ep&=Iyq`4FNtkstye*02Ep z&=I;u`4GZ2kstye*02Ep&=I~y`4HkYkstye*02Ep&=JB$`49p&kstyef7Y-80MHS} zM)?pTHjy9#AJ(t|0MHT2M)?pzHjy9#AJ(t|0MHT6M)?q8Hjy9#AJ(t|0MHTAM)?qe 
zHjy9#AJ(t|0MHTEM)?q;Hjy9#AJ(t|0MHTIM)?rJHjy9#AJ(t|0MHTMM)?rpHjy9# zAJ(t|0MHTQM)?o|H<2I$e;?Mc0RYeu=0^DtA~%sB0w30}0RYeu>PGnxLN}2h0w30} z0RYeu?ne0#VmFZ>0w30}0RYeu@<#a(f;W*M0w30}0RYeu_D1;-qBoHs0w30}0RYeu z`bPN>!Z(p10w30}0RYeu{zmx_;x~~X0w30}0RYeu0!R4}0yvQ%e*z!YumJ$j5e7&3 z5F$8{AOauOumJ$j5ei575JEVSAOauOumJ$j5e`TB5MnryAOauOumJ$j5fVrF5P~?7 zAOauOumJ$j5f(@J5TZDdAOauOumJ$j5gJGN5W+Z-AOauOumJ$j5gteR5aKwIAOauO zumJ$j5h6$V5CS=oe;@)M*02Ep&=Dp_`4A#Gkstye*02Ep&=D#}`4B=mkstye*02Ep z&=D?2`4D0`kstye*02Ep&=E36`4ECRkstye*02Ep&=EFA`4FNxkstye*02Ep&=ERE z`4GZ6kstye*02Ep&=EdI`4Hkckstye*02Ep&=EpM`49p+e~};pAJ(t|0MHReNBIyU zI*}j(AJ(t|0MHRiNBIy!I*}j(AJ(t|0MHRmNBIz9I*}j(AJ(t|0MHRqNBIzfI*}j( zAJ(t|0MHRuNBIz;&N0w30}0RYeuW=HuDB0G^F0w30}0RYeuYDf7HLOYQl0w30}0RYeu zZb$hLVmpx_0w30}0RYeua!2_Pf;*8Q0w30}0RYeuc1QUTqC1fw0w30}0RYeudPn&X z!aI>50w30}0RYeuenKszyScTApn3<{R04z^HTw-vjG6m^;-g|)BymH0f06+2_*oO0RVtf004l} zto#3$mH+_IApn3<004l}sQdqyb*4x8q6$E{!U6y_bEZf6U;;q-U;qHL&HMkCe*ysc zlmaS|f8YZYs?_0k=6hImjD2OQu9+m zsr6ezsr`GysK5hX)}aDG`G5i{k!k<`m*fKgDy0fQxq$!x(8CviQo^Dbks-i0k-z|e zQpNxOm*D^Z7at%4)%|^0kx2djm!Jaxe>JlU0QpJ&|Ce9^05zZi0MMcV0MG!O zNBKh7NBQ9c0;*~|0F?6s04hKO05u>0fKpoh|CfFG|Ch4?0Pyqt|Ce*(7?FAY|Ch}C z|CeCq0J%Q?|ChD=|CeAQ0l6UnfKp*T0J*aZK>7W9!l=LlU)KG5!l+^Z0PukT0FVQ! 
ze}Ga#fDe%&011)60Dw}P|Nj@({{NR9{QsBb0|2Vz0~0E<3qbk(d%~z<008iT005A~ zs(?~LfDe%&fC-Uc0Dw}C|Nj>sAOqF?d%das0|1fpQvs>e0RWKo69D;u0xFSe`u~@J z2mm#+0RYf-rbqdp0zmm72LQBWrbqb$e*l!y;{l-Z0sw$g;R6GzfB-=Gn)?5jpaB5T z0RVtf0LVxAHv9jVAOiq2lmGzGfdBx|g9?CBf}$9aA;2S%U;uzpZvX$6U;uzpfdBx| zBMg92f}$9aA;2b)X#fA0bEZf6U;;q-Km-7^Q2YOv6##$|e*ysc-~$yZ^%Fq(fAdp8 zsr`GysK5hX)}R3Z&>;YTQULfz`5F8FmmmZHHIxD>kpT#RQs4s=s&M=Nm-PbxDqsWv zG?V}U(BuOEDuDn1&_fG=Qi7rwks)9ukzfFTQcC~-m)!sV7XSd1(%=E0^8Wq*m!JXw z`GENUwG{w>65;><7nA@1(BuOEe=30h0MH`|fKq~@7?B|WBavVLfKsyo0MI%A|Ch`E z{}+@10MLN|0MNqB#~eMfKoC4|Chu6{}%&5fzkp1fKuTD1FC?6B#}${ z|CfRQ0MLN|0MMfgfKo!B7?B|$CXv7ZfKnv?|Chr5{}-SE0MG#RNBKYi1^~3z{{I(~ z(32RGxRVV5P|@Z%7GQbK?aks+WEk=c{Alp}v2 z{Qs991OPFB002}V0Dw~cd%>u{17Fq#{{NRv`~R2Z0|2Vy5`a>G0Dw|r008h|008hp zfDe%&fDw_w{{NRC0Dw~cd%>u{17Fr3AOqC^0Dw}!17Fttd%LM%008g+01}b?0|1fr zTLP)I0RYhRQvs=V008izqDT2606>5Fq9*|P0sw&0PW=CuAO-+6v*Q4fv;hF{;R6$@ zfB=9}l;RkXzyAZNw)_8=srmnxAOZk2-~$t?3i|(-9~=Ym4FmvGfEqygzyJWWCHnuD zfEqygKmY)=002MMMg0Gl;R6$@fB=9}ru+Ywnfd>hApZX}pZoup49Z0{H;|fYLydA(j(2ANv27zkdd)zkdg* zzXJj}oBIEk^aB8@002Pwzy<)cxaR|@c>n$Q*QsM&{s^J3|s^0zomw*mI z_`vu7wdMc+7k~;txiOQCmmdL5ldYE^6rxA@0RTYxp(g_Q0RVu~_ZtBD(39bp6B)1h z|Ce9@0Py2~fKuWE6RLp$fKmY96_LG@4VWfY@L>P|@Z|#+D&zweDh32VxuY6@QbK?a zks%-!k$?bzQm6g@mji%+(gFa0QsDyws^9|?s>1mHm-YhyDu4t4HDCY$@Z*1gQsM&> zs$l_uQUD+sk)D%@m?9_eVE_Q|0RYedAOMj83_!U*2mo;b1OQa? 
zQ$nfzdqJuH!vK-M17Fr3AOqFm+lA5}00HZu0RYedAOMj}1prh53_!U*1ORb>{{K`V z0Dw{f3;?+#`u~^Ye**w2XBtc?VE_QoV+ubX6fB=9}VgLZp zBOriMLVypE3;q9>&bK0So}S68is_mXTm%IP}7at%4)&D~Q(V+nVuwVcH&;WoNk^OrDsr`chks76}0Qq&|7?Ga>0Pz$6l$xU2K)Ip?05qWi0MH>I0FfXJK)D|X z0C7zL090Y(7?JfOK>71iL#coOfKvT?La8Aj0g?Yh0g=@C{}&%11J$+{0Qmp}01+Sz z0J$If|Cjay0IJFP|Cgd3Oex|60J)?NODW?E0J-E7e*n4V0{|-K0~D&{0~V@)0Dw~C zBY;w2007Wp007V-U?h=3fDe(b{QsA>7eM)8;uw+jBS87{Q$wkM0Dw~cdqSxpAOVs8 zLjjSs`TrLp0Dw|q0Dw}U0RYgV+5ou$6acxa`Tv)v8cZqV0{|+c3QH;C0|2?>3jn$0 z0~D%&e*l0|BO`!PVE_QoVgLZpAs`}=LVypElKlUd^&>#}Vd5B(fB=9}^HW2qAs_*f z{d+>G|3d+hn)&}1A0PwO|HA^&{d)tc{lfr}qiX=U{6_%!!VUnmw_^jT_yYhcpaB4| zvr_@7xoZTe^8*2@0Psiod;kE@wOazIx&Z*te}RJ=k@N!ss+N2}lUm~#kr@DhQoAbv z`Jn>`sS5xA6rcnI2SR5=423zPx?>%a#9RG>dV z`Sk-7DjNWRQWpR~`Jg*M`Tqg{@fQG>n&JZ$D&YeYs^9|{Dlq#0m;3_&Dwg~Im!AUw ze{l-^|5P6k0C9i>08}3=0CB(yLwcYC093O90MK*d7?I(lkWzGF0g+)ns&bVKfKp=K zK)Hi@cDO52p_?K6Wz(UG0UMzM05m@X0CAiHnHsZGL8-M{ zLaDc7L#erIM5((gK>6SU94h=rK>7W9e?zJN!vc}O17FslZveR&{QsArZ$PsAOqE6_y3oGe?XIS0sxC202J#f`2SS=0~spl|Nj>sAOqE^`2Uyo z0|2Tp`Tv*a0{|*w223gD0~V@e4ofNF69BoPYe2c>0~{*i8vwb00Dw~CBLKN#VE_Qo zVxv8~}h)(f|J! z004l}q5uCEA0PwO|APY2{d)qb{eu9J^HT$; z02Ch}1Jwxo|Cdkr|Ci(g0IKB!7Aj%@0MKCo0MJ7}fKoz$50N3jDUpBxfKtNy|ChEG zK>2f`laik5@E{};9w0Qn8~|CjxH zzo}pV0MGzHEs^~L0Fi+D|Ca#(fKtE!0JLEM0MH@8Es^{AlXjpXf6xHnEs^~L0FiL| z|CitZ0JO6U0QvI+0IGcY|Cb*e0CK?r093OJK>7W9!l4Eq0MPw=!l+;ne*n4U0{|*w007Wo007W~MSxO5fDe%&U@nos0Dw|g`~MgHd%me) z007Vc05FmL0|1fp3jp~=`u~@p006Y}0|2U0`u~?78~}0w0svH@0RYf50Dw~90~0DU z3;?+w`u~^Y0~0Fq3qbk(d%~z<007Wo007WqNPtp8fDe%&SimunzyN?!J^TL`-~$t? 
zLIHqM0RVu~5%>R>^8)~?9}oa>X#M|Gp#cEUAPfMx3;O?;ZwVgLZp zVE_Qoqep;JLVypEAz(0(zyN?!DU(H^OAgZa|CfLU0J&fSKso37lb)eY0bi5tp|Citc7OFV*|Cjdz0IEilk)oF}5cdC<;R6+_;sXGx-~$w@`1k*pp#J|g zpff=E;R72g`U4y)C;9)E;R66FUQu{R04z#`pi1{d>Wvl=%M_ zA0PwO{d>Nt{R04z^9um^FZchK^8)}X1@`}!fB=)yrxt07`Tv*T001=r0Duy^`Tv*m z3qbh*0Dw~cd%~!b`TrMA`Tv)d_WzgU0|2V!0~0D@007Wo007WJfDe%&fHje0VSrMA z0Dw{m`u~>z0Duyz`Tv*m3qbk(d%~yy0Dw}0`TrLmAOqFNlToNU0b!Gks3-y2leVZr zE#mh7mlXhjQsn~xD&_+eD&zwbDkEipQh)$}QeprA&|v@o&_aL@ks&}fk;{`MsZ2j2 z0g)f||Ca&Wh0-4Y0qgAf|5Ts>0MGywK)HVc0BdUb|5TJ>0g+*TK$C!aVYmPU01<$C zA-J`oumIpXk^OrBsr>^0k@Hglsj~q9ko8*vsbQi=`62*7`Jzt(`2qld z()l9*`FQ{Uu)OyFmmmRu05m4{|Ci(g04iYs0I-8{fKnoZ50N22JCT3@fKn6r|Cay& zfD*s|1*yOP2C24V0g=D|2dTgR2&te03Mu#V|Ceg?|CjUw0IHNW01@-^|CiwhfYRdw z0IFmF0I*^J0I-BWJdr|xJdxl77OH3X|CjXx04kpX0Bb-408}@B`2UxF_5YXj0|2T7 z_WzgW0{|*v006M%0~D&{0~IRcbAVDJgAb7*pgfUa0Dw~O`2Ux4qDT1v0D#h>PeS=& z0zmme00GK@2mrP7Q$eZqTSBS*dqAoABS888gA$Rz17Fs+V*{zUQvs2;LjjTE0~V^` z0~o5{0~e~`_WzfE^aB7Y00#gyx+ehnIfD<8dH?{hT0=jPwPOO20)T+h0RVtf-~$z^ z!Sw%^pi@A(0)T+h;R6+_LG}Na^aB8@9{>RH4FdpFv;qK&p8^%@>Hhyz^8*#C2LJ#R zKmkxUVGcmKAwU3;a{>X2e*zTi;{E?rUrsk$cs`5*NEm#+2ym*oQhDq#Qsu;c?2Dr0nj zQX+#7ks$zJK9OJmfKr45_0Qmp_fYQGk0QsPU0FeUr|Cb;E05x-> zlgg_if4BGlm;Utsm-GVws)Y6bm*oQhD&zwcs$u{DuweiI&?A6=QbK?aks*KpkzfFT zQg8VGm$VN+`RV`v7qkxm`RDWhm;HObsr>^0k@E`x`Lh84uyf)Vk#Y6^mti77xjz5^ zYoGuCRE75cm-P1kmuvO^mtZ0Rx$_G^`TcvsBB;OvU)KLa1JV6^0;&Cj0FnE12dP^m zKq*`VK>7JI0Qp&yjjR`cU?l)lvr_@7^8*5^q5%MqxE}!dwOazI^aB8@LhwiV^#cJa zU;qHHp#cD}0Dv2jA>bI1x+ehn_5&5FSO7rz`U3>2qelSw9|HjKBa}e-7yy`>KN$dV z;D?_YKS2O-02H4ZfAauw=?DN+zzzU3V;2Cl-~$ONC-nc9phEzE`8oCfm!v~L`Q`%v zD*OWg`C|Y8(B}gfs^tS4Dq}zck>Udvs^J3_s^9|@s+#lvm;3_&DxU`cF#sR{R6hd% zadQel`OOFbR5b?xQFj0U(6t5tk@vy|k#!Okiv$1w6dxc1)qesN>!1e!RNw;?s&VuG zmt+<|slfsOOXCB77pfuz08xO#Ad7QC0Fi?8|Cjs&04g5@05M_y|5QTv|CfpM|CjRv z0ICo5|Ci+h04ibt0I*>I0MO?H8!F}l7^>t06RJXh50N1N1(5@rfKq?}fKvGP|Ch5< zL8-M{LaDc7L#etaK>6SU94h%UK>7P~N2&dLLaF~l1ChXg17Fr3AOqDv2mmyr3IMd= 
z0|_eO^Z%FR0|2Vv0~;!&KLGjt^8c5hLjd{j^#7Oq0|5D?mq)4Q0{|*x007YE0~o4f zfB})>0~e~{0~M;^0~D%J^Z%Fp0{|+w|Nj@@0~D$?^Z%DZ4?(HH0su=y7eJ|^4+^QG zM?m?bLjsY1q!$UPfWjb)p!5Hi`~v_g9|Qm~;1~c@8~}h)x+g&Svr|E--~${gwOc}| zw_`)8`*TOB{d+>G`7=QI|3d?jzyn{_A0PwO;|~C|se=Hy#`OP}se?ed<^upK`~v{_ zV*miq=K~n3m;3_&`KfzAx#j}^Dy2g}`C|Y8(B}gfs$<{+k>Udv zs^J3_s^9|@s^kL+DiQPlm;3_&DtG_?7ok4@`Kf~dxs>z&m#Kq5xuHKm`QY^bH6#>V zDc}Qt2`VP&K)Ixs4JoE~0J($n|Ci?j0IJ{v8!D$i0QsNt|CgXc0Qr&h|Cji40jZ~l zN2%ch0QsbsO)20D0J)}jK)GW80MOzC7pi060g>SY6{_F^6sqy^|Cjs&04i7i{}-V@ z0Qsqd0J(JY|CgzQK)InmK=~l(|1~5OTq)px0|_c7=s>xo7Y-?=djPp(^Z%EiLjd`8 z^#7Oma{;NO7fvbQ3jn#MdqBBk007XZLqPdspaYTn0|2?=0~e~{0~M;^0~D&&^8c6o z0{|*J|Nj@EKLGiug8;c$^Z%ErgFv~VKS23__5U>_6kI8(lMN{*=s>yP0~;!(n+_>| zXY&7-phE!pS@i#x_;UfNsgq49;0pk`rJGJEV*miqp+i9VV_*W2;R68q;sY0|=K~2U z;R6+_-~$w@xbpv(`~v_g9{>Lrp+5lmse=HyJM;gSse?edp+7+Rpzr@RBotgJsS^$< zCg?!9rF#Imrh@>vKJ@>W_;UfNsS{3rDc}nLxuttRxnlqT(58bxxnm#%k^2Jxx#9yC zs^J3_s^QsM&>s(=81Qegl9&>;W?kr4N5|Cize z6RLxmfKp)q0MLK{fKnkK29XN)|Ch5? z|CfdH|CjRv0ICc0|Ci+h04ibt0I*>I0MO?H8!F}l7^>t06RJa;fKoz$50N1N2a)`f z4YFqvJM#aRq5%M~;R66F-~$t?La>vDvMN|X0f15g_zyW^007YA0|2T*UsAOqEsvlJ;g^#7N@001-~0Dw~SQ$eZyd%~!|17Fr3AOqFv^#7OP0~9LY z0~0D=lRdLI0iu(3vo-;HldZEi0RfZcvqOKN^8c5h3jj6p@&A{=2>>*-0RXUo;uw(u zFhRLL1ORbi002|~ltB5w2mrJ_^Z%FO0~jjc0~aa^^Z%D12mmx(^8c4W0RT0C;uw(t zltB5w4*;~_0~M;a@&A|M0{|-E0~M;T^Z%E?1^_jn0RWHy&`0?I06_Uh_Wzgm0{{jp zpaTezwUgzvBY#fw|Cjm$04m@E6{?@{|Cjj#0IJ^t0CBAz08~5j|CfLU05zgx1Cika z7ApJj|Cb*G05PBl092ssK)C=66cQf@KyhFN092IX7?FTJLAe1008u|UA90Bc08}3U z0CC_KpBjKZ0l8%L|Cg)s|Ci(g0IFgD0I*>I0MG{dfG<+z0~#uWUVu_UfDe%&KoOB( z0Dw{*_Wzg2^Z%D~qDT3m??U-t0zmm7007EB8I0MJ9DfKoz$ z50N2Y3Xy;SfKmwd|Cj9l{}|CgWh|CiVQ{}&%1 z1J#t`7=Mv~IzhPs1OQP#I3ICo4**mj0042|7oQq{Isv&P_5YV^^8c6Q0|2V!0~#t~ z006LI007WJfDe%&01%M|`hZfSU4T+x0Dw~6_5YWM^Z%E?|Nj@h@&A{g0su55@&A{g z001>s_5YWk!vc}4@c);B|Nj@@0~M+<@&A|O149@p;R66F;sX~d-~$z^)bjtAck}<3 zumArSA0PwO00aOLfIb1aa`XR}s{j8N(DMJ6`U3ze0sw$gK>&eLAOL_;Z6SU7Ahw7|Cb;DfKmYU|Cgu#{};OS|Ca&s|Ci(g0IKB! 
z8Y*G{0I*>I0I6?g{}%uR01<#X0l6FV|Cdw$ zfBzTc0{|*w006LI007YC0~#uWCxB8yfDe%&zzmUq0Dw|u_5YW`|Nj@C^#7OU@&A|Q z0|2V#0~#t~006LI006M$0~;#CCV)~xfDe%&Kp2sL0Dw|h_5YWx|Nj>N1OO2rGy%EH z|Nj?{^#7OE@&A|P0|2V!0~#t~006LIe*gf`BPM`SLVypEA>a#+=7XBme&wyDI?sxbXj%cl7_4 zzVZK;Z{};5^0jVGG|CjxH ztf~D20Fm^Z%D%0RT0$*FmXsqLZGyZZePk|ChAaL8;>Z{};5^0ja0&|CjxHtf~D20Fm^0k@E`x`I7+v@THp)si&U+`DLP$D!*bDfAasAKm-6awAVqYw%NtU;qG+ z0N^c={R04zKJx#U-~a%$vkL(E^8)~?O7j1g9~=O3fdBwhvkO4^{d>Zwzyn{_LIHqM z0RVu~-~$t?74QF-9|Qn#2><_7vkO4^qX7Vr{d>ZwU=RSg{}`TcvssK5hX*5CsZszL#PQUL&f()I5Dm-7Pvsvi&lap?a4RG|R? zkRS{Ix$p7+m*fKzDzghf`TcvssA2#BkYNCS0FVRAfKoz$50N2YFpWOr^8c4z z@Bf!L@Bfzo0Dw~cd%>u{17Fr)0Dw|r0059-006KA0Dw}1%z#otfDe%&U=)$r^8c3r z0DuzS|Nj?Y0Dw|r0059-006M$0|zR91I>U^LVypEAs`iz&+`A5004jz)c^k%{d>Nt z{R04z^HTw-^8*1Y^aB8@PwxMhQvm=pe+B??VE_PBpaB4o6f^_*ZSen>004ke^HV{o z{d>Zwzyn{_paB4o0N@yr6qEq@6#xIUApn3<0Wbl%V(|Z$004ke^HV{o{d>Y9sK5hX z)*m1P)%|^0k%0gJw4wn3kU|hZxgr1nxk8g+!DD~v?*EtI0~IRu6F~X%Q$eZy zd&8)}17Fr3AOqE4005BW0|F`)0Duw#fDe&j006K9)__tW;1-d90Dw}0^8c6W|Nj@E z0RWKT0~0DU3;?-R@&A|L0~IRu6F~X%Q$eZyd&8)}17Fr+0059-006M$0~0Ev)__t% zfDe%&z!xWxfB=9}6##$|Z}R__-v9p>A0PwORPg^7A0PwOQt|&6A0PwOAnucD!Zs43 z@Bf$md&Q{017Fr3AOqEI?vt{@BLSk5F1|(*EAjuAfB^tCwAVqYwiiJ8bE1^0k@E`x`I7+v@THp)si&U+`DLP$D!*bD64aFY4B{0Qmv{fYQ_g0JNc_0Fi(IfKs^b|Cc}m05zb8LaAb#hSGxuK>5P3 zK>1+;K>0xc0m`8P0JW2p#S=ZSVE_QIBLjd^LVy^NA;1}thw=ZH004lJ@c;i80PX*m zA0PwO02l;Q00aP09~1y_05qQ(zyn{_A0PwOliPv0 zKJNdQ^aB7YZ1De=v+e(vy!S+E&{3TlP<^>0^9GCM#wTI!tVc<^8)}XfC2zD z-~vFoLh%2W<^TT|4DkP#Q|Kpv69@c)-U1AtQG z|Nj^K@Bf!W?f;kc0|2VY?UUZf7AhkSfKoz$7?B|$9g%K`q0c$7&092p@0g)d}AGqxA|CjUw04g5@05M<#092qiK)C|| zfKmd0fKuTD6{`5{|CjUw04g8@05!D}K>2f`lhVmEf1ogt6aat{W$^!(wG%-3@BjZ7 zU;qG+0KhSkfb0L4wG%-3>i_>2U;qG+6aat{0N^r_dh7p}wG%-3Ks{R04z^HTw-lK}wG^aB8@VxmX+LjXYeqZa`A z0|0>1lZ46~Dy;ASmmdTGF#rJoRG=q7xdQ-zQUU^kQsDy>s?qKLmmdTGG2j!QT63b4 z%E~8ypfHgu@c)+p0Duzn|Nj?Y0058xATW_e>;IPk0Duzi|Nj@O>;IR40RYedlt=l% z17Fr3AOqEc?*A7bAOqF?d%das0|1fp3jq0(0RYgTlK_!(qDT2*0zmnp7XbMI0D#he 
z@Bfz{1OPFh7eM)80zmlz00GLO006b~3qbjkaLWujk?a4L{d>Ks{R04z^9um^lK}wG zV4_F)zb62>0RTYxp%(!80RVu~-|YXFzySa>ppyWRAn*T|bE1>B%PI;8>;ISX|Nj?u z>yzrsHCNEH3jq1^0|2Vl&PVwGiGb3#8vyy&0zmnm?*Etd0|2T(3jnkOiGb1q0Dw~A z0|Tl6>;ISH0~4y@2Y}Mx0~M+`>;ISGe}K~S0{|-G0~D&D0s)aT>yviOJ`Q6bfKoz0 zGm#fB=9}VgLY;LO?T-w(tL!v=2b}AOL_;{d>fyzyn{_ zA0PwOgCBrWfB=9}VgLY;VE_P-LO?T-Az(C-s_*}oAOL_;{d>fyzyn{_qaT1$fB=9} zVgLY;VE_P-LO?T-ApkXzpzr^ev=2b}AOL_;{d>fyzyn{_lVr^mRX{V5A;37110sM@ zfB=9}lJEbQ-~$z^eCz+0zyN?!w;MqDvr|E-wOc}|%K!ft#_s=@4ebAyys+ZUK+zDfKq?}fKp-r0FYq-0FXjJ zGm#;HJdsTAlcLTl0^#eE&dw?s<0ODmfB=9}VgLY;VE_P-LO?T-A)qypFq09_BMk#3 zfKoz0Gm#-6JCP)lLC+U|Vcx zzyn{_gCu}bfB=9}VgLY;VE_P-LO?T-A%HrO4)6b$wG%-3AOL_;{d>cxzyn{_A0PwO zMDG8Wi|hZFcca^6rxg&?kQa9+5Zf|Cb*G05PBh092I$0MMY$NBIB%K>6SR0JO6U0QnOj zLix(-|Citc6RNZS|FyFVK>2_I9+Bnk|Cb*G05MGUmU)CQW1Jwxb|Cdke|Ci(g0IGvLfKosAOqF>?f;iU>;ISJ0|2UHJAhJR0059d008hpKr@jcfDw^^ z0Dw}r?vwV=PZa_lk@D;RmmdTGF#r$%RDb~h&;ZPnTG1X_3hn=wVE_L$Vgdn?;R6;b z-~$z^?dkuQ^8)}XpbP*s%I*J`bN~R55bOV!ltMF+fyzyn{_-~t|za{_=;RPFzl z9|Qm~A^HDQf$jg7$m;)>qd|aDVgLY;L~?006M{0|2VQ0s@iX0~4z5=>L~@006M~0|2Vw zA_9@%0~4z0=>M1V0|2Vk006Y%0~D&@0~M-V?EjY`0Dw|}0su7F006YX0s@iX0~o4S z?EjY`0Dw}!0RS`s0Dw}H0RRiM;UWT&-~$t?PwbOs)ILVzLx56%0Dw|r0059-006K; zKr@jc-~f@u?f;hn0Dw~90|6@id&H=~17FsoLx56%0Dw|r0059-006K;Kr@jcKmd`t z?UU-%Mh9a60I*{~0+Tn@Jw5|QfKq?}fKp-r0FeCy0FgpKGm&8c0I(rI0FjXG|Ca** zfKuQC11kM{!KlCkU)CQW1J&UJ0IJ{tfKn{!ld{z|PBQHOm-7Pvs=x*Sv?Bn5(gFa0 zQsDyws+8#em%jo4@n8S|umE5Jks<(sQsDy=ssI3#nicB*mp}smHQ)mis>P|kU~H+ks;s(kt0ojQh)$}QZMcQm*4{v zsuKW!(irLgm$cVGsmK5S7at%4)j|LO@L>P|kONA9QbIs8ks$yBk$?bzQWF4x(jo2t zm%ac07at&#R@WYX&+Gq}zy$!bvkL(EBLITZ0sw$g;R6Gz^8)~?FzEl6)Bpgq0sw+i z;R6$@-~$w@Z|MJ*Km!0ZwG#mO0sw+i;R6$@-~$w@CG7u~^#cH^U>#HvkO3z^VcCl+vxw7KmdSJvkO4^ z^bbJ!_7_0;{d>cxzyn{_B<%l}Yw7=&z^oLO?T-As{%B z-|UlC*diWb0059eKr@jcpazjc9Dq`Q0Dw}_?EjbG0~4yd=#!S%Djs0~0FXjJGm#;{ z2a!V^fKq?}fKtEg|Citc6{@M|lit{|6tMRj0QvU=1FD3gNBQ#u0IHx6P?MwCEf#R- z|Ciwd7OJBI0g>VZ6)NBZ6RKD1ljhkX0?y}?3ffKrzUPx#+EOy9=l_?${{J;X008h| 
z005BW0~V?wfC-U9Er3!&Kr@km0Dw|J?EjazA3*tG0)SGKqLbd*Is)?RlN#F^MT6)6 zmjM8PQu6}P|kb^6L zQXyankwQQ-k$?bzQWleW+Z;)N3Xy{@fKoz0Gm(G*fKm(W|Ca#*fKs?0K>5r6{})5+ z|CfsB|Ci+h0IK8z6DmRg0PtY|0FXm3fKnkq3z0%VGm(G*fKvVIliu4J0&?e*`r9E3 zu=isFskdVSk)xuMCfqGs(7*o(sdEAzktX6mjZ}@ z(g6T~Qs4s?s>J92mtZnLxxXs_`M>}GHQ)mis{85xmjDw0H6Q?hQeZLwx#0sCs-Qyw zk?rb}uG|)Cuj~Jp^8)}XAP@LAxoZTeSO5U)UbpD~m-7PvD&YG6G|cM%mlNp!m*oQh zs^S9}s^J3~s(=81Qse^@DuXwGQi$vSm!c~``Qe9vQs4s=s{HBym%#b|wV*3N`4a$u zQosfPv^(ejm$_?1so(<>Dz&1M;@vKPtm^-l^XLDUIm*xWis^tR{Du4ihQbGU# z@L>P|kmLgxs$(>OQbIs8ks;s>kv;4Gm$LzYQlKk9`JngzwDsoym%IP}7at%4)pF|p zmvaVyQn=^;m*fKgs$(*MQbGWK0PtY|0FXjJGm#-643U5UfKn{$|Cg@+{};Jy1gWLw z|CjxHy{TgW0FYt;0FYx~0+Ia#0Fm=k0jZP$0I)&;5|IG{g3^PiE6RLFq05s6(|CfN4NBM5&|Cb+=edQYp9_as< zfR;!3WapE545m0JIYzLiye1 z|Citc6RNZS|Fx9PNBQ6b7OI~D0QpDf|CjRv04ksY9+C9v|Cb*G05QM-094=u6Dspl zL8GUmU)CJz|CeLu|Ci(g0IH)pfKq?}fKoyL0Ps#h008hpKr@jcAU=`T z>i?JE0~0FqQ$eZqTSBS*d&8)}17Fr3AOqD5>i?Hh=l_@F0|2T+Ie=0^008ho007WJ zKr@jcfDw^^0Dw}*>XY{1svcugfKq?}fKvSf0Fgof0PsRUGm&8c0FWUd5|NealfK~| ze>LX+mw*5OH2r(QsK5hX)*m1P)d=SQmmmQEHT`?Rs6qe$@L>P|kYiPVQbIs8ks%-y zk-z|eQh)0I7yWy|s9*p9kN_YPk&os77yWy`sr>^0k*Mkam;HOesB-837at%4)%|^0k@OD$`3>g(m!Jp$HM0u<`I7+v2e9-504j6hlTzX$0UMKb;vau|<^Pv;0zmmG z>Hn8u005BU0|2Ta02PtrB7joh0~M;n=l_>>0zmoo0{|)@>HnAF0|2VlVg!+400597 zKoybWB7joh0~V^e=l_>>0zmn-0stx&>HnAF0|2Vl!U&P$B7jn1005BS0~V?wfEAIi z=l_?r0sty?0RS{$0ze1(paB51CgzhG<5DVM0058xz!i}p;79o(06_Ua008j-0D+po z17FsG0RXUI0058xU>1=f;FFf)9tvOp0FVFx7m*?0lfL6C4dO@nU;;q-U;qHL@8y&B z<1YcClP2Ug4rS^8mw*8PG_(&u`E#O^YUDNoaOabvZ)<&y^GP7Md;|Cay)05#wP6sic5VdWPB{^XN-L~s0RS~x=>L}>0RXfC06;n6001<`=#%Q@X91#}=zyn{_(B%J@A0PwO{d=*gVE_P-{R04z^9um^ zlK}v*_7?#8cA`i5A)G>!*XA7pqvVtJ<{|>}=aU-e8eUW6|ChLf1CihZ6{@!@0Qt0A z0J)Cj|Cjj#0IJ{v7^;ip|Ci$f04k(I0g>VZ7^>j|87kld6{>yc|ChiA05$LC|Ca(Z zfKovSfKm|Y|CjUw04e|n_&31-fKtfklal8qe|qNsm!g9Kk>LXXssI3hQj+KYm%st| zH=#>F`QU$_(%=E0@;?9o>n#BQRNw;_s=DR>mr3ORmjNVzQvc}xm*E2!s^9|^s#xa# zm*4{zs_NwbmjMBQQs4s=DyQfFm*4{wDrxBdm&E4(m#gLfm$@TA`Lx$TskdW8sdJ)7 
zlQ-xs0$1phX6PRHBp5*Y{R04z^%DU3l>q>d^HTw-^aB8@prVu7=u0*JzY+p8fKnldfKuQC6sn`> z|CjRv04e|j_%?U|05vP+|Chi405rDWLaDgtL#etaK>4+zlUnIA0*&aCj_Dc>TI2th z-~j+N-~$$_^W&4Y=^`@v=l_@B0~D%|=Kq(`|Nj@l=l_=p<^PxA0|2T(008g+Kp2sA z=>M1C0~0C}0Duzi=aUNRCNl8j|Cb*i1J(aS8PWZF0IB^00FkHz7?JY}0Qr*v0FVL= z0;vQQ1F5DX1No;X1Nma2lVa*}0z2fB*6KD+&FBA@-~$w@o8$kN;sX?_;{yPy0f2zg z-~$z^*5v<}^aB7YfC2zCZ|DD)w&eep;sXGxLjVBq;R6&ZVBi>$A?W{?wAVqYw%nZ|&FI|Ciwd6{_L`0IJ{v z7b>RaliKSZ0&3-x`s*?R;pUSr>>htr=Kq)U0{|+Z{Qo!L0~M-CZ z>{bG!=94Dv9)B0+|Cjay04iVr_%^l|K>6PP{}64I{};9w0Qnx{|Cb*i1J(U|zN!5K0Fm=k0jcx@0IKr?0jgpGK>6SU6sq9^0;-|_ z0JIC{|Cay(05o$#0I?ze|5N|~fYS3*L8<+F!l=LlUqjX(AOqFl0~4zAQ$eZyd%~!| z17FtS0{|)j0Dw|o@Bneh002~j0RWI70Dw|s)JOSZ&?1XL3;?;n17FsG0RWI8)RV65 zEq{<9)JORs&>)Kd3_!WS17Fr3AOqDP0Dw}!17FsF0RWHy)JOTi17Fr3AOqEa006Xs z0RWI8)JOTn|Nj@j17Fr3AOqEa006Xs0RXTefB=!d|Nj@j17Fr3AOqDO@Bne>002~h z0RWHye0-B3)JORsXkhrj17Fr3AOqC^0FyfJ6#=l5Q12rgumF5~lOccrksxSb_`m~S z)*m1P)j$9M@BpA0laKEtKajMe0Fh&&NBQ6b6slwZK>4H>0{H|0fYKl1|Cc}j0Py2~ zfKuWE6skc2fKmX!G?6Xm|CfLP05kvqfKqd!lk)E>0Uwhc@D~#8;{TT)1ORd17oQsJ z|Nj?_;*&=3X91v-uJ9uP>9f-CDFFoVApj(k|M8Lmpp%mFVgljellJl=1-Rw^mmiZI z^EM9fK>z^oLLfDfA%HiLN|RypA`b9D008hpAT^O8;5Lyylacct67WF)0Pq78fKoyr zHIX4eHjy%u!Sf^mqLb$HCJNi-|Cc}k05o%=lNR(=0^jA6ZuB01*X940^#cGZAOiR| zwOc}|-v9p>Hs$}9ed7O@<^uq#z^ogBXBPLLfDfA>cQWfB=9} z@8$oOwOc}|(EtAzDCPf`Z{q)#<^uq#z^oF^0k@HglsgnT!kn{rps$im%9`zj?@Ie3o@PiwGQbHg#ks$y% zk$?bzQqJZ7mvf?%R`oW2d*%O^ApZX~p$kSSwG#mO-~$w@^5XxO^#cGZfB*nBwG%-3 z>Hq&1A0PwO@Z|rOH{$=7<^uq#z^osd*{d>KsApn3<{R04z^HTw-lK}t+kn{rps$im%680h* zQRV-a9|Qn#fdqh3XaE3IU;zL$0RVtfbE1<@_9_-^<^PwV1b|Xt{{J-r0Dw~Q|Nj^J z;FFH_8v&w|s`e%UDwEOn908(}?)EA!oZ$bLVE+F$pag(Y^8f!A)ZqV@A0PwOfCB)u z{d>Nt{R04z^9um^^8)~?IpC8<_aZ<%;QyDP001=L0~4w(;QyB)1AtPX{{J-rJ%Cc+ z0~4w|<^Px90~4zA3qbk(d%~!!;QtpNAOqDQ0Dw~90|6?Np!XaR8Q}ky0096s;Q)YA z-~$t?EtAdn7g=ZI|CjRv04hNL|1@Ca|CguX|Cize0IJ~w6DsoyK>7W9!l*+40MK9{ zJCP9O{}&%11J%F-U)CQW1JzXI|CgKL|CjR%K>7W9!l*(30MOwB0IEQMI*|pFJ@_kT zV&wmq$>RT);R6#YU;+TN0uVs?-~$w@T;Ttg^8)}X69fP?z^IBQAha 
zLLfDfA;3P7zyN?!_T>MUv=2b}-~$sX^9w-v{d>Zwzyn{_VgLZp-~$w@Lf{9HN#K*t z_!@shEPzr%AT^O8pgfU)0Dw{x0DuzVnsAOn+U`5pz}Kas}YlZyEsGVtU`2YZb60=i5sh|J<7w_W# zmp0)4m*)cjs^$X~D&+$clgIrL4x>GQQbHg#ks-hck*DL6=KVGj`QHDR;sXGx;R6$@ z-~$w@;oy@l{x%us-v5_j0RZsh0|2Vx0~4wsz(@HZ2!PTdlYRatYO~`1m-^rTm-GVw zs(|4Cm*xWiD&zwbD&+$dszLw&&_MtI@B=}BQbHg#ks$yIk$?bzQWF4x5^3ZAm$VN+ z`RV`v7hnMZ@BqL^`M?8T)*m1P)nEYt@Bp+&`M?8T)*m1P)nEYt@BoaH4*wb+a{&PG zbO8YHV4_F)0RTYxp(g_Q0RVu~^%DU3mBN!o{~jtg-~X3D008jge}Gcr0~V@*0f15f zzzdN@;ggR47D-p$|Cb^Ff>PlF7pmX`7OLCd|Cay=05t;$ zfYRav7pfr$fYRUt6{^kO|Cc}s05#wP7pfND|ChHLK>3uyNBN-ufKuQC6sjeY?f(}V zgGhiz^ogG7K*LLfDfA>ay;0OS9c004ke<^TT|A0PwOfB=9} zK>z^o@}b>IJ&R;_mjD2O61Lw$slEUI7ZKwB zmssEbm*fKgs^dj~Qh)$}QbGU#&_MtIutGH;HIX48IFZca|Cay&fD*RfLaDI-{};C3 z0;xmZ|Cj$m5z+m70IB^00Fm=k0jYBV0Pta=mwo{O7ZHv}`2YYwIiLXmG!y`UQgfo0 zrU3yhf6%ty0;$4(fYJc~fKp>%QhA^&0g0g3?pr|Cjdz0IC2C0JI|jg3PlF z7pmX`6skGj|CfLO05r?t|Cb*K0C52c091AXQ~>bfAAr*00~e|RAb`@qwnzCV-~X5N z0{|*u1pqbR0~e}q-v5`@wnzEk0~V^h-~X4Ojz{@L008hq008i%fD)190~V@dpc0Yd zAb`>VA%N2F-It~U0U>|z_hJu`LjVBKz_v&ELJ9w%yR|Citb0E-_$GwVnK094`w6sq9^ z6RO|?7OHaH|CgWv05#$R6{_I_6RO|?7OI8amp%jmQ(fLi`G5jIIluq_G+^FG`SVjj zsr`GxsK5hX)*m1P)!^L!m*N8eszCq%@B<2fQX!xdk>CRqs$Sp!mw^8NG{XddQh)$} zQbGU#&_MtI@IoLpks-hnk>=t5mtfwP?gRlM5kLU|wEcU(sR00h()|Mfkwlk42mvbr zC6{do0Z#$Jm%Ru9I02%U_6Pwp0iu^G2>~tv7MEEG0V4sWmwgEVKNXqW|CfLYK)K-q z7OJ2#0J-1;6{@_K(Fp-F9pl{pm;HOasr>^0k@Hglsq_N@s`V29`Sk+;s$7>L3IR_Q z6yN`sApZX}699k`^8f!A-~$z^6PJYw0YVGV_TK`j_X7Z`fTEYu3IReHL)`zD-~$t? 
z&D{T&00aOv;R6<`-~$t?A(t%+0T&+c-v5{N0{|-E0~4yS-T#-r0swKd3qbky1D{%y zqL*R|0V)BZmyHVnBLR4qu?qnn3()o#0QvR<0IGnZm)Z*fHUR{e5exwrGOFGGmtX?` zHKGncso?_?s^9|_smq7at%4)qt153;`Db&X?H?0W$$bmkkX8 z8zFY!{}&%11J#h+{}&%11J%ad{}&%11JwxM{}&%11J!z$Neuy`0nwL12muq84gnwm z{FfmP0XG50mst)0D*&35&Pv0(B1!+7u)}r;R67w zLI42JKp+;8gWi|s1_43>>e-hn2LT!?*V+G^0k@VLAsdNDV(3Pe~`J@6s`D6zGw6hBU`SSw-Dz+B@`St?RN?=VQsV;?D&Ya3av=Z!OX33-f2tvTK$Cz#Ad80E|Citd z05z1RNBOoFK>2|RK)Evj6AP0H0J*aZK>767L8<+F#HhdnU)CQW1J&XK04m`F1gf_i z0QrCc0FVFxDv_<-|Cgl-K)Lq=04hQN0MJ1I0MO(E04n1jfKo!B7?B}>8IiyMfKmtE z|Cbj605zZje?a*I0F=_>0if~%0Dw~A0|TnR3IMqu+5eZf8$kJ>3P8D}0zmoF|Nj@W z3qbj{7eM&{0DuzR|Nj@Z-$JPX0Duy+3qbkV|Nj>N0Duy}17Fttd$g(j0|1frTLP(d z0RYhR3jq1^0|2U^qDT4n8vywM06_VnUjX?50D#hDe*!?c{4W6cpZoxE$PWNifP6rc zmH_~e00;n!!U8IhLJR>d z`~w-P01*JRxE}!dxB~-`y6*(3yMqFeK>z^IKmY*HAwV3F001446aauyR{#LengIZ? zpa%!3f4@@z`5(_6xIhCgkpy>vQUQQSIpF{RGz26)x#I&Es^SA1s^9||szKWSmp}*rHKbEO`K6ygxg-Ywv;_wMG@&~{`QQTts-=ej zxxxg1(xgWK`S#lXm!*e5xui!x`Jf2^H9-IXf6(JBfKuY37?B~M9g%1(*0JMqO|Chg00QtY40J)zI0CB35&|Ch86K>4`mL#ev&M5*8d z9IA5K|CiPRDv@OZK)JvGfKuY37?D8$0MO(E1u7xn9+9PdK$8P6fKsIh1&hTF1&hC! 
ze+2o3-T#-hqDT1v0D#h>UqJbx0zmme00GK@2mrP9TSBSx3qbkz8$kK|FF^VId(5c7 z17FsmKS24TEPzttq8O2Y0Dw|--T#-=|Nj@{0{|-HD}Yksq8O2Y0Dw|K007V-fE$r) z-T#+C11^z30f15=fJiw%0042629z3Jf7<_-%m4ouR{#LeK>z^I!zqAL;-VOlAwU|D zfB=9}8~}jQUETkemH_~e$p8Nrq5}hw;R6J!zyd0fMT{P}j@z^IgDrqk;-VOlAs`-+fB=9}Ox^#NtpEQPbOJ!Rh1~y_%-R2!R{#Lef8_%J zs^kL{DnS4M(4#7VQsSZ*ks-huk$?bzQa|1Qm$VN+`MD!N`KkZ^7r5sGsk-k3sk9FO z`Lx*ommeSl)%|^0k@Z^wsa6yK5!QY{llbQYsq+f}`S}9?s`Uc_svrP>(%}OZ zs#p*J5%U8ADp}b7mmm%Rw1EKte~`2f0QvL-0IK2x6{;kpO}XO(6DkBm0JvcW01<=Q z|CjXx04ksY05n+K|CgWH|Ci(g0IEU&0I)#-0I))!7?B}BAd$l*fKq?}fKn6P|Citc z6srB$|Ch86K>6SU6)N)!K>77sLaF%YL#h3H#i+moU)CQW1J(5d7%I0Le*pO)0D#h= zW&n{i*#DRJ0|2Uk1pu@kC;)kY0Dw~C0~M;^3IKWH0~D&^0~V^_VgQl&VgV`t5&*ee z+W(jJ0{|+30su8)asUzG0~e~`0~xAD-2a#L0{|)j1^_kS0~D&-*#DQ{0~e~;*#DQ{ z0~IQ?4?y|18$kK>TSBS$f9FG~^9w-v{d>izzyn{_A0PwO9^C(zW!e9iz@-LnnYzLZBFtA>bg9fB=9}*xdh@=>Pv065Rin6##$|SlR!Vz@-LZBFtA)p_TLneSyfB=9}%-sK%-~$w@w%Grd-~$yZv=2b}f43Vz`Sn{usrctZ zsq+g!`TcvvsK5hX)*m1P)#L*JDnbANut5L-u%jn{QbM2@ks*K~k$?bzQn}p!m&yPC z7xUZymlXhj5;@ubm*fKgszW7!QbGU#ut5L-utK01ks+WTk$?bzQmx$om!1Fr7at%4 z)%|^0k@Hglf2nf;0I+kWNBLm_K>48r0JLHXK)DnEfKs9W05#wP6e{ymL8<+F z!>GUmU)Hq~0Qq160Pp|+Dv>qZ|CeP7K)Lk;04hQN0I)#-0I=i(04f6`fKo!B7?B}B zB9XuVfKrd#|CiMP05xC&K=}gzl+xn?pz;C$fKuTD1FC-te*n3s*Z-Hb6F~Xk0~9Lr zQ$eZyd&8)}17Fr3AOqF46F~VC0DuzS|Nj>sAOqF?d%mea006K6U?-9N0|1fq4*>aV z+y9qT006K60RXhK3jq04006M_0|2UZ+y9pz8~}3R0RU993qbjT0RWHy`hZeF006M$ z0{|*Oqz$=(f1((X^bbJ!gARaF{d>ZwA>b*IzyN?!V%+~1A0PwO0RVu~LIHqM-~$t? 
zHQ4``0pyobK>(m~9{>SsX#oIKz@-0}g;vf}$9aA>b&HfB=9}Qr!QSvkO4^ zz@-K%@=1f}$9afAkMP`J)YhQvG|vs3G7dk-z|e zQbXMT7yWy@sr>^0k@HglsdE7UuwkM{`M);+xgr2S`Jx{H`2qld()AMn`IP|xkhB6W zk$|E|IiLXmG!y`UQgfn5`QQT-Dxx1i`C$S;`9S~y%HatBwewR!sr3^;`TcvusK5hX z)*m1Pf7KuWfKq^>2sv`w|Ce9`05zb~0FiUo|Chi705zaDK)D|T0C8dc|5Tv>fKq_M z2$AU5|Cb;I05#6n|Cita05xC%0I&e;NBOSU|CjUw04iVw05vz@-RsaC7!x4Z|As{S~U;uzp!lD?F;oJY0*#G|*wif{TTG#)VA0PwO z{d>5n{R04z^;-g|bpZgd^HTw-a{&PGp`u6ma{?-nbAlw1A^<@7qCWun0sw&0_ZtBD z7kogI`X>PS^a>Y?l=6MkdIA)SKnpUdvDxpIGk-1|5kz^D=sic<$sb&a3fB66# zLHT1CK)I$j0QsYH0J)vn|CfLP05s$S04hQN0I)#-0I=l)7b>GLfKo!B7?B}hFOh%% zfKr>=|ChMuL#f&S{}-SuK>0xc0I&iDfKnkqFp*yV|Citb05t&xfKp%q0I;GfK>6W* zA)5fKNBLlCH25Q60Fgoy0QqAAe*=qG8vwaO2LSmXKmd_o0sxC)0s)KS0~xC0+5eY7 z0su7_;|P%;0Dw{edpEea=R>Kz|Nj@{0~abIF@REl0Dw|L006K-006K;pcs)M;4qPO z+y9rKD?s^E*#DQf=R>Kn|Nj@P+W(jH*Z-I00|2Vz0~aczGJsNm0Dw|Le*gfmK>z@- zLZBFtA)qmlX50UlpesQ6MA-kAxaUKuqW}LFxaR|@+}8h>A0PwO0(?M|fB^uI001tL z{d>Ks0pJLc{R04zBY0xC^9um^a{&ObzY_rYWTHp;g#bYLr56DC1pt83LjeFabD~H2 zp%*~;U;;q-AOHZ$AO`@ofAb4K`TcvtsK5hX)*m1P)c_bl`LquJ`Sb$2qQbM2@ zks;tPk$?bzQZn2Bm$VN+`QiWn7at%4)#U>eD&zwIDnbANut5L-f3U+efKo!B7?B}h zFOh%%fKn#g|Ch86K>5}G{}&%11J$4tK=}dK|CfLO05!A^K>5%A{}&%11J&dM6e>ah z0I)#-0I-8KfKo!B7?B~sGLe7)fKnCP|Ch86K>5V~{};3m0Qrs9|Cb;90C6Y;095^Z zy{Y5_04hTrfKvSfmrfc1AAcY;kyQWyu%eE_(r~@8=QX$|pk-z|eQu5mW7at%4)xg;Qm-GMs7eW94 zut5L-u;c>(DgzyWQbM2@ks*LHkzfFTQs>(L7yWy^sr>^0k@Hglsef|;0Pu4HDv|YD z0;zMtB#{&VfKv4X6RHFN02Gu401=-80dXJ!092p?K>4+z0Fhw-|Fqx(6e^Sk01*h; z|CgTv0dXP!|5QQ%0I)#-0I*h|7?CyLHIWnmfKp)sK>6SU6RLp#0JO2!|Cc}k05yLC z0Qr;#01p0e^Af{{K|-Q$eZqTSBS*d&8)}17Ftq+5eY8*Z-I00|2Vz0~9Jl z6@XIW0~V^`0~M-(0Dw}q+W(jC|Nj^Ld%UUr0|1fpQvs=S0RXU|0|Aj?qDT2606_Vo z9{~9R0D#gTOdq)V)&H0D0{|+Z1pqamH$b@`1ORbh0RU8UqJKyE-~$vYq8~u{VFEz; zK>z{D!36-d^HV{o{d>fyzyn{_A0PwOwG#mOl>q>dwE`}Yp@K*`fdBwBwG%-3@c;i8 zA0PwOpn?cFtN;I(pc+B>RR936rr7_NU;zL%jMe{_^Va{DqM{g)K>z@-gByTSAwV~g zzyN?!h1&m@wSN;p`QQKl7eN33umc-_Qlg?5ks-h~kzfFTQhnP0m+=4p7vuu~DnbAN 
zut5L-u)`UEQbM2@ks-h;kzfFTQWOAy5_8)Bm(KtH7qt@r`RvvImmeSl)lSv_7at%4 z)nM2E7at%4)%|^0k@HglsdE7Uu=E1}s$il=`IZ|2C;|%Fmns_p9|5A5N*e(t zDumhpm&?}wm*oQhs^kL|Du4ihQbGU#ut5L-u)_s_QbGVZks;tYkv`g&ni~NcBI5;s zQbGVZks-i3ku=)>m$ef>`2YZb65s#-7qt@r`K#5JUL64(96OQs8vyzA0|2U5006M} z0y&Wt0Dw~90~D%3*q06*0WuaN2!K+60Dw{w+W(ie1^^L&41iJx*Oyit0VxZx^%DU3 z^#cH^V4|0b904){dD)k~903~vqL2V06CE%fIN}f*_SFE0U8=Z3V>2V z06CE%06dY<+5eXS0DuzU|Nj?B)t6o!0UR7Xk@p(_`Sb$-s#gF2u=fHvkre=dQs4s= zs=wElmK^~y7NZP+Qh)$}Qn1rd0>;#r3LXI(D%aTmmmb#t zm*oQhs^kL`Du4ihQbGU#ut5L-up<h`E#O|-W~xue}fKyQbGVZks*LTkz3jS zmjD2O672u~7vuu~DnbANut5L-&_Vz?ks+V}ks}X)Qh)$}Qc>Cem*4{wsxQ|6mjD2O z65#*;7k~hOQbGU#ut5L-(4!83QbGVZks%-ekw)47mjD2O64(F#7r)g1m;HOZsr>^0 zk@HglsdE7U3$XM90jl8x0xDvnm!cm5GAdWq|CjXx04iVu05zZ+K>6p_|Chi705zc- zK=}XwfKs&+K>2|J0QqyGm;N6CEq}1X5P(ub06CE%paPK=+5eXS0Duzj|Nj>sAOqD- z*#DQ6)&G~}0|2Vz0~IQO0Dw|L0059d006MV5r9%c06CE%Km(Bo+5eXS0Duy;6F~Xk z|Nj@{0{|*R5`a>G0Dw|L0059d006K;06CE%pa7Bm*#DQH8$kKn)&G~Z6MsPY004jz z)Bpb$fB=9}LI41eK>z@-qY!{nLI63DApioA@!0>D004jz%K!ftfB=9}LI41eK>z@- zgAjmHLI63DAz%TK=-B_4004jz!TKs{R04z^HTw-a{&O5 z^aB8@V4_F)0RTYxp%(!80X+bK(sTj7cJEPm;HOasUH9U@%;k;k@HglsdE7Ukn{rrs$!x?`Gguk`a=Lf`J)#A`2zre z(jWo=FI1w#0FhrzAGrS3|Cita05zc}K)C<_fKq`10J(FbNBN=`K>1+;K>0xc0m>l) zm+l?`A%Bp70Dw|K006LK7l2Yi06CE%fCZ6@*#DOR0Duzg|Nj>R)c=UCV+;UPAOL{Uk<&f7Sn&U=jc| zpanBP`9KwyDkA|lA*R*;m-PbxDgXokHJ~#<`QZZ;s)7PQxk4}i5%1Ukmw=anBLNd7 zWz_$d00{s!pff=EcGUlu;0XXVfC50da{>TsKn4I*-vSeBiI=$}0V98h)&H0E0{|*O z{Qopu*Z-HJ)c=>{0|2T*0059d006MV9)MCp06CE%fC-U+0Dw{#*#DQHGeG$j0Duxe z0|2xy*Z-HiFF^Ua??kEK0~D(J)Bl&VQ$eY=-$JQi0RWHzphx-O0~IQwH$eFz06_Uc z00GJ&4FI+H=R>LW6F`6Y{d>o#zyn{_A0PwO;Whvf_5%T`od5q9A0PwOpff=E0R8{9 zy6;4(ye~lc=>Pv0A0PwOz@-Lmz-rLI63DAs`BofB=9}@7MpA*#G|* zA0PwOz@-!ykZBLI63DApi@JfB=9}<=6k0&i{Y^7at%4)#L*JDnbAN zkU;4>DK>4)){}9+UmmVeo7JuLw08~Hx0CA`p092vF0+F~M0QtNx0Qn*SfKtDc0J-=B0IENX9{8)& z|Cjs&04g9605zdcK>1-50JNY(0+FCY0QsO(0+F**0jZ!r0QockfD)jC0+G3E1gX0# z0QsOh0Qn9802Ch}1J$4cK>0ub0JK=s|CgXoK>51@0Dt+@0sy(J)Bl(J0{|*O6aY1! 
z1^{uPPeA$L5&%@Ck3hLY0D#h@LqPcg0iaUg0~o4p)&H0L0{|)j1^_jp4@ap10Dw}U zPeA#NH&AqW6e zU;`J6;{zEgLMH%`;R6S%p49)B_X7YbK>7bPbJqWty3_xcV<>=9z@d zLI63DAwVFJfB=9}F4zB;-~$_~7}Nikpie;gV1EDrv_;ncm%mQ{`Je(o`2Yw2v=P() zm%A%K`Lk0&skv)JslNgM`JZP1ai94`;{}&%11J&dM04hQN0FXfd0I=gL zfKoyLIgue?2$6sQfKu(&|Ch}F{}&%11Ao;f*8i7n)Bl&_CxBA3|Nj@WQ$eY@YecEL zD?s_f|Nj?40059d006M$0~{)YEr3!&06CE%U<{Ff0Dw~4*8i8U|Nj>sAOqDx0059d z006K9EPzr%06CE%011(R0Dw{#0Dw}_*8i8dA3*uMFF^UY|Nj>sAOqEb0syq&1AhUk z=GFg~vr|E-xaUKuy6;4(ye~lcumArSz@-<1Tz@-V={nJLI63DApj4NfB=9}7k>bN5~|k!m$)B5`MfVc`H}zs7o?9sxk3N{kU;k)(@2x$Xb|7qe3Vsh9XE0Stcx z(*KtqAOqF?d%das0|1fpQvs=S0RZq60Dw|pW=HwJ006Y$0~9LrQ$eZyd&8)}17Fr3 zAOqF469D;jB0G^l006K6zz>m1(f^mA1_1eB0|2zP7XbNI006Mo0uYho1b|Y&0Dw~B z0~V@5006Kdpb?R4*8i8F20-~i004imAwUw5_tF2CU}gvTzybiY;sX|{qXmFczyN?! zK>z@-A%GK+U)KMZ-~$vYwOc}|^HV{o{d>cxzyn{_LI41;K>z@-g9Lz5LI4nvApjAP zU;uzp6aat{Qr7>MwG%-3(f|J!;sX|{K>z@-Lj{0RA;1!mU;uzp6aat{N!EY=m$h3$ zsmTBT7yWy_sbB#B@cjb-kpO5%`SVi&sZ;;}uyEA>m-7PvD&PPBG(i9Wu%iZmQi1{y zks&}7k$?bzQaINCmx2Nik>dt{Qh)$}Qb7O!ups~ykuui*m*4{vD)Uo8sr`GxsK5hX z)*m1P)nWkv@ZkdhszPK(`GS7{0JJ~=0I&eS6_G#yAJzZ>fD*t1U)CQW1J(U|zNtU} z0I&ey6_Nb|0Fm?$0QqzQ0Pyh8|CfMn2l=1`0JO6U0Qpk@0I+jH7LmgSfKtE!fKuWE z6RJS~0I(t87m*Rx|CfMnNBLm^0I)&=fKnkq6p?t-|CjUw04jh105yN&0~4x2006Lq z2Y^x`KpBz10Dw{i*8i8Y3qbkc0~9Ls4?y|-d%~!|17FtS0~4x2006Kf2Y^x`fEbZr z0Dw{y0Duzs)&G~X3qbkw|Nj?4006K-006LK27ppRKo*f9Ko^l<0Dw{y0Duzi)&H04 z|Nj^Ld%me)0RZs*0|0-K0BlG3^HTw-Q~&_57S#Wj^8)}X-~a$LK>z@-g9w09fz@-A;21u*wz1+-~$sX^HV{o{d>Zwzyn{_ zA0PwOWB~xMgp5M@L;wJ^fB+xX{d>Qu!wG;=fB=9}LI41;{R4jhkwQQgkwE|eupxjO zk;c{kmjD2O68(F@sK5hX)?onvupyK}`9S~xv>*T<*8O|Gsly6@Qh)$}QbGU#u>Au7 zkwQQgkwE|eupxjOk+#+UmjD2O68(F@sK5hX)?@(yu!NjK`9uH!w15B~*8O|Gsly9^ zQh)$}QbGU#u>F4n0FgpK7Lh>!0I(r|8Z70FgpK7Lh>!0I(r|8u{17Fr*0RXTev_kno006Wg03X)u{17Fr+0RXTRgK>z@-A%Gi^X4U_f004jz{d>Wvzyn{_V*voLW5h!FLjVA@ zU;rQ1{d>Qu!w`T{fB=9}LI41;{R04zLO>RgK>z@-A%Gi^R@MKP004jz{d>Wvzyn{_ zVF3WJAz@>AOIiM{d>Qu!x4Z|fB=9}LI8gNu>Au7kwQQgkwE|eupxjOkw(@3 
zmjD2O68(F@sK5hX)?onvup!h!`9S~xv>*T<*8O|GslyV0Qh)$}QbGU#ko^MykwQQg zkwE|ekRgB@kv7%;mjD2O68(F@sK5hX)?)zxkYn6J`9lBzv|s=q*8O|GslyY1Qh)$} zQbK!0FWVo8<8f}|Cay&fD-+C!KlCkU)E#+0FZ>_Lit1h0JMMr zAJ+YQzp29%fKq?}fKoyL0FeCy0FgpK7Lh>!0FWVo8<7^(|Cay&fD-+C!KlCkU)Es( z0FWWu{17Fr*0RWI8^g{VT006Wg03X)u{17Fr*0RWI8{6hIb006Wg03X)RgK>z@dA%Gi^=G6a}004jz{d>Wvzyn{_V*vn=V+2F_LjVA@U;rQ1 z{d>Qu!x(^4fB=9}LI41e{R04zLO>RgK>z@dA%Gi^*3|!(004jz{d>Wvzyn{_WB~w> zgbYLZL;wJ^fB+xX{d>Qu!x?~5fB=7hQbGU#ko^MykwQQgkwE|ekRgB@k;c^jmjD2O z68(F@sK5hX)@A_!kj4~4`9=T$w7>u#*8O|GspA@eQh)$}QbGU#ko^MykwQQgkwE|e zkRgB@k+#(TmjD2O68(F@sK5hX*8O|Fsr>^0k@OD$`Je#+w6hBU`5^#+QuBWU0IFNj z|CjUw0IDDX0JO6UK>6SU6sq(OK>7W9!l=LlU)CQW1Jx7&fYSQ^{}6+e{}&%11JwWl0JQyk zzp4EL0Fi&u|Cb*C0CC_0pIU$Yd%>u{17Fqv0Duy}17FrZ008ks7(jXi7(n`<1OQb1 zd%UUr0|1fpQvs<|1OQR>TLP)|0|2TZKmd{R0|BZ+0059d005Bp8vywfP&A7+z$1}Y zKq8T10zmoT0~M;_0|Kgo006W$(EpcU0RXgM1_1eULI9C}0u<}v{{Mee004ke_ZvX@ z^HV{o^;<%z{d>fyzyn{_qX~dg;sY0|;R6<`zyN?!V$}ba^8f!A004jzzyn{_{d>Nt z9{>RHBp5*Y{R04z^9um^AOQeW9MJ!l^8)~?fB^us-~$t?^9w-v{d>Zwzyn{_A0PwO z699nH-~$t?^9w-v{d@0~4zA z3qbk(d%~!|17Fr3AOqFF0RXg@a54cDD45Uxm*E2dD&PYXs)5k|m-7oi`2ZXM68(F^ zsK5hX)&Ky2QosXW)|NB@7=Kd%0Pyq!0IEO$0FYy%NBICSQh8(mK>4H}0{H|0fYS97 z0QrFy0J*i}0Fj_`0C@obfKs6A0FesN|Cc}j0FdKIDP091e$0J-azoihO!O2b!xQbHg)ks$y(k?7O^mjD8wQh*i!x!?c)7vuu~ zDnbANkU;w@`QlRSq zk+jeMmp}jjkmG-VQsM&@s$l|vQUHKHk;2pem!JRuHQ)mks=w0zmmeGea$y1hRD>Ep zxk3N{kU;s(a7>mjV925+48oYs~-vR29geQXc>nYwQ64RDc=)x$BpmGXWQW z<6nSMLLfSkA>cldlGFc}00N*=fEoa~-~ay?z@-&bK!he9${d)kZ{R04z^HTw-Qvm?* z^aB8@KmY)cW1>g-05DQ{WB@?w@`QlRSq zkzUXLmp}jju;YJ#QsM&@s$l|vQUIU=k!;idm!JRuHQ)mks%g^ymmeGea$y1hRD=#d zxk3N{kU;=`Jo>|`CtM-`5*uQ$^ZqIt~3D_ ze?kF(QUL&f(%=IXswL0=mmdHCaV-J>R3D%ZaclqpRDccux$Xb|7vuu~DnbANkbnSy zQb7O!umyd9Qp00_QbHg)ks*Ktkv`M^mjHc$Qh*Ksx#9o+7vuu~DnbANkbnSyQb7O! 
zuq6P1QbS{aQbHg)ks$yBkuuZ&mmmOue^P)B0J+uw{}-yx|Cb*i1J(Zn5z)ebfYJc~ zfKvT?0IB^00Fm=k0jW~~0Pyq!0IKy{0;yx7NBKYi0FV{|pi;Ht0Fh(>K>4H}0{H|0 zfYO#80J#7#QhA_r0C}M60FemK|Cc}j0I=hKfKuWE6{>*(fKmX!29Xxi|CgWue*iV$ z0~M+h(*Ktq8~}1*0svHm9zeN50059d006K9Xn;~eAUcsD;0cj{0Dw{j)Bl%qqDT3m zA42(H0zmm7007D$1pu}4Q$eZqTSBS*dqAoG0}+wH17Fqw0D#g$0f18A0~M;z&i|JI z|GyF+00C>w002}K$e>am02OQOe*pkgfF1z3>;L~3z@-z@-RHBp5*Y00970002PwzyJWWmuxiw5K=+W z{}=sxzo}yefKq?}fKoyL0FeCy0Fgo0Qpk^0PyuA0QvO;04idlm(Dc-HX0|=|Cc}n05zZ^K>49V0g<2| z0QsQf0Fgb=mmW3&MPIPs0~D%bKn;380Dw|L0059d006K;AUcsDzzdOo(*KvV4?y`K z0Dw|+qL;!p0Wp8D!v}y;LLfSkAs`EpY|{UiAOL_;?*IQ6A0PwOq|yJE>(2j|z@-LLfSkA%F~#U()}Vv=2b}AOL_;;Q#*@A0PwOmeK#0-Om4) zz@-;|qXNLLfSkA;1lhP||<@mmmOuQnU|1`O*LX7qkxm z`Nz%wm;HOTsr>^0k@HglsZ#*}@bm)!s$-%@`M-Y%sbBy=`Jhh#`2YZb(!WyzIln&u z`M;9@xwH=ew6+%j`St?=s(}Omw6|jeskZ|Hk@y1wDkA^@(1fA{kz)W7k%9t%(xC!? z(%=IZs;D&0|Cb*G05L!W093UT0QvO;6so!>0QrFefYRUt7pkz)|Chf;0r>g@0IJ{v z6{-x)|CgYbAvggPf1*eEqEA5iVFEz;K>z{DffE3=^HV{o{d>x&zyn{_fB=9}LI41e zK>z@-gARaFLLfSkA%G8&5YqpbwiiJ8AOL_;@BjZ7NYVe7kIw&>z@-LLfSkAs`Tu1JeJOxF10IAOL_;w%z@- zV-A2)LLfSkA;1oiy3zlaAOL_;n*aY7A0PwOyK_aUlmGu0xg$XNLI41eK>z@-DbD|w<^uq# z54>{}%xO zfKpb?|Cay&fKs(vLaDc7L#ev&M5$x{{}&tpfKtr=m$o_q5+3%<|Cb*i1J(U|xT*aE z0Fm=k0jX300Pynz0IGqam*zSFGXbKP8an|Yf3P_tI+6E66p;jw1sgipWcc9&7^(mS zfKva@|CjXx04kse05xU?fVt%Z7^+~RK>1{%1d-zd7pejXfYKodfYJbXX86R+|Cjay z04g5@05RYP090WHfVp6zK=~GUVff$!7OJGo|Ciwd7OLU{0IJ{v8mf%W|CfLO05xC= z7(lrn1ORc&0RU9C-$JRl=R>KwCqVg>qL5V~{};C30;#y?1F50|2Vz0~4wu008ho005AK9)MCpAUcsD zz!#B#0Dw~2(Epe1|Nj>sAOqDB(Epbo2mo=w{Qp!B(EpcK&HtC>0|2Vz0~4wu008ho z0059eAUcsD02h&C9e`4R0Dw}-(Epd<0~4w%&HsOwVEg|x-~ay?gB*ZTfB=9}A^-sJ zK>z@dLLfSkAz&7fzR>@dAOL_;{d>Zwzyn{_{d=dWf+~R00RVtf{R04z^HTw-Qvm?b z^aB8@W1>g-WB@?3FLQbHg)ks+WNk&n>-mmmOu zQVIY76o3GLQX&8V@Ie3okmLgfDnlfIQbG|RI*}oO7?Fk0|Cb;DfKqd!m!>`eCVwIT z0PsNo0FZ+tfKoyrI*}p34v}=w|Cb;DfKu=O{}&%11J$h0|CjU3|Ci+h0IK8z6e@rK zfKnm=0PsNo0FVPFfKoyrI*}oO6p?1o|Cb;DfKuWA{}S35D**ZW0|2UE3jnmWTLP)JLjjRh005BS0~V^G0vnOn&i|KS1^~3U 
z=L4x3008h<0059VgFKNKBO8~eKLH&ovCIFL9|Qm~zybhN-~$t?bz{Dfdl}x^bbInuRsAAe>%_q zmx0Xxm*fKgs$(&LQh)$}QX&8V@F4&I&_W7W9!>GUmU)CQW1J$(G0jX300ML}WNBQ6b6snNU|CjXx04jh3 z05oC&0I)#dPPqXD08u{}199mE08}3U0CC_KpBg~m4!Ql#|CdC}|Ci+he*miH0~IRd z0~D$U8GuqE008ho00599IDk@M0Dw|LAUcsDKpT;@&;OSo0Dw}o*FmY?|Nj>sAOqDP z0Dw}!17FtQ0~4z4&i|M60{|*u0{}E(0RXTefIE>O1VFh01OQQA7z1(X0svGW0042| z7oQp+1OU0+&i|Jr%>S3=TLS>92N{4;A^-sJK>z@d<^vQes)5RvMneH2e|XCOmtYD&xxWGcYibGrRCc03 z`5*{@(hAQ1m$e1}5hDnI(gFa0QsDyws=#VA_>>|5kzoG+wT90Bm(9!nm*fKgs>4Kp zQh)$}QX&8V@Ie3okU}6jks$yfkwMS@mmmOuQnOP*skK`|sS5xA6d(Wq@Zcxzyn{_A0PwOUe5oQrOW@9z@d!$W{lLLfSkAs`@;@c;i8 zA0PwO!$E*jfB=9}A^-sJK>z@dLLfSkA;1oi63_paAOL_;{d>cxzyn{_<3WH@fE56M zQX&8V@Ie3okU}6jks*K_kqFP1szd=UQaH~4mw(Ium*oQhs^kL{Dk1;?@Ie3okb^>i zQbHg)ks%-+k$?bzQu5CKmxKTR7yWyxsiOjb()|Mfk@HglsZ#*}(DhpasT80Dk@W)r zs$im*Dn$V%fBwn;mx2O-(nJ6NkV60fkc7Y?kz?Q?k)Z;B(%=IWs!q!PmmdTGF+c?X zRI~#Dk%A(C(g6T~Qs4s=s=vzrmjeI*@Z|#+s>G52kz;^7kzyi%(t#p?(%=IWsz1vA zmmdTGG2j6JRAM54QsDy>s^9|^s%6dpmvf>=`J%5tm(N83A%F7C|Cc$-|Ci(g0IDNN zfKq?}fKnm=0PsNo0FXi;I*}pZB9X1m|Cb;DfKu!K{}=QF04n9p|CcPw|Ci+h0IK8z z6e@rKfKnm=0PsNo0FXmSfKoyrI*}m&B9WiY|Cb;DfKuK6{}&j_|Cb*i1J(U|sHyz} z0FnDE0Qp-10DsW;8vyzD0|2UFqeuAw06;k+06_VozX16H0D#iq3jj2=TLP&T0D#iC z=L4w~0Dw|i005A>Cjj}gQvs=30059VBP5Zyg8`9RLnM(j000zpq6Cqn0)Wy20Dw~A z0|Tny0~D&A$^Vx@0f15fD4B;|>^aB7Y9|Qm~00;n7f+B#@0RVtf-~$+{TFU>I-~$yZjLQF)<^vh3=K}yL zMF0ShLjVAfr2rd|B`J%r-`C$V<`9S~y%Hah7wf7r9`THwC`G5U;*r>n*U)CQW1J$!rL8-M{LaDgt zL#etaK=}XwfKu`Q{}*b_|Ch1K|Ci+h0IK8z6e=PB0PsNo0FcANfKoyrI*}n@B$0ps zfKnvR|CiwZ{}*1(|Cgo8|Ciz@dA;2e*_|5;9AOL_;{d>Wv zzyn{_1pogRA0PwO00aQE0-%>pNdZ0v&B_0lqN10QNdY1T!^!`bzoM7ANdaLN(DVZU zs$il=`2hex`Jo>I`2hfc($tqRN&ysqFj9HKe}K{f0Dw}Ua{zg@;{cJM>j06R$^Vx? 
z005BVe}Gcr0~M+P0)SEgpeT{6&HtBx0su6G20*zY008ho0058!Re(}LAUcsDz$=k} z0Dw}S&HtCRTSBRGqDT3mA42(H0zmm7007D$2LQG6Q$eZydqAoG0}+wH17Fr6A0PwO z-~$z^mCXN_9~=O3K>hzz;sX{c0RVu~-~$z^W677^N&y-r=>Pv0A0PwOA^-sJK>z@d zV^V-pLLfSkA;1oiU;uzpe9ixt-2f`NBN;2Liu0S1k8~}1a{r^-#0f15g0D#iq0~MsAOqDR007V-008iVT7XhQAUcsDzz&gM0Dw{w&HtC*|Nj@{0{|)_007W{ z0Dw{<008g>eSlJ9T!2zSAUcsDfG&{;&HtCR6F~U@eSlJc3IMs%|Nj>sAOqFp0{|)_ z007W{0Dw{<008hM0Dw~CTYyqRAUcsD;4FWU`po~AwG%-3AOL_;fC>P)#Q*;nwG#mO zaLE6cA0PwOKmY)=8TtPgzyn{_A0PwOKmY)=Yx(~dzyn{_A0PwOKmY)=z4`wazyn{_ zA0PwO{d>Ks{R04z^bY{}Gys57U<3fPvkL(EApnBXwG#mO^8)~?AOL{U49NeN^#fc0 zssIE4v;hEuQUJ>Tm!Jm#HRA&nswDsb@FV~L@TFifk%Rynks<&9@Is(6kpUop(sPmk zkrV)cQs4s;W-@IoLuks$yvk+95{+E4)=V&u#Jmn+Htm*oQh zs^kL`Dk1;?&>;W-@FR7AQbHg)ks-h`k$?bzQl8BJm$M5%`Lz>3`P~2i7vuu~Dk1;? z&>;W-@IoLuks+Wlk)v{eQh)$}Qj^U8m*4{xsx$zAQhdmlT2TQee?S5Nw6hBU`SSw+ zs?*E=m*4{vs>#a#m*4{vs&~i#m*4{vsy)j8m*4{vsxQm`m*4{vs#VPYm*4{vs;W-@IoLuks-hik#5ZYmmmOuQvG|vsK5hX)<6OP zwEcU&sRA^BQXznVm+w&l4S!S2|CjUw04iVs_&2o5|CjRv04hKN05#wP6e{ymL8<+F z!l=s2{}&%11J%F-U)CQW1J&dM6RIKr0MPSOL8&1C0FeEA!l*-lfKoyrI*}p32a$jP zfKpA&{}+hM|Ci6m|Ci+h0IK8z6Dk9NfKq?}fKnm=0MH=-0PsQ}IyaFa;4_gz%>S3* z0~9LrQ$eZyd%~!$%l{W2AOqF?d$Xzi0|1fq*8! 
z0s#5o0~o5t$N!h$20*!?0zmoT10X6tKp^W31prjw0~4zI$N!hJ3qbh*0Dw}IqL*4z z0V+CS$N!h%0{|-E0~4xB$p4og8~}2F1OQYa005BS0~4wtpa+pe%Kw-40|2TZ{QtC} zBLR`%0~4yx$(O!U0U|!8$N!h$0~4y$$N!fg0Dw}n3qbkV|Nj>t%m0^W$p4q*0|2T6 zgn&|j0Dw{=007Vd008hpAUcsDAUKiR%aT!HfAEH& zK=~s80Pv*%8x0FlOcXZR$?|Cb*G05O0708{_~fKvT?!KlCkU)JKv z|CcGq|Ci(g0IGwEfKq?}fKnm=0MH=-0PsQ}I*}n@HIbdm|Cb;DfKvT?!KlCkU)CQW z1J(U|p{e}?0Fm=k0jW~~0I>WoJOKIp0|2UEqDT3(LjsZYTLP)|0|Kh}=L4zs0|P4g z0|BanDuB`f06_Vn_W=0;0D#iJM*#WY0~D%t$Cq|h0VWcs%Kw+Z1pqX2qDT4Q0~jiz z_m{3!0U&?hsK5hX)*m1P)gS;W-@PjFUQbHg)ks%;9k$?bzQaAvBQd!IYm*4;Y7at%4 z)gk}@&>;W-@S`b!QbHg)ks-h~k$?bzQaAvBQcp3<|CiPO{}&%11J$=10QsXM0+Hea z6{_I_8LHp|9I7A4|Cjdz04iV(05u>7K)Iqvmwr|O7Xc%emR12V2=E~wI*|Z?6qnCd z0X2VvDuB`g0Dw~A0|Tny0~D$~#{ZXvqd@uO0~)F$gFKPq0~@M=DuB}90~D&IXn6RI z#{ZWe1OPFh0{~P3B!E)j0~D$Z%m0@k{r@yd%Kw)i2mo=w{Qp!(%Kw*+$N!h*0|2U` zKS2590~D$u008jAE`U-Y02h&f0Dw{g%l}iC+yDO;A^-r;ApijI!zqALLLfSkAz(L= zfB=9}H~@fB`O5#7fB*j%A0PwOpmzYdG0Oj!c*p;jmbA0PwOfFqY8 zSOG3N!z+MNLLfSkAs{%BfB=9}%F6$jw;MqDPyhcHA0PwO-~$}08OHyYfCE6ej{pA` zy6*(3w;KTYJjR!bSOGUeOa*c11OQaG8vyxZDu7Zq1VH)m$^V!00{|)@1^_j-7XbMJ zG=Neeh=5Yy0~M;4%Kw-40{|+34EQ!+0RWHyfIF81Spgwh)ye;t9LN8c`X+wAVqYw%ua?1agw;MqDmvCAE7#?GvfKq?}fKoF6fKnm=0MH=-0PsQ}I*}nDHj!h>m;PA+ z8dig!fKq?}fKoF6fKnm=0MH=-0PsQ}I*}p3Hj!1z|Citc6DqV1K>70%K>7W9L8<>k z0g=E1U)H0afKq?}fKoF6fKnm=0MH=-0PsQ}I*}m&Igv-omu^}CA|n#W|Cjdz04jh6 zK)L?{0Bhg_7Ajx`092^S|Ce9^0FVHHJD0Os0VM`#$^VzH#+Tk&0Uj1(qkvLEAUcs? 
z0Dw{<008hIpgfTt%9j#b0X|Mf$p4r00{|+(001@M0~RWJ$^VzM*FmYa-$JQ}|Nj?4 z$^Vy$#{ZY*0|2V!0~9JE007V-008jh0~M+xq<~UFAUcsDU_Oz60Dw~a$(Np60Ua-} z^;-g|_5%Q_V4_F)^#cK_L11(_Vk&^rp#uRq0RTYxq0a#M0RVu~pa%hw*u|IXTLB*d zqL&I>0Ww1;$N!ff1ORbh|Nm6C8vyyY0|AlZ0~e};Du7bq0~M;^0~V@6$^V!10{|)k zG=Ngz0~e~o$^Vz20su5%0RWHyfIF9eTmdf-I*}oOKam4KfKq?}fKs!`m%dy9CVwTx z|CjxHzp4EL0Fl4~0JPx)0IEU&0FVIS1d&3(0Fm*<|Ci(g04e|gfKr4205$!4!KlCk zU)CQW1Jz@ufKq?}fKnm=0MJ1I0FXi;I*}pZ0Fji*|Cb;DfKvT?!KlCkU)JNLfKq?} zfKnm=0MH=-0PsQ}I*}p34v~t<{~MPe0Dw~cd%>u{17Fttd%vmu0|1dg0sypM#g~3v z0Ty0EOMp^<0Dw{=007WI0059eAUcsDpaGG0$^Vxi0Dw~cd%>u{17Fr3AOqE7N`O*; z0Dw{=007V-008hpAUcsDzz&gY$^Vxi0Dw~cd%>u{17FsF006WA-~^Gj$CvV50V03# z$p4q%0~4zA3qbk(d%~y#B!SWb0Dw~A0|Tmd#Qzr`AOqFF17Fr3AOqDv006WZ#{U;0 z007V-008jAeSlIzAUcsDzz&gs0Dw|T$^RE0AOqFF0syrAd%vjxkN}bW0|1c%1VFg} z-~^G*$p4q*0{|)j0Dw|t001@pd%+*5zyn{_gQ$Q~fB=9}A^-reApijILLfSkAz%WL zHp!Q=UI8is6UCR_UI89PLluBhfB=9}A^-reApijILLfSkA;1HX9m)TfAOL_;vkO4^ z{d>Zwzyn{_Lll5gfB=9}A^-reApijILLfSkAs_>h63LfBUjZW-B!SYk69D=10|2T5 z0Dw|88~_sG0|Tlf#Fu_w0TzE9#Q&Eg0058z008h~U=NW(01}Y_Ab`@~0~D&J#{ZY# z0~4x*#s8OJ0RT1N0~4w>$N!hW1OPSQ0~4y!#s8OJ0{}I%Q$eY<6F~X>d&8)}17Fr6 z006Kd008hpAUcsDzyy(_dVo@Z0Dw~B$p4oh0Dw}p6F~X1Q$eZyd&7UIzyn{_qj`W* zfB=9}A^-reApii-LLfSkA;1oi*2w>tAOL_;{d>cxzyn{_-~$t?nZ^H?A^-reApijI zgM5HeLLfSkApi!E=>Pv0-~$t?k;VU)A^-reApijIgL{BdLLfSkAz%fO;Q#*@{d>Hr zimS6!c2JOWEmvf?*%3uL4I`E^ZfKoyrI*}m&2$7-4 z|Cb;DfKu=O{}+G&fKnm=0I(qd0PusUfKoyrI*}nj2a%Y_|Cb;DfKuj{!D;~$6+@|j zQbHg)ks+W4k&ej!mmmOuQrwp@VF42+W2t~rLLfSkA;1oigvkGwAOL_;(*OS#{lov4 zfB*or03ZsH&c>ILVF4ozAPSLTate_G2tc|1$Ctuk0U|{suYgj30Dw{=006Kd008hp zAUcsDzzC6J$p4oh0Dw}n3qbk(d%~!|17Fqyt$4Jv0Qm#}fYSFH0Qt8A z0g>PX7pgYI|CjRy0JLEWK)Is=fYM+ZK)C_{fKs?00Qq<#3X!#20;zSv4v{th0Pxo0 z36TK{0J-4<1F8TM0J-1;6{=Rl|Citc6{;fu0Dti00~V@dAPSL!0)Wz?0)Wz|!~d7{ z0{|)?1OPDr1priJk^qq<007V<007YD0~V^K02`5GpfZtSB7o9?B7oB10~e~A!~d7_ z0{|)?1OPE$1priHB7joi0~e~`0~o5m#{ZW91^_j+TSBS0A3*t(qDT3nuR!^M0zmme z0GB#r0V026uz*rRAUcsDAU2T*$p4oh0Dw~O|Nj>sAOqDv$N!gy#Q&G&0|2Tb006Kd 
z008jh0~IPlAUcsDKnjtgw18580Dw~Z$N!fg0Dw}sA3*uFTSBSd|Nj>sAOqDf$N!gi z#Q&G&0|2Tb006Kd008jh0~0DEwSZDWAUcsDKno+0@c;i8Nt{R04zzytua10;dcvkL(E;R6Gz z^8)~?0sw$g^1}a@BLD!90ssK;V_*-FLI4tx0U&_VV3Gild&U2k-~$t?S;YUBKmhBe{T5LLfSkA%G8&TF3vFAOL_;+yDO; z)58CkfCB)u0gRV6W&tjLnZ^H?U;;onlm-A1wL%4vVU9%kp8^1DEC2sgwAVqYfdBxs z-~$1wE5rYne~twC^9w-v{d>cxzyn{_zyn{_A0PwOzyScX0RTWbAprn10gOcX0sw$g zKYRcwLIeO2AwUI@0{}ode*gk&Z2$jM004kezyn{_A0PwO{d>PxsUy9BQh)$}QX&8V zu>Au7kwPFkks$y8&>Au7kwPFkks$y8&>_GMkrc<5vS$G!0Va{_^Tz*| zGXQ{6fR04@U;_ZO+rs~spN<6iv=2b}3jhEVA0PwOA^-reApijIH*HQ)miDzpzk`SS}v`Tcvs zsK5hX)?>whQh)$}QZoR6QX&8Vups~d&_WQbxo7m*fKgs$<50Qh)$}QX&8VumJ!7&_Wml|mS8h>lV z|Ca;EfKnm=0I(qd0PsQ}I*}oO5|MxafKoF6fKsc*|Ch86K>5i3{}57?{}=sxys7;I0Fm4H}0Qm#}fS0pr0V*ksMEO7m0JH-tfKoyLK)InC0QsS#0Fi(I zqno6o06C&B0J*%v|Ce*3mj-G9Ef(;j%79WrAUcsD02PsN#{ZWf0Dw~OmoZ@h6BXmh zfKoyrI*}p34v}WY|Cb;DfKug`!D;~$6$8qEQbHg)ks;s{kzB_AmmmOuQrnloY5@}t zBg%kMLLfSkA%GN-QpT6MY5^J|L(70tLLfSkAs`l!NXGw{AOL_;$N&Eq!NLERKmY)= zlf;+yY5^t!#l@E%YXKgAW6pq5fB=9}A^-reApijILLfSkA>bB~E5`qqAOL_;vkO4^ z{d>Zwzyn{_!_9zFfB=9}A^-reApijILLfSkAs_>hAjbcfAOL_;{d>Zwzyn{_{d=mZ zBLD!w^;-g|0USWN_5%Q__hSQ>yK4aoe*zeh0t`U8 z0RVu~p#p%?fT9GEdcpsf9|Qm~ z00;n7xB~%^f+B#@0RVtf-~$+{>%sq*<^vU~B>(`>BLD!<=K~iir2rd|#F7A!V?ZU5 zVj_Ujfg*s?-~$+{XuPX7phjl|Chf@0J#DxfYJc~fKuQC7^+dh z|Ca+Kfzkp1fKuTD1FGNy7pk;R0J*j+0Qq{r|Ca+QfYRXu1F8Z5fKuQC7^-!^|Citc z7pmjI|Cay;05#$R6{_I_7^>g{7pjK9|Cb;L05u{2fKnQvBLR`%0~o5Jg9bT-!T*=^ z0{|-E0~o3a!k6xD0U9O))PPb#AUcsDU>}iy0Dw|D#s8PJ6F~Vi0Dw|+qDT4Q0~9Kv zXO}K-0UUoTk-!6A*5m^eDk1;?kRbp7(4*6UQbHg)ks-hxk$?bzQZxX7QY6Lym+b%l z7a{-vkRbp7&_W`L|<3sp0?s7at%4)gk}@ zkRbp7(8JS!QbHg)ks+WSk$?bzQZxX7QV+%dm$iQrK>61H{}&4As0Qms`fYLJv0JOIo0Qq;~1d+8{0;vQjpi-j(fYLw#fKucV0J-4<1FB#`0l5MI zfKq>do&foEf(en}0~M+^007Vm!2g%lA|R3U!T*=#0{|)`007Y70~M;|0~V@dKp>HV z0)Wz?0)Wz3!2g%^0{|)?1OPGM1prjF0|AkOB7o8X0Dw~90~M;s!2g$Ik^qq<006Kf z006M&0~V^K02`5GpfZtSB7o9?B7oB10~LR&M!^4<^8)}X9|Qm~palR_Vj_T2;R6+_ z-~$(`Ys3GSz{Dfd&Az^HV{o z{d?A^zyn{_A0PwOfB=9}A^-r8Apiid7W9!l=LlU)CQW1JxtkfKq>e0Dw{= 
z00597007WJAUcsDzyy(M#Q&GG3qbiG0Dw~cd%~!|17Fr3AOqE-+kjGl0Dw{=00597 z006K;AUcsDzz&gK#Q&Ed0Dw~cd%~!|17FtQ0~4w(!~d7#+<;Pm0Dw{=00597007WJ zAUcsDUZwzyn{_A0PwOLjeGgV*o(;zQdOSa{;z6VQg<_ zMr?I+XaE2J00000C389@WI7;oWpp5PXmVv?WM6J!ZDlB7Y;R{qY;|*Jm(hCx7ng5& z0SF*eXmVv?WGG>5Z)Zkqb#rJbAaQkRbSxlnX=FVmWG)J~w{rnu20}qoLq$$RUsFs^ zM_*7R8L=1R76izQ~&?~001!n00000002Q#Lq$$RUq?(&LP1PlMOH;lR9{6- zK|)MLmr8m88z4bbLq$$RUq?(&LP1PlMOH;lR9{6-K|)MLC?#YmE(({OdI23RWp-t5 zbRcYHc4cfJC?#YmE(!nu002Q#Lq$$RUq?(&LP1PlOixE&Ohr~jOqWl30Tu^AQ$s~g zL|>O>dI39^pLzlDxA=SkSOK??djW6(7II}_bY&oPVRLIBMr?I+XhCprOmAnGA$|cG zm!Ek72)BTK0p|f2OmAl(Xkl|8Vr5}&AZc!CbY)~N3IG5Aw_kt(F9RbWVRUqIEFf@c zWIZKyEFg4waAiFuWGoz{j)MWh4wsOU0e~zeb2=qtIv`1I zX>=fCZ*FEFVQyp~b7gF1AZ%}EAaG%HXdrWSV`F7=b1n+EM3VtS4jE2wbRc14V`X!5 zAa`$aYjbd6V`U(4VRUG>sG$MZ0wZ^FX>?^EXm4|LAZ%}EEFf}abUh_?EFf=YW^-k9 zJtcK63YYSv0WOz_#sLnO9;pE%5o2~BY;R{EVs&O9VtF8FWMh}X#sLmu{v3IhTjV0T`EXy8#H7pQHf|mlUr7BbTS7 z0azhsY;|*JAZTxMbRcYRXDlFcWpq6ybu1unW@d9`bUh_?moU!(6qgLA0V|i+rU5XQ z*QNm#6=rO8b7&xAVQg$-VPk6`W?^Y;Wn`BD!T}SPJf{IGm+z+mNgqK|Lq$$RUrbL& zUqnS#Nkc_nMod*xNMBS*O+`;tRF_YB0TzEjQ$s~gL|;r#M_)umR!KueUq(z-Q%GM_ zNlishRa7V?WGOBR00000001R(IwfQ}AZ2cLAVE_@MNULtOixE&L`7CfLq%UkOjT1z zUsOp=MNd^!C?#YmAZc!PVQgt+E(!nu00000001R(IwfQ}AaiAOAY^4`VRdYDAY>b5 zc4=c}AZ%}EAZBcJb7&xRX>Db1b#y2tWGR=IrU5{gAEf~Ymu{y4A(ywO0XRQIY;R{E zX>MtBX<=+>dSxJHX>MtAXk}y|W^ZyJaB^>BWpi^b3IG5AC389@WI7;3Y;R{EX>MtB zX<=+>dS#a%r~w(5Z>9klm(QpHFPEOI0T7p%uK^gBPNe~1m-navl>v2^ji~{1mpG^a z9G6_E0YgSaY;R{EW@&C=Y-xIBAZBT9X>(|0WG)H-C389@WI7;BZ)ZVgWo~p=a%psB zP;zf$OmAl(a%FTqC3P$yZ)Rq5Wpq6yb(fW>0VB4m0S^V2H?IL%7IbNCWp8zKEFg4g zZDntDbUh_xE(!pbey;&Cm(Z^PP?w0O0TelGX<~9=a(N(TVQFk-WG)H-0000bb2=qt zIv{LsVPqh3b#8QNZDk;AX<~9=a(N(gbz@^?Wn?Z2m#wb>BbSJ$0Th31X<~9=a(N(T zb#7yHX>V>IW?^Y;Wn?Z20000bb2=qtIv{LsVPqh3b#8QNZDk;AX<~9=a(N(Tb#7yH zX>V>Ib9G~5Wo2Y83IG5lb2=qtIv`_gZ*yfJa&>NWX>Da7Y-wV0VRCsOXkl(-Y-J#3 zVQFk-WG)H-0000bb2=&|WI7;YY;SXAAaZqXbZKp6AZ%%3a$$0LAZTH3WNc+1b9G~5 zWo2Y83YQ(Q0d{|EX<~9=a(N(TVQFk-WG)H-00000C389@WI7;hZ((F0WO8YCWpW^F 
zX<~9=a(N(gbz@^?Wn?Z200000C389@WI7;hZ((F0WO8YCWpW^FX<~9=a(N(Tb#7yH zX>V>IW?^Y;Wn?Z200000C389@WI7;hZ((F0WO8YCWpYO#Y-wV0VRCsOW_503bZKvH zAaiwNV`XJzE(!nu03~xeC1g4vV{C78Wguj7X?A6DAZ%%3a$$0LAZTH3WNc+1W?^Y; zWn`D}d;t_6WO8YCWpW^FX<~9=a(N(VVQyq>Wgv5PV`F7yWG)J~#IOM{0tjYdX>4U= zE(*7C9s*nix3IVY2LUEkb7df8WoBV@Y;+)GWp-&}Wgu-~cpzqJY-J#GX?kTnC3P+e z0GH*t0pbc(b7df8WoBV@Y?pDw0Tl^zX?kTnC3P+em#4%5*_W`n0S6;hb7df8WoBV@ zY;+)PVR#^6aBv`IX>4U6Zgp)vC1frN005VI#R1g{RdZz^WMyVyb!?YW#sL!rJtcK6 z3YWow0T#Cpy8%E53RQDuAY^4`VRdYmQNsZk20bNYE(!pbf5QRr3RQDuAY^4`VRdYm zal`=|20bNYE(!pbdd2}>7GZN^Wo~3IY;R`(0000003~xJb0u?^?#2PEx1z@Z?FP5s z&H>8<1T7_GUzeuR0k(e>3IG5>0RR9Sv43IG6bu>Svi3IG6ju>Sv$3IG7uu>SwJ3IG5&vHt($3IG5= zvHt(~3IG6Lvi|=p3jhFcvi|=-3jhG9vi|>63jhGXvi|>Q3jlur(6avjdf z2)zFPqzwQ7=)L~`@C^U}D8Byx6b=9Y(7yivOb!45h`;{-d=3Bru)qHQkPZL<_`m-D zxDEgSkih=`2oHY%0NBC)|5OhE0C>ax|AY?!0FcA}|ELcD04T)%|Ktw<05HV<|M(98 z0Eor@|6C9N0AR=d|GW?Y0D#B-|IiQs0MN<)|1c2%0NBa?|3nc00D#K=|8x-m0Km%r z|C|v309ere|5OqH0HDzR|7a2b08r8X|JV`$0I1Ra{|tW<003~;{{M6n006+({{N^G z006Mq{{Qe3005xb{{J`>005BM{{Mg!004m7{{P4n003~@{{Ija003~^{{LhZ008LT z{{Pq&008jb{{QS1003~`{{JKv008*k{{LJS008*l{{M^?003y={{O%h005xj{{Q3_ z004O6{{Md<7XSc&;{N|M7XSc=;{N|g7XSc|;{N|!7XSdz;{N}D7XSd*;{N}X7XSd* zHhyv z82|vd>i++n82|uq>;C`982|u)>;C`T82|u?>;8ZL@EHIA*z5lP7#aWoNbLUqKpFr5 zK<@tk;2Hn`c<}!Jgc|?=Sn>YN~b@bmuvU>pDd2=xB{bQ}NxAoTwKh#UX_SoQw@ z>>L090QUa>5FG#j5cdB6Bpm<%u=oD|bR7TyNcsN%_#FTM(E0xV3?2XgSp5F~gdP9@ zi2r~7{|Fxd04M002M`|NpEc005X0|Nqn^004h* z6aW7(B>(`Z6#xHtB>(`J761RRB>(_07XSa;B>(`>7XSYUCIA3X7yth_CIA5F7ytiw zCIA2s8UO#dCIA4C8UO$6CIA3%8~^`kCjbDT9smFACjbEW9smCnC;$ME9{>MDC;$MU z9{>MXC;$Mk9{>MrC;$M!9{>MKq&wJbSnS=Kq>$K#47**NGbpS*eibk08lCa z|LiLO0B|Y){{Soi0H`Vd|0FB`0Jtgt|2QlF0Kh5#|4b|Z0LUr-|6nWt0MIG_|8y(> z0N5%2|A;IA03a&=|Ew$k0B9=z|IjP|0FWyG|MV;X0LUu;{}?R*001lh|2!=K04OW} z|6nZu08lIc|AZ|709Y&k|CoO*0059H|Np!#005vX|Nqb}006iv|NrDI0077<|Nr0?|Hw7~0Qf}z|M)fl0FXuh|2Q`Q02oI9|7%P0C-dX|6Do%0H9O<|8P10 z0C-gY|EM|u0N{UA|NqcB001CW|Nrzl003ZB|Nj^}005v>|NlHY007`s|Nme+001CX 
z|Nn$L003ZC|Np2v005v?|NqQ8007`t|Nrni001CY|Nj&`003ZD|Nl5V005v@|NmS( z007`u|NnqI001CZ|No>s003ZE|NqE5005v^|Nrbf004jBSpWYJJpce8S^xhuJpce; zS^xi7JpcgUS^xixJpcd*TL1sBJpcf(TL1s_JpceOTmSzsJ^%o4TmS!9J^%p7T>t-& zJ^%n9UjP5dJ^%oCU;qCAKL7yGVE_L-KL7wIVgLVNKL7v#WB>oCKL7woX8-@3KmY(x zX#f8xK>&XMKxzN~TtNT;plbjBj6nbZxNQIb5JCU|2yXxXWI_M{XmJ1k@In9pz;XZo zC_?}M5On|lz(W84V08cgN)03eC~ z|EPaS001b8|NqcQ002OW|NrDk007vE|NkUP006*@|NnqW008if|NqoV007XA|Ns0- z002mj|NksY006L$|Nl@+008KZ|NnGL006j=|NrPq000=1|Njt7001bH|NkUR006j? z|Nmr6005Ac|No#&001bK|Nq=f001DE|NkObO#lE8n*aZ>O#lEWn*aaAO#lEen*aaU zO#lG+n*aYWPM6Bu0U|1drT+gc000000000G0000i0001>rT+gs000000000a0000$ z0002YrI!}n0VWpgrT+f}000000000a0001d0002srI%9O0VV<%rk8r%0V;n?rvCp3 z000000000m0002w0000$rvCpR0RR91K@L!z3Z4l=4IM$129^avlL(XtaSGnu)z;J0 z3?BeO3jhER0000O0RR9jr~dy8000000000G0000i0RR9fr~dym000000000G0000$ z0RR9@r~dyG000000000G00372Q~>}0P^bR?3;+NC000005C8xGXaN8KOsD?;3;+NC z000005C8xGd;tIeNT>e)Gynhq000005C8xGkO2SyY^VPJ3;+NC00000kN^MxqyYc` zXs4Gl-T{sYI066wl&JpyK$i{r0U4L#-T@kaGynhqcme`3&1e61R zl>(gxo&{Rn-qq983?Bdh0E7Sl00;vB0OYLx|NH>}002P_umM4x5S|Y~3>|<0LYxIc zm|<0lMa*( zl?;{(mkO8(nFyK(n+BW(oe-W6Y17ms)z;V8+1lGelMa*(l?;{(mkO8(nFyK(n+8Y# z0058!003OBmlEIsMh4si008i>mtNoj8bUMx00004000cI{{N@|0000%4j`Qbo&!S- z9Yd1>Mbkmv-3%W9OAH+#lLDOto&!hILf+jB9{?->001}y004ZjmxAB{DjH}6000EB z{{JKZ00000001Na004Xh000=Vm&V`$A_k-c0020#m+s&JN(S@<004lompnm!RPR5DFjw003|W008W>mmcu}8VWc7005K)007{#mp0-7HXYCf z002C+{{I9300000003YB0086#001zw{{L8)iQ)lL4^Pz7003M7002k^001Plm+ImH zR(}ux005{4005k}{{I{R00000003M7006uO005}A{{Kt@0000%4xkA`os_ey z1fByy-rWoz07ncRAd>=}1fBy$(?Q_eo0-Xe&14q+B-rWoz05kvq01ylS07S|D|A+tp002P_ zAe{uB149iRLz4nU(?Q_eo0-Xe&15DFF-rWoze*i21002k~005N9{{H|0 z0000%4iKFJo&iA(9a0M3-3%W9LJL(2-rWoz06_}?001xm004Ll003ml{{L(M0000% z4j`Qbo&!M*9YK=%a=Ci0cv6Z z0000e0000?4gdh?%>Mt(00000K@LEj2%ZN)4IM#~29yP53f|q*)C?a0K??v70001Z z4gdg*&Hn!s000000000G0001t4gdg<&Hn!s00000000130001>4gdg@&6n=!0agb5 z4gdg{&X-E+0Xhae4*&pI(3gtp0Y(Xk4*&oZ(f<4*&q5(f$);-rd#H)C?a0LJLUV-PP073?BeI 
z0000i6#xL>;FpH*0Xhb36#xK`;g`nn0UkP#6#xJ{;{N|w00000K@Jd|0-gav4IM)Y z-rWoz0745x3f|oe9{@)SLEhaA9{>O(0002Q6#xKW;+G!r0U8bf000170002&6#xKG z;+I140a`*#761S!0VW1q7XSdj6c>k0XBay0000?82|u4>i+*&00000K@K3D1fByy4IM#~0+azw z3f|q*)C?a0LJLFQ-P6Ol0001h82|ue>i+*I000000000G0001#82|uy>i+*E z000000000G0001}82|u`>i+*I000000000G0002I82|vF>i+)>004gg000005C8xG z&=~*#oa+An6aWAK000005C8xGp9cxa$7@C;$Ke000005C8xGAQ}Jw%}8{@0Tj~0VWB^8vp=I@c#dtmy!Je6PIlF0Sr1U0002=8vp>n@c#dN00000LJlCE z1fByy4IM#~0!Iqo-O~&o06_~*-rds-9{>P60000a8~^|i@t4B)0Xhd{8~^}l@&1C0000001yBG07M=D06_Ns{|o>C00000 z01yBG09YOX06g~o{|o>C000000C)fZ0Bjxr066ykmkasP&`u_iXm#yRh9%NG9-P6Ol0002U9{>Qv z`u_h2000000000K0002o9{>Qj`u_g}00000000000000W0002=9{>QT`u_hg00000 zLJkm}0-gav4IM<@-3%W90000W0000OAOHZg`u>+6{Q()5@caP;2RtAE0J!@8mmmEB z8JF<<0R#tNAOHZo`u>+6{Q(^tgdhL_!216G2mk;800000Gynhqm>>WEwEF&+SN#Do z2hbn@0Hpi=myi7cH3tkK000pD{+G-B0Zj*BApiix{Qj38{sBotups~dbp8JS1ONa4 z000005C8xG#329xX#M{G1ONa4000005C8xG*dYJ_T>bw4AOHXW00000AOHXW>>&UE zX#M_|tNsBUe-I)70DS%a{|o>C0000001yBG03;#+0C@fW{{#R40000001yBG05~E5 z0BrsK{{#R40000001yBG08AnP0AT(8{{#R40000001yBG0AL~j095_{{{#R400000 z03ZMW0CXY%07(7*|40A;002l15S;>^0YVKOMBd#D7asrs001BW005XG003PpfpZ@_J2LvMk0I2=`m!JOu z9S0~Q006xG{+A#90U4L@`~d_9Od|jQ!2SN0AN>Ix8*n240L1LzUYC6)!j>000#J{{I*N z00000000mG002xR000>N{{I*N00000001BW003Yl0012R{+AyC0v!j0Bme+7{{ELA z{Q(^ts3ZUYJpTUw5C8xG00000AOHXWyd(esIR5^ZdjSF+2jC^0YVKOMBd#D9{>OV03ZMW z0H`GZ0CfKTmuCV39Yf3|005Bw{{IL700000000mG007`6004yk{{IL700000000mG z008tQ004OY{{I9300000001BW000Ok003It3n%~p0Ei|40CfKTmq!BvB@VPE00030 z{{Jul0000&m(BtL7Bko;000dC{{I9300000000yK008VJ00030{{IjF0000000000 z001xm000Ch008*@{+E;c0WAkOCjbCU|NfW9`vEBjXeR&wbpQUB_xk}&2D~Q#04M;L zO8fyf2KXlc0Gt4qhWr5{e_Q|n03;{?0Qdm^|1bdn002Y|K%EGl2SE)TL6ilR1Vxhu zLY4zj)`p|29yPr1eODx2%ZN+mjYJTMAJdmLX!rT1D66p*VaPQ zK@1-N)YaYI0E_?t0E8$20LTIV|KtJy002P_zyX~Yo)000nr{{N@}0000%4ltbto&`Y-9YK=> zlmkMQ0%{80-PP073?BeN3jja>008_c001Bc|NrO!0000%4j`Qbo&!S-9YK@|---rd#H)C?a0LJMD(15DOK3YG&%3qcB&14s)(3YG&% 
z3qh6x000130001tDgXcw2><`|0RR91K@NZcoe-W6K@A;2m46JD3qq3)lnqFi3PhR* zRSMqS+Sk?A)6@(f0745vnFyE(TG`k^m z-PP99)C?a0K??u?JOBUylq&!Ls0;u96afGL06`8wod})>K@A;2lLnLpRSMqS)6@(f z0745vl>|lALX`wh3e`aiN7YM}1ONa4Gynhq)GGi0Sa$yZ)Bpeg06`8god%u-Lk%54 zlLV9lLX`rR0bvT>-PP7V)6@(f06_}?0000001yBG01PYu07wk~{{#R40000001yBG z03a*?06Yx;{{#R40000006YKy05mKB05A-fW(EQ}26QX{0N@OlrUn8!2DB^y05}bo z<^}>P2IMRN0Qe1;76$@41|%&208kE>RtEwu23Rcs0Hh9=iU$HF281mD0Dupdwg&<_ z3B)Y`0KgFc|Hzk-{Q(n2R|?+U)6@(f0744@C;$Ke@GSrUToM2OqyPW_06`8god%u- zLJb{3lLV9lRtnzT)6@(f0744@v;Y7A94-I=*b)E#xR+4~0v`lNlnYpwbO-`13!M(0 z4Mds+n*^K#m#YW@P!03|007i3007t&|Nlgn2?+ul9GnYHloOK_M4Anm4ndm?0#4gO z)YC!Q*-F?!+?PNJ0wyL_)7078+e(uZloOc_nhl!_MGDl@LE70t+d&IT)7078+ez3# z+?R|A0zwm<3rh;vLEJ$LN($IP+(8RY*xZ-l2?7&;Pyhe`%r5`{C>a0$0096106`8g zod%u-K@A;2lLV9lM+)BE)6@(f0745vl>$eW0an)4Lf+le)C?a0K@1%*lLV9ll>(gx zo&`zOL6riQ0Z7)>Bme*aATR&`)ENK&WB>pF06`8Qodli(LJb{4lLC|hQ{LUv)C?a0 z5C8yw07x(Z02CSj|0Dna0000005kvq09-Ht04N#%|JVQk002P_Ae{uB149iRL6iYS zlLA)LL*Cug3?BeN3>_eo0+a!r1fBy((Z5FM8~ z4gwq;NtJ{FVhY{f)zj1r9{@oM001-q004Y6001x^mpTpt93M%QgaKj--QLyH)C?a0 zK??u?Gynhqv@`$!P#^#QRF^#t0vQ}hm4pFe3fA(xg80zw9qH2?rmBbVL|0v8`lmJdUi4VMl~l@LOj z3rW`1LfF?q+DG2q)6@(f06~`&4+6Lbh&BKKq$QWq4+4e;6gL0>ASahz5CSd+NH+ig z+$Wck5CSGafB*mhcsBq53@QKr6aoMM06`95oeZ7}LJb{3mj*(V2}zR*LYf0cmIsvx zN0|he1#b%3*g*?H+1N(b)j`ui+Lr+l0vdms1Dy<>3qje~LDSXN+DwxQngdSLLE1u= z2bBmxnFN>xL6ZtWngaj;Pyhe`{5Jpqpeq0WOaTA@074Ehod%u-K@A;2lLSTz-rds- z9{@rNL6riO16|eBLJS`O)7{=e3>`3&1e61n0-Xk)1zZZ%)ItkE3f0s>3q#e^hyVxx z08lsp0GunAnh^qq2HZFR0B|jrCK3WJ4U_-?02Da@0FW;K|4^4<5&{!{mnuk>E0rul znJAbkLz*W+lr57k0!7r*Le|wm+1NqaNZ#Gs*9;#3LJS>%1CuV4EtM>mE0-#mDVZpm zCz~dnFrF_#)YC!M)j`?VLE1u-E|e{mES4*nDVZpmCu7sp)z;YA+Ch{plP*D)E0ruk znJAbkL7FE3AOHXWtT_M%07x+Z|4f&W3IZ1jEC2ui&^Z7AbTF4z69Ot706G8w%rO7| zGynhq00000XaE2J6gmI^@G$@Xl$V_o0w4iemj)FAA(!710wEoMIsgD1GXMV!00000 z000130001#IsgC|GXMXSmmL%W9RXUGs}uqrmsu177e(+o002}o|NjgC00000000mG z000C#002-k|NjgC00000000mG000;}002xg|NjgC00000003M7001mI002lc|NnHC 
zO%(zc0bZA76#_pSj5`1TU^M^#cmMzZ06`8Qodli(K@A;3lLAhczZC)*2EaQ20HidR z-W38u1_(R=02DQsCKdu(253A00K7Jrh86-Qf0R4`02nv_|6~II002P_@B^Jbo;^Vg z9YK>klsiJ4Geng-mN`S1H<>nG3f|q^*xA+A)6@(f07464mpECQG@3PX*V@}kmpGa= zn>0<^+Dr=9Lkm(0*Fp!nl(X}IM4t90Q@`v02n&|{}clN002P_P(hsto(Dk; zmn# zCIw!P6oh0000z2m);rz zA_nL{007KGmj)XGCK4b3000<4000z3|Nm$J0000_m);5j76v>)003A-mxdbx9tL1R z003-6m#P~AZU)pr0078EmmVAfA{zWb0000-|NjgC00000002Ay000m|008_(ms%VG zItfrh002x!|NqpNF&Y9Cm$4iI9|?>?008Jn|NoemZ5RR z0000%4$u`to!SDP*#bcg9YK@W0+iPRLYCD6NR`$CMVQk9m(&7i*w;bTLY3A6NYzUU z-QL#I)C?a0K?_Qi)&iH*0+`bRN!3Bv*Fu%n0!h_DmDU26)B>2(0!G!>*g=@n0+-YR zL6z15002|~002Zo00scSO8@`xmysO;6c&QD000006aWAK000002LJ#700000 z7XSbN00000C}02p000002LJ#70000I0Ju~D0000000;m8000000C1Nv^a4LII8*=t z00000|NsAQ000001ONa400000@c;jB00000$WQr~v=~ z0000o000050001}r~v=~0000p0000500026r~v=~0000q000050002Er~v=~0000r z000050002Mr~v=~0000se*gdg1poj5$fyAT00000Hvj+t1poj5(5L|b00000H~;_u z1poj5;HUur00000IRF3v1poj5=%@hz00000IsgCw1poj5@TdU*00000I{*Lx1poj5 z_^1H@00000JOBUy1poj50000000000|NsC0|NsC00LK9U00000e*gdg0000002s#s z00000000000000005Hb^00000000000000007%CH0000000000000000AR-f00000 z00000000000C>j%0000000000000000FcK40000000000000000I0_S0000000000 z000000Jz5i00000e*gdg000000079x0RR910000000000007v>0RR910000000000 z008jE0RR910000000000000Qc0RR910000000000001b+0RR910000000000002P9 z0RR910000000000003CX0RR910000000000003~v0RR913;+NC000000001h$d@7p z12zn3m;e9(0001dm;eBmKn4RC48VT?000000L*^?0GB`p0~id`%<00000000000000000000000933IO1L00000000000000000000000933jh$6 z00000000000000000000000933;@i70RR91000000000000000000934FKSS0RR91 z0000000000YXATM0000301g1i=m7u#0000000000000000000301p7*@&Nz<00000 z00000000000000301yD!@dE$=0000000000000000000301*K2@dE$=0000000000 z000000000301^NIm%#`F6$29haF_82178vNF9ZMp000000000000001mvIXNAW4`2 z000000000000000073u&00ICD0BD#10000002BZK0000006zc#000090Kk6$00000 z000000000006zc#0000C0L+7z$_WE5Da?NW00000000000000006zc#0000C0Mvs4 z0000000000000000F9Sn2?HO002~1T000000000000000n*aa+0sspD7$5-v00000 
zECK)k00000KL7v#000XBz?J|2000000000000000ssI20000mG={000316#%d;1ONa4 z000O800000000O9000316#%#`1ONa40000100000006200000j01W^rwgCVD00000 z00000000200000001p5F8v+0T00000000000000J0hjs>10GR8nE(I)0000000000 z0000#0000001E)h($0000000000000200000001yD+@dE$=00000000000000S z0RR9301E(cx&QzG0002Qm*EQo9|1|10Sp5d8C3xQ0096L01N;G0000000aO400000 z0Be^q30RR9202Kf@1_S^A0000m000000001^0RR9202Kge1_S^A z00008000000002Im%$7J7k|$I000316##Sw1ONa4000C400000007?s000316##e! z1ONa4000C400000008s>000316##$+1ONa4000mG0000000033000316#$S11ONa4 z004jh00000000vL000316#x(i1ONa4000C400000001Zg0003160ssI302KgO2Lu2B0000400000 z000100ssI302KgW2Lu2B00008000000001Km$3~47g2r!000316##4p1ONa4000C4 z00000005E#000316##Gt1ONa4000C400000005@~000316##Sx1ONa4000C400000 z00620000004FKSmfe!;05m@s90000000000000000K1p@4FexCfS3RP0000000000 z00000#sUBU0sspDfS3RP000000000000000$N~TW0sspDu$Y%h4g)3`+X4Up0sspD z@R$Gq000000000000000@t27X10R3z@dE$=0000000000000000{{R401*K2@dE$= z00000000000000d0{{R501E&(nE(I)0000000000000200000001yD!@dE$=00000 z000000000p0{{R401yD!@dE$=0000000000000200000001p84@&Nz<005UK4+9|q zQkM}C10O*c&Hw-a0000000000000200000001W^jmH_|&00000000000002000000 z01p5t0RjL300000000000001Bmk|*IAAfk#00000000000000000620000004FCw3 z0RR91000000000000620000006##$-1ONa400000000000040V000316##$-1ONa4 z001xm00000004ah000316#%dY1ONa4000C40000000620000004FCw70RR9103rYY z00000000200000001p5d0s;U400000000000001tmk|*I9~vO*000000000000000 z00620000004FEW$m!c2@E|&-r0~si#0RR91000000000000620000004*0RR91000000000000620000004*)nL0ssI20000000000 z004*tm--C@9zrNW0RR91000000000000620000004FHhH0RR91000000000000620 z000004*(!00ssI20000000000005T+m--C@9)I{l0RR910000000000005-~00062 z3jjDo0RR91003A400000006)Q000623jmNs0RR91003A400000007+t000623jpXw z0RR91002}000000008;~000623jjb!0RR91003A400000000;T000623ji=#0RR91 z0Dk~f0RR910000$1poj701E(!S^)q60000`0ssI20001G1poj701E(6Zvg-R0000K z000000001v1poj701E(sd;tIe0002g0RR91000200000001W^b$pHWW0000000000 z000200000002Kh>E(8Do00000000000Dk}gtOWo70RR;M;4TCJ000001ONa400000 
zy#)XO0RR;M@7e0RR;M=q>~R000001ONa400000+ywvt z0RR;M>@EZV000001ONa400000?F9e;0RR;M@Gb-Z000001ONa400000ssI2004x9v z0C3I$0000000000000000IC200000F0Qe>X0000000000000000RERz76Tsvz?Xp! z0~ZnaHUa00000000000000008<740096LN&qkh1ONa4 z0000000000003bI000623jn}_0RR91000aC00000003VG000623jn}_0RR91000aC z00000003|X00031@&6E)!50G;Dd2wq0000000000000000EPwt01^NT01y%Z00000 z00aO4000000FsxH7y}mpr$$3>RTl2mk;Q01E&})Bpeg00004000000001E2mk;Q z01E&Z5&-}J0000W000000001P2mk;Q01E(UKmh;%0000$000000001l2mk;Q01E(c z{Qv*}0002+000000001#lfeuZF{KCq01^NT04NRt0000000;m8000000JjJL01^NT z0D$)Z0000004M+e000000LhcV3>N|1lfeuZN#zIt01^NT0BAk|0000006+i$00000 z0Q(3401^NT0FV*^0000000aO40000000jvE01^NT0N^eG0000000;p90000002Pyw z3>N_|laUM;VL=H101^NT06@zC0000000aO40000008$A601^NT0Pvpx000000DJ)e z000000A~pR01^NT09bwj000000GI#(000000DTDn01^NT09auG000000CWKW00000 z0FaZB3>Pt@2><{R01E(E%K!iX0000y00000000292><{R01E(k>;M1&0000a00000 z0002RlaUM;f9weW01^NT0C1ZC0000002BZK00000009aB01^NT0BA7*000000LTCU z0000002B%U01*Hc0C*S#0000000;p90000003iwh01^NT00>e6000000AK+C00000 z06Pi*01^NT0KgUj0000006+i$0000007?n~01^NT5&-x^0RR91002M$00000002~% z(H#R90dtqp9Rn8ue3#K30~ddf3IG5S01E&(1OWg50000m0ssI20001{3IG5S01E&Z z&Hw-a0002Y00000000253IG5S01E(!5&-}J00004000000002F3IG5S01E)n7y$qP z0000G000000002U3IG5S01E)nECB!j0002+000000002k3IG5S01H|GKo7Xc)b;Sd)AFO%UA7ne{U0|-GN z>i_@%0000u0RR910001E3jhES02KhZ2Lu2B0000G5C8xG0001V3jhET01E&xG64Vp z0001_000000001qli?5-0iToM5Eo&{3jhET01E)1nE(I)00017000000002h3jhET z01E(kr~m)}0000)000000002(3jhET01E(kLjeE)0001d00000000303jhET01E(! 
zbpZeX0000i0RR910000KlhF|uF((WF01^NT0EiX=0000005AXm0000005c2#01^NT z0N4ou000000AKv z00000oB#j-00000f0NM>7k`}$000sI3jh#L0RR91000C600000006fP000sI3jo+U z0RR910000100000007Jk000sI3jjF9000000000100000007?%000sI3jp}600000 z003A400000008q0000sI3jm35g8f{000sI z3jn~$00000003+N00000001kKu?-hNJq-W=5&#PTSgZg500000lmGw#00000Qw;zB z5&#PTNGkyV00000@Bjb+00000V+{ZR5&#PTz!U)h00000Y?sj?10Mm2ld%mKF`Nwm z01^NT08nlL000000Ehqp000000JjYQ01^NT07$R^00000089b^000000ML`M4HseP z4FCWU02KhZE(8Do0000G000000002;4FCWV01E&pn*aa+0000C00000000094gdfW z01E)nZvg-R0001B000000000X4gdfW01E&}!~g&Q0002+000000000vlTi&90ZNlm z4Hp4jlTi&9NpKDT01^NT0I2r>0000004M+e000000D%qw01^NT02omL000000Pp|+ z000000F({@01^NT0I;S20000006YKy000000H~8u4Hp5slTi&98O{y>01^NT0FV>` z0000005AXm000000NRsL4Htj=4gdfW01E(^;s5{u0001l0ssI20000A4*&oX01E)H z#{d8T00004000000000O4*&oX01E&x76AYN0000m000000000Z4*&oX01E)HcmV(a z0002c0RR910000s4*&oX01E)P+yDRo0000K000000000!4*&oX01GMrcntvn00000 z1ONa400000P7eS85&#PTC_VuI00000KmY&$00000W|J`t7iD-4000sI3jnA=0RR91 z002M$00000004#$000sI3jm0-00000005W;00000006cR000sI3jlb*00000008g+ z00000007Mo000sI3jnxq0RR91001xn00000007z#001JBVF42v^$!355&#PT5N!bf z00000Kmh;%000002a{n87k?fQ000sI3jpXp0RR91002M$00000001x$000sI3jm<1 z00000005u?00000002Y~000sI3jiSA00000005)_000000031G000sI3jo+$0RR91 z002w?00000004Fn000sI3ji=30RR910086y00000004*(000sI3t9lU#{d8T0001h z000000001^5C8xY01E)f_5c6?0000e000000002C5C8xY01E)f7y$qP0000400000 z0002N5C8xY01E&h_W%F@0000C000000000<5dZ)Z01E){tN;K20000400000 z00018lOYWkF>Vn601^NT06541000000Ehqp000000EZC(01^NT0I2Q&0000008{_~ z000000FjfC3>O)g5dZ)Z01E)1%K!iX000000RR910001{lOYWk0lSky5f@?75dZ)Z z01E)<+yDRo0000q0RR910002y5dZ)Z01E(+HUR(t0000)1ONa40002-5dZ)Z01E)X zmH+?%0001}0RR91000005&!@a01E)nVgUdE0001Z000000000Fmyss}7a1rL000sI z3jjbo0RR91001Na00000002UlktYKe0b`euCj%ErZ4v+g5&#PTfD!=!000001ONa4 z00000coF~r5&#PTAQS-r00000H~;_u00000g%SV&5&#PT7`gxe00000Q~&?~00000 zqnD8<0~Z0dmyss}7XitaktYKe8Qc;801^NT0O-yD0000002BZK000000PB~LCj%FM z1QP%N5&#PT;Clf8000003;+NC00000BohDt5&#PTXm0@k00000JOBUy00000I}-o^ 
z5dakcfEfe;000000096100000NfQ775dakc_$~wh000000096100000R}%mL5&#PT z02cuO00000KmY&$00000W)lDa5&!^~=O_aae~uFX01^NT0C;!-0000005|{u00000 z0IL%K01^NT08rEb0000008jt`000000K5|b01^NT003kG0000000;sA000000Ll{p z01^NT07$a{0000006+i$000000M`=$01^NT09Y;o000000Ehqp000000O=C|01*HH zlVbr9fBX{w01^NT05EU?000000Ehqp0000001y-a01^NT0JwGm000000N?-s00000 z04Wpz01^NT0GJp70000005AXm0000005%i=01*Hc05BK?0000006_o%0000007eu5 z01^NT04P5J0000006+i$0000009_OS01^NImuD&i5lL|r000sI3jjbp0RR91003kF z00000004><000sI3jkOb0RR91002M$00000005K}000sI3joOM00000001xm00000 z00500000006cX000sI3jnYH0RR91 z001Nb00000006`l000sI3jk<80RR91002M$00000007n%000sI3jp{P0RR91001}u z00000007^Yu`2@?8SoVV01^NT04T5k0000002}}S0000000xsm6cV$B01^NT0Eh=pn35&#PT zXt@9Y00000tN;K200000{1yNJ5&#PT(DwiU00000EC2ui000005|@E40~dcA7XSbf z01E(E>;M1&0000W000000000g7XSbf01E)nLID5(0001}000000000s7XSbf01E&( z3;_TD0002E000000000$7XSbf01E&p^8f$<000130ssI20000@7XSbf01E)X836zQ z000040RR91000157XSbf005VVE&~xYY!?6k5dakc$QJ|v000002mk;800000cNYKv z5&#PT2oV7Q00000`~Uy|00000kQV>|5tCs769J=_fi43Vf3+6?01^NT0MMiW00000 z0N?=t000000K*pm01^NT0En9a0000001N;C000000Mr)%01^NT0LU5v000000I&c6 z000000OA(_01^NT0MOh30000002lxO000000PYt601*Hb05Aar000000N?`v00000 z0Q(mJ01^NImxnF`5q}dH000sI3jk2}00000000yK00000001W#000sI3jp930RR91 z006WA00000001@^000sI3jmm>00000006iE00000005~7000sI3joO800000000aC z00000002!G000sI3jkQD00000002Y*00000003ne000sI3qk;>n*aa+0000q00000 z0001e7ytkg01E&hDggih0000K0RR910001x7ytkg01E&>nE(I)0001B000000001V zld%mKf1nru01^NT0B8pR0000001N;C000000IwJT01^NT0GM|H000000CWHV00000 z0L2&p01^NT0D#i~000000Js1E000000Noe>01^NT07zj00000001N;C000000Q49D z01^NT0BFJh000000Pp|+0000001Fua01^NIm-jFO5dk2V@h}4yVMZAM01^NT07%OK z0000000aO40000009P3R01^NT0HEvu0000003-ka000000BRWk01^NT0DvU{00000 z0Q3O>000000DBn#01^NT09X?N0000005|{u000000EL(FFasAcpcw!F5&#PTU=;xX z00000KmY&$00000tQi0R5&#PTIMe_D000003;+NC00000x|i`V0~cY<82|th01E)1 z5&-}J0000;000000002i82|th01E){3;_TD0001d000000002w82|th01E)X1_1y7 z0001(000000002>82|th01E&x$^ZZW0000q000000000Alfe}i0Vk8e6&C?Jlfe}i 
z8BZDj01^NT0O&6P000000DJ)e000000A7>96&HVd8UO$i01E)+{&0000$000000002L8UO$i01E)+{&0000$000000002Z8UO$i z01H9@=s*Dg00000KmY&$00000;~D?}5&#PT5S0J`00000)Bpeg00000@)`gD5&#PT zI4l7G00000m;e9(000001DBCA0~dc48vpj`5&#PTs6qh%00000C;$Ke00000n;QTC5&#PTXa)fQ00000 zSO5S300000rW*hN5&#PTu<8H+00000SO5S300000yc+-j5&#PTcqah>00000H~|0v z00000%o_jz5&#PT`2GL@004gg0Gt2-000000OT7001*Hc0N57<0000008sz{00000 z0QDOH01^NT0PuSO000000FVFx0000001_Mk01^NT0O*wf000000Mq~g0000003aLy z01^NT0N8E;000000C)fZ0000005==}01^NT00`><0000002BZK001-q002!K000sI z3jio#0RR91000aC00000003hg000sI3jmlG0RR91000C4000000045A;WPsme}o(W z01^NT04QAn000000E7Sl000000HGWJ01^NT0H75C0000004M+e000000IM7T01^NT z0C*b#000000Ehqp000000Jj_f01^NT0BAu00000006+i$000000M8r%01^NT000mH z0000000aR50000000<2L01^NIleY~KN$eZ|01^NT0I2Z*000000DJ)e000000QMXJ z01^NT0B{Wf0000000;m80000000SKW01^NT0Pv;&000000H^=}00000034UWHUk#{ zE|tN;K20000K z000000000H9{>On01JNr@aO;l00000>;M1&00000ARhn#5&#PTcn1Lh000003;+NC z00000FdqN_5&#PT@PYvV000001ONa400000L>~YE5dakc&=&*%000002mk;800000 zQy%~T5&#PT@b&-z00000Bme*a00000XCD9n5&#PTn9BeF002J#00aO4000000CgV# z01^NT00`Fr0000005k#s000000E!;~01^NT000gF0000004M+e000000Gl5G01}g7 z0TVT@9{>On01E(+69E7K0000u000000002D9{>On01E(U$^ZZW0002g000000002Y z9{>OnlVJf9G2tHo01^NT064$^0000008jt`00000001BW01*Hc0C)!k0000000;m8 z0000002r4cIs+F0E0-ZU0~bk2AOHXo01E&x4gmlF0000e0RR910001CAOHXo01E){ z69E7K0000e000000001VAOHXo01E)%=>Px#0002c000000001lmmxX>7a67?000sI z3jo*+0RR91001Ze00000006d^AvyyWf65>L01^NT00?sd000000GI#(000000OlY7 z01^NT0N@w_000000Ehqp000000P!FI01^NT0H{9!0000006+i$0000000JQZ01^NT z0ALmY0000004M+e0000001Y7k01^NT0NCCD000000Gt5;0000002?6y01^NTe*k#k z00000005W)00000001W;000sI3jhEk0RR91008I!00000002NC000sI3jhda0RR91 z004vl00000002)R000sI3jkQd00000008g+00000003tp000sI3jjdb00000007(o z000000049$000sI3jpwP0RR91ZvX%k000000001tApigp01E)1)Bpeg0001N00000 z0001#Apigp01E(sdI10c0001B0RR9100021Apigp01E)17XbhO0000O000000002I zApigp01E(^{{R300001d000000002YApig%lVJf9f9N3q01^NT0FY(@000000AvFI 
z000000Q?~U01^NT01$Wq000000B8UJ0000002U$u01^NT05~fF0000000aO400000 z04E{<01^NT0FYk+000000F(g$0000006Zc901^NT0N64C000000JsAH0000007xPL z01^NT5&-z^00000006K600000002*yF+2ko8Db&;01^NT0B9Kj0000009XJ300000 z0Bn~rJOdX2eU~vj0~axeA^-pq01E)1a{&MV0002w0RR910001{A^-pq01E&x69E7K z0000e000000002JlR*&|0kD@bJOdYh&LRK+5&#PT_-O$E000006axSN00000-XZ`1 z5&#PTI1&K>00000H~;_u00000=^_9C5&#PTxEBEc00000C;$Ke00000`XT@T5&#PT zIGX?f00000Gynhq000007$X1x5&#PTuqXil00000r~m)}00000DI)*?5&#Pp0H{6z z0000006+i$0000006!xD01}g70TX3YBLDyr01E);M1&000000RR910001-BLDyr01E(6 zZvg-R0000K000000002DBLDyrlVJf99myjA01^NT0N8>70000002BZK000000Nx`2 z01=a60TTuFBLDyrlVJf9H3=jD01^NT0N|Sd0000001N;C0000002(9!01^NT0H|*P z0000003-ka0000004gK^01}g70TUfPBme*s01E)HT>$_90000q000000000{Bme*s zlVJf9Wo9G*01^NT064|~000000C)ia000000Ei?201^NT0H{3y0000006+i$00000 z0G=cO01^NT08q;S0000000aO4000000I(zg01^NT04P8K0000006+i$000000LCN$ z03efL0TUh7Bme*s01E(M`v3p{0002g000000002wBme*slVJf9fBGZ<01^NT01)B; z000000Ehqp0000000kug01^NT0O)Z60000000;p90000002(C#01^NT0I>J~00000 z09XJ30000004pT`01^NT0N4Qm000000DJ%d0000006irD01^NT007Sb0000002BZK z00000081qR01^NT763>h0RR91006WA00000003Mi000t`VF42bZzTW#Ad_JM6Mu;% z000sI3jly$0RR91000aC00000005mO000sI3ji3;00000003M8000000068d000sI z3jnwl0RR91000C400000006ut000sI3jlDT00000003M800000007Y?000sI3jlx= z0RR91000mG00000007%1000sI3l;!)(f|Me000080RR910002-B>(^tlVJf99Rnr+ z01^NT0O%3{0000007w7;0000002C$w01}g70TWUuCIA2u01E)f6#)PM0000G00000 z0000oCIA2u01E(U6afGL0000O000000000yCIA2u01E&B?*IS*0001_000000000+ zCIA2u01E&h+Lzis1G{BZXmVv?WKv~pWnpt=E@N+Qb98WWZ*VR$0B2utY;R{qX>4Uo zX>)V{XJ2q^Z)a3!a%Ev;0B2umb47S*bO2{xW^8qHXjEx!Wp8zK0B2utY;R{nY-wd~ zbO2{xWMy_!bYXO5m%Tp&l@vK=Uu0}=XGUywb7(_hY;0m-V{4a9Km)Q6HD_OFVRKSt zXK8bEWpZU?xB5T>EddlqWpqhyW^Y1yPGN0jE^uLTbS^NLCPD){mu*4=DJFAhVRB_( zZDnmPV*qS#XJ2G;b}nN8Y;R{@a&>NWX>DaLV*qDgm$0t^87606Y-wUhVQyq>WdLMy zX?A6DUtw@*E@J>^Uu1H2Ms;puNp5Cum%2g&HMtB z08nyiZgfduZBu1zW=wBqMsIRcWpZ|9asXdiF)~SWRA_Q#VPr*kX>@kC4n_m80}o+j zL~LwEb#7x*X>Mn1WtUH10~NQ>NdwgZ31?q$OmAmGb!NAuO9Phz8edv6F;#G6VRU6h 
zc4cmKOl5XuY(jZOZgzH;I!*&-5=3%#Ms;pubZKvHb5(9>ZfSIvpH2f*7++d3IaP3E zVRU6oZ)Z$pc4cfrc|~q^c9-%_15CGBPXmhrEM#nBY;SXAKtM-KNkT(kGA=PU099^m zZ((Faa%pyDazt!wMs;pubZKvHbC=^$1GW)gS}`$3Y;|*JL2z(PZ)Z?;mzYumpd4RX zF*ZbRRd8fsbY)C$XG~>wWo&i;Y-4C|cW#$(U;`SHtV|7;Wm5z93rufeWJGLiMs;pu zw_8*LNC6T=Y;R{$WpHnDbVg}zVQgu7Ww)YL1N#CHRc>r=VPsNuZggpFWkhUjx13l5 zX#p5tS}{3EZfSHyc4cmKOl5XuY(jZOZgzH;1X=^T8(&&7GE;P6a&%N^a%Ev;MsIRV zZ)Zkqb#rKTlQB9Jx6)bzTmqLvT?57#Np5L$OmAmKY;|*JRB3HxZ*_D+c|~q^w+LPX z6ao}PY;R{$WpHnDbV+V$bZKF1X?kUs8(;%>7GGa*a&KpDVQpnVKu1hTLPK9NE-^Q^ zkzfN?0T4`YXGUywb7(_hY;0m-V{4b^VFTB<%whv%0hcah17ZRk3YU^(11=H}3;+NC z0000G3;+NC0002^D3{V?11=H>H2?qr00008H2?qr0002M8|i~00000005VXXahG9aP$KJ00000aP$HI00000keAVD1AhX@DVI`d18xHNFPFk; u15XS(00000000>P006?59clwC2|)t@00000StgfKY6CU`L1>qXY6B({sz(I? delta 88078 zcmV+W{{#Sl;0>_V4UiZT0FM9w00000KmY&$00000@MDoGHh*v)0ssI2003|v0ssI2 z000000RR9100031000I6003a}0ssI2003a}0{{R3003a}0{{R3007Vv00000001yL z00000000000RR9100062000I6004;c0ssI2004;c0{{R3004;c0{{R3003|T00000 z003|T000000FmEGB3RP_0000009exj0000009exj000000CWie000000CWkHN&!wH zVDbV000000VDbY100000VDbY100000pbY>300000pbe9{0UjNbt7h`opA!rxV|_$& zbbD>_uD06%000070RRAYll1`~4C5)pk{|$wNPs9n$de!fDSxO!5`nM)7_}h+g9AVg zAZRF9RS-HEII{pV005_e0t6!p5HJv6P(&f5hM<@v01g140nUiy2^dfip#ZRD1K0xy zKnNf~K}-NZ004+V7yvN1002q=01kx^0776E0pmhk01ALlM3902ga{Hg5fC6|P{|7c zP+>jL0T5CJ2!93@8884=2mzG@ssIQ~XHx*w00;;YMAiThAjnB1kOBY=0H6RVK7t5{ zPy~QLL^2Rj0Inngg8+5_00000004IY00000004Uc004Xd00000004ae004jh004mi z004vl004ym004>r0000000000004^s000000052v0Dl0G0001y0001!0001$0001& z00000000000001(000000001+0001<0001>0001@0001_00000000000001|0001} z0000000021000230002600028000290000000000000000002C0002D000000002G z0002I0Dk}gy#N3J0000000000z5oCK!~g&Q00000#sB~S00000#{d8T$^ZZW%m4rY z&Hw-a&j0`b(f|Me)c^nh*8l(j*#H0l+5i9m+W-In+yDRo00000-2eap-v9sr-~a#s z;s5{uHq)$>i_@%?IZvI000000Pg?*000000Pp|+0Pz3- z000000P_F<000000QHk$0ULkz000000002@0002^0002_000000002|0002}00030 z00000000010RR960RR970RR980RR990RR9B0RR910000C0RR9E0RR9G0RR9H0RR91 
z0000H0RR910000L0RR91000000000M0RR9100000000000000N0RVph8UX+R00000 z9svLV000009{~UWAOQdXAprmY00000A^`vZB>?~cC;q<%00000mjM6(m;nF)ngIX+oB;p;p8)^>paB2?00000000L70HFZ@0HTu_2Uipn zX6_>}4Hs~)x8eMZ%5qR22aigVj0Z>{ly`x{k0tKx3N9Lgf{@?VU5Y+>hhFcJEESZ@ zOnP%Ulkf*1lN1OrG?K1l&CV|Y9E@3B5_XF$;uKd$)yXGrt5&JqrzdAH$<>u86yErW zYbt5)i4AljS?WOnW_|*b%n5*#PzsEb;0kDyObb{P z1#3+iYo4~eF?MZDJP5fxr>7Rpleh~YliUkT1_J;I0I+(KG7KgG%aaig7Xew5aSRs$ zj*}4&7XfCIF%TC4#*;A+7Xea}0Sy-c4wLZ>7Xea}Q4tpbcaw1p7XhS`F%1_1NRy!q z7Xh%7p$r!Rw38tb7XgryQ4SXY@00Nj7Xhu4F%1_1wv#ap7XcrW@eLON%ag$k7n3jz z7y+@D;VT0d0auez4i^DmlQ9q%0k4w*4HpG*0RR9IlVJf90cw*04Hp3wlW`0e0riss z4Hp4mm*Fb|7Xd<(5f2vu;FB>87XgivK@S%N(*pni5|d#869JBsF%1_1os)447XkZ| zQ4tpb9+zP)0~Y}TlTi*A0i=_03>T6R7y;jt5f2vu&y#Ts7XjRp@eLON*^{9R7Xh`C zArTjokPsLJ0~!DT5tCs769FQVp$r!RHj{A-7Xbp30Sy-cS(o7}0~Y~+mr)-B7Xj*% zaSRs$_?Mv?0~Z0@lTi*A1$Get01=a60TTf^lQ9q%0a}y64Hp4)lTi*A0hE&w4;KO2 zlQ9q%0dbQd5f=eqlc5Y31)c){01}g70TTg{mr)-B7Xi1Ep$r!RS(7mk7X`Tm000q_ zVF42X@sn{37XdGmp$r!RLz58?7Xd$$ArTh=!jn-B7Xc@eaSRs$`;##c7XcTOQ4tpb zO_xz00~Y}olkp7~0TGj-3>N{(lW`0e0j`q)4Hp6SlK~AE0jiU63>N|Mlfexa0q>Im z4Hp4;mr)-B7n3*<7y%`hVJ!m~mr)-B7XgWraSRs$a+gsb0~Z0#m!TR17Xh%7aSRs$ zwU<#J0~Z16lfexa0R@v$5f=d$lTi*Alh6_vleiTa0XUN(5f=e=lQ9q%0Rodj4;KXm z8UO$hlVJf90S=czG6NR@sh8m^0~Y}$lQ9q%0RxjU5ElW4lQ9q%0oIdo3>TO1ZUY39 z+!ho8nUg^e7Xez6ArTjoAQu<`>5~Bs7XeF?Q4tq^rw{-D5&#PTXb}Mb000001ONa4 z00000#|;1g5&#PTKn(!^000001ONa4000003jzQD5&#PTu$KS;00000Kmq^&00000 zlo$X25&#PTpkV<300000m;e9(00000-WC7=5&#PTs7nC=00000i~#@u00000v;Y7A z5&#Q-04S9J0000001N;C0000001pEI01^NT0I2N%0000003-ka000000Db}h01^NT z0Fayj000000N?=t000000M`ou01*Hb02lxS0000000aO4000000O%C}01^NT0LVQ7 z000000AK+C0000008aq`01^NT0PvLn0001g006)N00000006!V000sI3jmPp00000 z008U&00000004Us000sI3jp8{0RR91001xm00000003wL000sI3jk>E00000001li z00000002f9000sI3joMo0RR91004*p00000004Ir000sI3jlB&0RR91008g+0001g z0000k7ytkg01E(UUI73A0001B000000002a2><{R01E&B+yDRo0000m0{{R30002< z761Se01E&(O#uJ^0000C000000002^4gdfW01E)H3IPBB000040RR910001I7ytkg 
z01E(sU;zLC000080RR910000m1pokl5&#PT81Dc800000C;$Ke00000y#fFL5&#PT zpzr_y00000SONe500000hYJ7z5&#PT7!Lse000007ytkO00000J_rB+5&#PT@C5+? z000001OWg500000c@+Qv5&#PTur&by00000*Z}|l00000_yPa`5&#PTke~p600000 z0Js1E000000J{tT01^NT0C)}o0000004M+e000000Luyh01^NT0EpuN0000003-ka z000000Hy-~01^NT0GOiy000000H6T?000000Br*R01^NT0Jxz5000000GI#(00000 z03i|p01^NT09cU#0000006YSJ00000006KU000sI3jjD`0RR910086x00000004Fe z000sI3jlD!00000002Ay00000006QL000sI3ji<;0RR91000C400000001l&000sI z3jjz#0RR91003YC00000001Ks000sI3jjD70RR91008^|00000005?c0000I01E(M z2mt^90000e000000000c0RR9J01E)9l>h($0000q000000001G0000I01E(!2mt^9 z0000e000000002=1^@sO01E&(!2kdN0000G000000000O1ONaM01E)HrvLx|00004 z000000001P3IG5S01E(r*y8{I00000FaQ7m000000ucZJ5&#PTND=`6000001ONa4 z00000-wyx)5&#PT2oC`O000005C8xG00000JQV-{5dahb5CsGP00000AOQdX00000 zCjtNf5&#PT@R000000AvRM01^NT0BFJh0000000aO4000000Qdy}01^NT z05}l=0000005AXm0000001OoX01*Hc0B|b=0000000;m80000007DP}01^NT01yxX z0000006+i$0001g0027%000sI3jk2M00000008g+00000001oq000sI3jlz@00000 z007hg00000006xW000sI3jjC`0RR91000C400000002J@000sI3jp8<0RR91001Zf z00000005g2000sI3jo-00RR91000yK00000006EN000ty01E(!ECB!j0000;00000 z0001f4*&oX01E)<3;_TD0000u000000000M3jhET01E&J$_90000K00000000157ytkg01E(6UjYCB0001g zFaZDn00000tOfu85&#PT5WfHb00000cmV(a00000W)%Pc5&#PTa54b^00000Gynhq z00000G6?_x5&#PTu+snl000007ytkO00000-Vp!*5&#PTh#vs}00000d;tIe00000 zU=IKQ5&#PTzzhKZ000001ONa40001g0J;|d01^NT0O(u+000000Ehqp000000R0#M z01^NT0H|gG000000N?-s0000000|cW01^NT07y*%0000008{}0000000F(y+01^NT z0Jy>c0000000031000000B{Ka01^NT0Jzlv000000Gt5;000000NWD)01^Oy3jlx~ z0RR91000O900000007AZ000sI3jnwa0RR91008^|00000001Kc000sI3jj!=00000 z001Na00000007Vd000sI3jhG&00000007Vc00000003nY000sI3jn}p0RR91007Vd z00000007$z000sI3jm-F0RRAh0000e000000001R6#xJd01E(EH30ws0001300000 z0000h3IG5S01E)n;Q#;t0000;000000000N82|th01E(!X8`~J0000u000000002X z0000I01E&(l>h($0000q000000001?82|th01E*4Zvg-R0002s0RRAh000001PTBE z5&#PT==lHu00000+yMXp00000w-x{Z5&#PT2uc9}00000i~#@u00000of7~65&#PT 
zU>yMf00000lmGw#00000jSv6;5&#PT5D@_Y00000C;$Ke00000F%AF#5&#PTsMP=f z000003;+NC00000m01^NT003?Q00000 z0Q3O>000000B{8W01^NT09djB000000H^=}0000004)vx01^NT0N4lt0000000;m8 z000000FM*^01^NT02nO+0000007w7;0000008bDA01^NT08kKr0RR91001Ze00000 z000XR000sI3jlBr0RR91003+N00000005#5000sI3jhcO0RR91000C400000008I` z000sI3jpwO0RR91000C400000007q%000sI3jkm(0RR91002k;00000000;a000sI z3jj#)00000003Bj000000001C3jhET01E(^4FLcE0000;000000000Q2LJ#P01E)n z?EnA(0000e000000002M5dZ)Z01E(UA^`vZ0001x0{{R30001t6#xJd01E(!Hvs?u z0000)0{{R30002+3IG5S01E&J01^NT05IYJ0000005|~v000000D2Sv01^NT z01z($0000007w7;000000R8{~01^NT0Em?U0001g000aC00000004#q000sI3jk1} z00000002Y*00000007+z000sI3jna=00000001Na00000005#2000sI3jhfJ00000 z000aC00000006-V000sI3jh!W0RR91004*p00000005X5000sI3jmN<0RR91000yN z0001g000052LJ#P01E&}!2kdN0000q000000000$1ONaM01E)%4FLcE0000e00000 z0000y0ssIK01E&x#Q*>R0001(000000002*1ONaM01E(ctN;K20001>1poj50001Q z4FCWV01E&Z{{R300000`0ssI20002H6#xK#5&#PT&^iGC000001Ofm600000JqrK; z5&#PTF!ule00000bN~PV00000i4gz*5&#PTkP`s_00000KmY&$00000;sgKy5&#PT zFsuLo00000JOBUy00000*Af5#5&#PT*c$-=00000m;e9(00000Z4dwe5&#PTs1N~v z0000005|{u0000005ll@01^NT0Pt!70000001N;C000000QM9B01^NT05~oI00000 z07w7;000000C5=r01^NT0BCIi000000GI#(000000GR*)01^NT0LYX80000009*h7 z000000L2Ud01^NT0I2r>000000GI%O00000007tn000sI3jjE!00000004Lb00000 z000sb000sI3ji<}0RR91000C500000003bO000sI3jhf900000008g+00000008L# z000sI3jlDH00000000yK00000005o>000sI3jlDQ00000007_t00000001g~2><{R z01E(^(*OVf0000K00000000283IG5S01E)f0|5X40002E00000000122mk;Q01E(M z$^ZZW0002E000000000m5&!@a01E)%8UX+R00004000000002h6#xJd01E)vJOKaz z0002+000000002)3;+NU01E(rK==Rv00000Bmn>b00000#S{Pl5&#PTs4W2i00000 zNB{r;00000QwIP55&#PTV8Q?Z000001ONa400000ZwLSY5&#PT7|Q?v000003;+NC z00000301^NT0LU%@0000007w7;0000002~Sc z01^NT01)5+000000PFw&000000Kg9b01^NT06-1_0000005|{u000000PPU~01^NT z0Qe;V0000001yNK0001g002@3000sI3jjd700000008g+00000005{L000sI3jnBF z0RR91002M%00000002oB000sI3jlag0RR91003wJ00000005N@000sI3jhEG0RR91 
z000O800000006uS000sI3jk>200000001BW00000000CA000ty01E&Rxc~qF0000` z000000001s2><{R01E(E*8l(j0001(0RR910002L2><{R01E(k*#H0l0001l0ssI2 z0002v2LJ#P01E&J5&-}J0000$000000001&2><{R01E)f`v3p{0000m0ssI20001T z1ONaM01E)HsQ>_f00000)Bykh00000J_!H-5&#PT$kPA-00000Gywnr00000MFIc- z5&#PTSepO<00000Gywnr00000R2cvO5&#PT0BZpN00000kN^Mx00000p%efB5&#PT z@GSuV00000NB{r;00000Y!3hc5&#PT$P57h00000AOHY=0000004opx01^NT0Qe69 z0000002lxO0000007?V^01^NT08pp^0000000aO4000000BHsQ01^NT05H4&00000 z0Pp|+0000009XY801^NT01&bO0000007w7;000000C*Ar01^NT0O%S4000000Pp|+ z0000003QW^000sI3jjdx00000001Ze00000000{h000sI3jp8`0RR91000;O00000 z003YU000sI3jk;<0RR91002k;00000000jG000sI3jmmv00000000aC000000006O z000sI3jioT0RR91000~U00000002u5000sI3jnx(3;_TD00004000000002|2><{R z01E&x-v9sr0002s000000002D6aWAc01E)1F984m0000;00000000137XSbf01E)% zQ2_t|0002A000000002y2mk;Q01E)<%>V!Z0000q0ssI20002(2LJ#P01E)f4gmlF z0001g2mk;800000O%wnC5&#PT$RPm$00000oB#j-00000gbe@y5dakc&=&*%00000 zQ2+n{00000lmY+%5&#PT2o3=N00000H~;_u00000cnkml5&#PT`1Jq)00000m;e9( z00000_YVL75&#PTKo0=`00000FaQ7m0001g0JaAJ01*Hc0B{Ec0000000;m800000 z05}5x01^NT0C1rI0000007L))000000Cg7t01^NT0H9O>000000E7Sl000000FVy= z01^NT03Zzk0000001N;C0000007wh~01^NT0MPRQ0000005kyr0000006`J}01^Oy z3jhcq0RR91006)M00000005#3000sI3jjFk00000003A500000007k&000sI3jlCk z0RR91004LZ00000006}k000sI3jp{l0RR91002k;00000005%|000sI3jm0#00000 z005`}00000006fS000sI3jk0J0RRAh0000;000000001d7ytkg01E(!VF3UD0000K z000000000w761Sd02Kg`D+B-l000000RR910002!6aWAc01E)h($0000C000000000e4FCWV01E*4{Qv*}000130001g00000 z@dE$=5&#PT=>7lz000003;+NC00000Dg*!k5&#PTxTgRB00000lmGw#00000E(-tv z5&#PT`2GL@000003;+NC00000oe}^55&#PTI2r)}00000r~m)}00000oD2W}5&#PT zkoEuo00000oB#j-000003<&^#01^NT03gr+000000Nelo0000005c5$01^NT08st_ z000000GI#(0000004WRr01^NT0Qm9%000000Mq~g000000A&^c01^NT0Qg7&00000 z089V?0000007w=901^NT0H{I%000000AK0RR91000OA00000003SD000sI3jkQC00000002}000000004jm z000sI3jnZ?00000005|e000000002^5dZ)Z01E(+X#oHL0001B0RR910000)4FCWU z02Kfz7z6+S0000%000000001f2><{R01E)n4gmlF0000u000000002$1pojN01E(s 
z1OWg50001d000000002h2LJ#P01E*41_1y700026000000001g+X(;w5&#PTFbDwv z00000FaQ7m00000qZR-F5&#PT&`ALR00000Gynhq00000UjqOD5&#PT;Kcv{00000 z6aWAK00000-w*%*5&#PT@DTw3000002mk;800000oCp8_5&#PTs0RT600000hyVZp z00000>k|L~5&#Q-0MIM}0000007w7;000000NV%v01*Hc0KgXn0000000;m800000 z0EPzu01^NT0IR000170RR91 z0002M1pojN01E(k5CH%H0000G000000002D5C8yw5&#PTa1j9j00000KmY&$00000 zjtBq%5&#PTD9Zo<000001ONa400000l>q<%5&#PT`0W4y000003;+NC00000BL)Bf z5&#PTV7UMQ00000@Bjb+00000<{R01E)H2mt^90000e00000000053;+NU01E(+?f?J)0001l000000000T3jhET z01E)P<{R01E){ z*Z=?k0001p000000002|3;+NU01E(OX!!sD00000d;kCd00000y#W9K5&#PT0MP&d z00000m;wL*00000gckq+5&#PTC{+Of00000WCH*I00000=K%l!5&#PTfb9SP00000 zC;$Ke00000{1N~F5&#PTFlLkbG{uuLHo6>NUvqR}V{2byXlq|*bzyR3090>pY)55u zP-$e7>o#r=UvqR}V{2byXlq|)VQFlWS2r>%RB3HxL}_Mb0AE@$HB4`3Lv(U%Np53I zZ)a&^b^v2+Z)0m;XJvGBX>Dbbs5g|8RXE_6qE7=Blk7PulQcPtmmpmOB$r=V0~wQU zIpY*T1$M05aDZ*Oc;adtWdjvQV_|G;Z({&$Wo=_{d0%q?L~?dQb!Jy`X>?@(L}hkRY;R{$ zWn*?!XmVv?WB^2Rc2i|@b7^mGNp5L$07P0 zX>DahY;03$Zf9&|0B2uha&|^_ZevMqW^VvYZ((Fob#8QNZDmAkYyf0zZ*XO9v#m>{ z1e0%2&yyTc5|ge`29sD(Jd>_b46~$BZ~}h-00000009620K6Fh01yBG03ZMW0GE-L z1^@sB08ttM000000096203aFw01yBG03ZMW0GE-L1^@sA08ttM000000096205%!` z01yBG03ZMW0GE-L1^@s908ttM00000009620QngJ01yBG000000GE-L1^@s808ud- z00000003a}0{{R30009700000003Z=00000004OM0{{R30009700000000=2lj2k^ ze}I$$0000005AXq0000000RU7000000Em0R#X5000031ONa400000 zl>q<%000260R#X5000031ONa4e*gdg2$cZ<00000zySmR000000|Wp7000005S0M{ z00000&;bMh000000|Wp7000007?lA400000-~j{x000000|Wp700000Ae8|C00000 z@Bst>000000|Wp700000FqHuS0000000IO6000000|Wp700000K$QUie*gdg01yHM z0000000RU70000008o_y0000003ZSc0000000RU70000009cg)0000005Ads00000 z00RU7000000BDr~0000006+o+0000000RU7000000C<%F0000008j!10000000RU7 z000000DzSN000000AKq<% z0002M0t5g6000031ONa40002Ul>q<%0002c0t5g6000031ONa40002cl>q<%0002s z0t5g6000031ONa40002sl>q<%0002+0t5g6000031ONa40002!l>q<%000000|Wp7 z000031ONa40002+f0Y3M000005Ca4N000000|Wp700000_>}L00000e_#Uy0000000RU700000 z05FyT000000B{2Y0000000RU700000063Nb000000DuDo0000000RU70000006>-j 
z000000FVO&0000000RU70000007#Yr000000H6Z|0000000RU70000008o|z00000 z0I&lD00000e*gmn00000003B)0RR91006)P1ONa40009700000003Z?0RR91007Vf z1ONa40009700000003x~0RR91007_v1ONa40009700000003~70RR91008g<1ONa4 z0009700000004NF0RR91000041ONa40009700000e*gf0mH_|&0000G1Oxy800003 z1ONa40001pmH_|&0000W1Oxy8000031ONa40001xmH_|&0000m1Oxy8000031ONa4 z0001(mH_|&0000$1Oxy8000031ONa40001}mH_|&0000`1Oxy8000031ONa400026 zmH_|&e*gdgU<3pJ000000|Wp700000xRwC`00000a0CPZ000000|Wp700000z?K03 z00000fCK~p000000|Wp700000$d&;B00000kOTw(000000|Wp700000(3SxJ00000 zpacW}000000|Wp700000*p>kR00000uml7Ee*gdg00RU7000000N|DZ000000KfzU z0000000RU7000000PvOp000000MG;k0000000RU7000000Qi;x000000N?}!00000 z00RU700000005T(000000Pq9^0000000RU70000000@@>0000001yQP0000000RU7 ze*gdg008K)0RR91001Bb1ONa40009700000000270RR91001xr1ONa40009700000 z000=V0RR91002M*1ONa40009700000001zt0RR91002-01ONa40009700000002m_ z0RR91003YG1ONa40009700000003aIe*pjh0001R1q1*9000031ONa40001Zu>k-8 z0001h1q1*9000031ONa40001xu>k-80001x1q1*9000031ONa40001>u>k-80001> z1q1*9000031ONa400026u>k-8000261q1*9000031ONa40002Mu>k-80002Me+2{p z000000|Wp700000*s%cs00000&;WTLAz700000 z00000h?9|C7n9gt3>k0%0000000000005Z(0{{R300035000b={azOudI10c00000 z00000sQ&{1000000R#X5MgWs8UltjS0RR91000000002E{{sL3000011ONbplU-jI z8A|~G00000000000LcFX00000009I50LYV#Ul$r>00000000000002k{{sL300001 z1ONbO0F%C778zmz0000000000008L!0{{R300035003~4?Ozv@>|YEe`2Pa{00000 z0R#X5qW}N^00000000005cLB9000000t5g61Cz~R7a9Zr0000000000001EM0{{R3 z00066000F5lm1~A8GQf%000000000005J6f0000000IO60KJnfViy@b0RR9100000 z0000$^#cF^000021ONaAlU-sL8T`4F~`L00000000000DzOhVHcAqV+;+70RR91000000001xlL2HGlUQR6 z8VUda00000000000HE~)0000000IO60E+;Veq$C53jhEB0000000000u#-_@7n8VS z3>jAe0000000000006-C0{{R300066004KB-D4LG4FCWD0000000000(37!Z7n2xd z3>l;V0000000000007|i0{{R300066000t`J!BUR`2YX_0000000000@RJc^7n68o z3>uOE00000000000002?0{{R300066000mGlb&Q33;_TD00000000005R-vr7n9gz z3>tp`0000000000001EN0{{R300066000pHlm28D84>^h000000000005J9g00000 
z00IO6027lfWfu)I0RR91000000000$laXW>lW1iO8La>S000000000008sV=00000 z00IO608NvPWfvM000000000000001B_5%O_000021ONaP0F%CD78(`+0000000000 z0040I0{{R300066008{}lkQ~}8W;co00000000000D$%b0000000IO6022X|9%dFA zPyqk{0000000000koE%r000000t5g6832<`W)>Q#00000000000001>_5%O_00002 z1ONbq0h4}a7L#0M3=OdM0{{R30006600115t!5V)MF9W+0000000000!1ei_@%00000000005cdNB000000t5g6 zAd}5!7a1V{0000000000001EO0{{R300066007XF{bv^$A^-pY0000000000F!uuh z000000t5g6dy_3_7aE2D0000000000002Pu0{{R300066001KZlU`^R8YBP!00000 z0000008sY>0000000IO603`sEj%XGcKLG#$0000000000VD|$6000000t5g69+SOj z7a6_)00000000000040J0{{R300066001VF?PwR1%x4S^fcFCc000000t5g6P63l1 zX%>_GXABOI_X7X`000021ONar0h3N?78=F?0000000000005x(0{{R300066007wl zlYVIy8X5rr00000000000I>H10000000IO60PFyhu4xt-g8={l0000000000!1n_H z000000t5g6DU;o47aB$Z0000000000007YU0{{R300066000O9lMZSYlk8{=4dC|! z0000000IO604tL{Y8R6nX$%hV_X7X`000021ONaa0h4ZO78;WP00000000000002^ z0{{R3000660055xlb&i88D{|i000000000001)^C0000000IO60HKr3Y8M)Z00000 z000000000W_yYg{000021ONam0F(Y|78;@e0000000000001!f0{{R300066001oj zlP+r(8H)h`000000000006_Qy0000000IO604|eVYZnch0RR91000000000`laXo{ zlbCA^8ZZC=00000000000ATn70000000IO605t)VzH1g5DFFZg0000000000aQFiN z000000t5g6ECG}5YZeVM00000000000001hlc8)ElPGKq4Kn}$00000000000FaYW zYZsGPYz!Kh00000000000001>_yYg{000021ONbW0h4}g77eoi0000000000006L) zv1=ETxNHm!HUIzs0000000000z?0!?7nA603=R4K0000000000007XF5o{Ne7;Ow0 z5CH%H0000000000;P?Xo000000t5g6zmq*}7aBPL0000000000008j#0{{R300066 z007+plWuJm8o&Sm0000000000008*|0000000IO60Bix1o^2Ky4FLcE0000000000 z5cvZD000000t5g6!2pxaZ5A1X00000000000000W`2zp|000021ONcVll^TM8hHT# z000000000005JIj0000000IO605SoSE^Zc+d~Xa5K=}gz000000t5g6JCj{*7a4s4 z0000000000002<=0{{R300066001n}6 z00000000000040L0{{R300066007sM?QR#7{A~;kfcXOe000000t5g6K9e197a67i z0000000000005Br0{{R300066001A8O>Y+s=>Px#0000000000pp#K<7n6u@43oTW z3=XjQ0{{R300066000yLldf+Tlk9E`4#4>X0000000IO60GI)j-ftF@9B&K`(D?%Z z000000t5g6n*fsza26Vw00000000000002s`2zp|000021ONa-0Fyp&78&ON00000 z00000008j$0{{R300066004uNZEzQptZxhs0Qv&}000000t5g6E&-FCa26UH0RR91 
z000000000G`U3y}000021ONbz0F%ye77axJ0000000000001D9VRILg2yqM7aIQn0000000000002Px0{{R3000660046V zlU{Ka4b1=m000000000008o>Wa2J!9aSR!O0RR91000000001B`U3y}000021ONb# zlf7{l8RP%}00000000000C4&P0000000IO6005KiaTgh!00000000000001h`U3y} z000021ONbplO1vw8c6^E00000000000Fe3v0000000IO60AT@>PI49*q5%K^00000 z00000p!x#<000000t5g69Fu)=7n8hk3=OdQ0{{R300066002vqt#TI{0RaF200000 z00000!1@CK000000t5g676Fsqauy9t0RR91000000002clRlF000000t5g6&H$6na~2v=00000 z000000000W`vU*~000021ONcA0F(Z677Zf-0000000000001zP5p@@nICKn?ymSl= zK>Gs#000000t5g6T$5dN7a1l20000000000002?G#bQT#{0RR91000000001R`vU*~000021ONa> zlkIdD8gu~w00000000000D$`g0000000IO60Ga@k9(5KPV*vmF0000000000koyAw z000000t5g6HvyAQbrub70RR91000000001>lYw*>lZbT;8i)V@00000000000I>T5 z0000000IO608;>yu5}g}f&l;k0000000000!21IL000000t5g6>XY4d7a4c~00000 z00000007YY0{{R300066008xq4R#k9i2wiq0000000000;QIpr000000t5g6RFge+ z7a8RM0000000000008j&0{{R3000660032!ZFU!vJa!BX0Q>_0000000t5g6SCgG~ z7a98i0000000000000pD0{{R300066005Me&2|?VSO5S30000000000Ap8RW00000 z0t5g6S(E*C7Y#xI0000000000001zP!FCsuICl&U@c;k-0000000000K$G!y7n5jr z3>sVj0000000000002<@0{{R300066003wKla6;54UYf-00000000000AQ0*cNde$ zcMKV}00000000000001R`~v_0000021ONcxlkImG8eRYZ00000000000D$}h00000 z00IO60BHe}9(WcSi~s-t0000000000ko*Gx000000t5g6LjjXccorJp0000000000 z0001>`~v_0000021ONb^0F!=r78wEo0000000000006N30{{R300066008Zit#}t1 zd;kCd0000000000!2AOM000000t5g6NR!=o7Y&&K0000000000007XF;dd947WdlniC0RR91000000001x{R031000021ONb_0h3O9 z78=n20000000000005x<0{{R3000660012UlYV;^8F~N!00000000000I>Z700000 z00IO60P>TqdlwC|00000000000002Mlks^MljwU48N2`h00000000000MPvd00000 z00IO60B4g8d>0yM00000000000002s{R031000021ONb*0Fyp^77Ydg0000000000 z008il0elyeczg^Q1OWg500000000000R962000000t5g6x|5xJ7aHCG0000000000 z000pF0{{R300066005E!lg@k=8EXIl000000000003iMY0000000IO60P~amd>0w? 
z00000000000000m{sRC2000021ONcZlP!H08EgOm000000000006_i&0000000IO6 z0I!o>eHfF?d7LzP}3=Uxa0{{R300066003_QlfHcx z8YBS#00000000000C4^T0000000IO605}1Y?tK=MTzw1c80000000IO60CoYBu6`C7asU7T0000000000!2SaO z000000t5g6l#|_l7a4~E0000000000007Yb0{{R300066001_V4SyGtJbw%U;FA%4 z7n4YT3=QxA0000000000008ilp?(*Wcz+BThyefq00000000000RIC3000000t5g6 zjgy^!7cIL00000000000000pG0{{R300066005r>0000000000008}azp4EL0FgJ7 z$$ub!hC=xeA{3Dz0w2~NAOqDOAOqDOAOqFV0RWH@ibDAiLKKl80w31U0RWH@jzakm zVib`e0w31U0RWH@l0x|qf)tS;0w31U0RWH@mO}Xuq7;!J0w31U0RWH@nnL*y!W5Ap z0w31U0RWH@oLirHl7m*+WAJ)(T0FV)X0z>%_ z0vM4X0w31U0RWH@21EG}A{db%0w31U0RWH@3Pbr2LKu-C0w31U0RWH@4nz46Vi=Ji z0w31U0RWH@5<~eAf*6q?0w31U0RWH@7DM?Eq8O1N0w31U0RWH@8bkRI!WfYt0w31U z0RWH@9z*#M;uw)20w31U0RWJH5h6qR5CR#IAOauO&;bCD5hg?V5F#0oAOauO&;bCD z5h_FZ5JDM|AOauO&;bCD5iUdd5MmjTAOauO&;bCD5i&#h5P}(zAOauO&;bCD5jI2l z5TY58AOauO&;bCD5jsQp5W*ReAOauO&;bCD5k5ot5aJn;AOauO&;bB{kP$*d`49pc zkstye*3ba}kP${h`4A!+kstye*3ba}kP%8l`4B=Hkstye*3ba}kP%Kp`4D0nkstye z*3ba}kP%Wt`4EB{kstye*3ba}kP%ix`4FNSkstye*3ba}kP%u#`4GYykstye*3ba} zkP%)(`4Hk7kstye*3bce0FV)4L-`N_8<8LaAJ)(T0FV)8L-`OQ8<8LaAJ)(T0FV)C zL-`Ow8<8LaAJ)(T0FV)GL-`P58<8LaAJ)(T0FV)KL-`Pb8<8LaAJ)(T0FV)OL-`P* z8<8LaAJ)(T0FV)SL-`QG8<8LaAJ)(T0FV)WL-`Qm8<8LaAJ))+0RWH@fz*0w31U0RWH@ibMGjLL8AG0w31U0RWH@jzjqnVjPhm0w31U z0RWH@l0*3rf*g?`0w31U0RWH@mP7dvq8yPR0w31U0RWH@nnU>z!W@wx0w31U0RWH@ zoMAOauO&;bCD5vD`=5F#CsAOauO&;bCD5voJ^ z5JDZ1AOauO&;bCD5w1h|5MmvXAOauO&;bCD5wb)15P}_%AOauO&;bCD5w=755TYHC zAOauO&;bCD5xPV95W*diAOauO&;bCD5xztD5aJz?AOasH*3ba}kP*T|`49pgkstye z*3ba}kP*g1`4A!=kstye*3ba}kh2tq?+JhK5k^M&5F#>>AOauOumJ$@5lTk+5JEDM zAOauOumJ$@5l%+=5MnZsAOauOumJ$@5mH9^5P~w1AOauOumJ$@5mrX|5TY`XAOauO zumJ$@5n4w15W+H%AOauOumJ$@5ne|55aKeCAOauOumJ$@5n@L95CSuiAOauOumOJn z@DXN4`4A#Akstye*02Ep@DXZ8`4B=gkstye*02Ep@DXlC`4D0=kstye*02Ep@DXxG z`4ECLkstye*02Ep@DX-K`4FNrkstye*02Ep@DX}O`4GZ0kstye*02Ep@DYAS`4HkW zkstye*02Ep@DYMW`49p$kstye*06s80PqopM)?pTG?5?zAJ(t|0PqotM)?pzG?5?z zAJ(t|0PqoxM)?q8G?5?zAJ(t|0Pqo#M)?qeG?5?zAJ(t|0Pqo(M)?q;G?5?zAJ(t| 
z0Pqo-M)?rJG?5?zAJ(t|0Pqo>M)?rpG?5?zAJ(t|0Pqo_M)?o|HIX0!AJ%`c0RZq3 zrbhV?A~lg90w30}0RZq3sz&(`LN$>f0w30}0RZq3u15I~Vl|N<0w30}0RZq3vPSt3 zf;EvK0w30}0RZq3wnq67qBW5q0w30}0RZq3x<>gB!Znc~0w30}0RZq3zDD^F;x&;V z0w30}0RZq3!bbTJ0ydE#0v~_YumJ$@5ynRO5F$2_AOauOumJ$@5z0pS5JEPQAOauO zumJ$@5za>W5MnlwAOauOumJ$@5z`49p)ksyBpAJ(t|0Pqn8NBIyUIFTR%AJ(t|0PqnCNBIy!IFTR%AJ(t| z0PqnGNBIz9IFTR%AJ(t|0PqnKNBIzfIFTR%AJ(t|0PqnONBIz5Mnx!AOauOumJ$@5mHC_5P~|9AOauOumJ$@5mra}5TZJfAOauOumJ$@5n4z2 z5W+f0QrHYNBJTEK>4CK0Qmv{fYKjKAGmS*|Cjay04m@K05zaz zK)D|T0C8Xe092HwNBQ6b7Am4QK>2|JK>0xc0m^>?3IMhA4?y|$7eM*_d&j8217Fs+ zV*{zz0RXVG3jq1^;wh22YXqr*007XL007XtD**XA0}+vPA|#PJ;2@E;69D-Y0Dw|q z0zmn}006Y$0~4y*{QsBu0{|+Z0{}ID0s#571^^Lrf&h`f0s(Qs{{K_}0D#i8`~R2V z;wgWT0C+_C<^BJcpaTFk_u?s$-~$(`k^cXe-~$(`gZ=-PA_EbT0}+5yfdBxozyN?! zAz&hrwg3N@w_`)8xobqJvkO4^wG%-3yDLEX$p8Nr>i++iRs8>#<^uq#&ou;UJZQbGU`ks*K@kzfFTQWgM!5|sb{m#qK)7q?>rskv(esj~|J z`Lz=O`MWCs`91spm;HObsr>^0k@E`x`5FHImtX+^HJ||i(4qkV&;X!E`9j=B`Qd*9 z0;-5O0F?6s04g8?05u>0fKoR7|CfpS|Ch4?0Psit|Ce*(7?G*{|CgZr|Cb>EfKp%@ z0l8r|0J*UI|CeCi0J-xEK>7W9!l=LlU)HPs|Cbj0|Ci+h0IK8z6DsoyK>7W9!l+^Z z0MKCo0PusCfKozW43Qy#0g+$;fKq>B|Nj^Ld%UUr0|1fpQvs>70RYforbqd|HvqW- z06_Vn9{~9Q0D#h^{{NTY2mm#p(*Tj$`u~^o0{|+Z2mm#pH$b@`1ORbB0RU8UrbqeU z0~9KvA3*tG0zmme00GLu4gj_DQ$eZyd&H=~17Fsl0RYed%t!e(`~R2V2LOLH0|0^2 z;R6Gz0sw$g)BymHl;SCofZ`*Ok^29a-~$w@U;6)-zy|;|0SJIn-~$w@gZuxNfCvCJ zp#XqVfWjk@FZ=(OKnVaf75o2}zySa?paB5T0Psio>HGhe^aB7YfC&IKf&c)pfdBxo z!wP^>LI4qwA)p_TzyN?!B>#W^m(&0M7l8l(u(cBa`6CK|Qk4JzupuBGkzfFTQlbJ8 zksJU2m!bj@k%0gJutN)gQXxPfkzfFTQWOAy5)}Xcm$ef>`NIGI7Zd=162Sle7vuu~ zDuMt2uz>&ou!9GHQbGU`ks*K@kzfFTQWOAy5(oePm$v`^7lHr)uz`O70I(wnfKoyL z5s@K481}z#frc0Dw}W0uhnd{{NS}|Nj@Y69D;y`u~@p0RYed^hfzX006Xz{{I)i17FsY zfS4Tt=#!q9I|2BU+L$vCtVj8P006Y00RZp-06_Tu{17Fs-{r{IE`~R2Z0|2T+5r9&F0Dw|r007Wo007WJU<{EV zU=ERU{{NRC0Dw~cd%>u{17Fr3AOqE)0RZp-Xp=6R86VJL007Vf34l^UU<{EV;0}?1 z0Dw|q{{I)D0RZqIXp?4}Ckt#wxxfQo)*m1P)srZjA0ND!~K*HQ)mis+#%# zm*ancQsDytD&hkbs^9|?ss)qXoFf7N`jY~kCjq9DDxE?hLll5g;sY6~;R6_|+Wr5R 
zfDS?U!1w>P<^TT|fC@mlS(A;O9|3)nt(_ngrbqb!06_VnCj$8a0D#i>8vyy}li{5c z8Qb~)mtX(@(BprAQsM&>s(}H3QUCxEk-3u%o+eh%VE_Qo0Pp}H0FeO7c}0Fl50U)CQW1Jxfa0P&y!0Pp}H z0FkT(08{}CK)F8z0CAxH|5PCWfKmYr0J%l_|Ci(g04ir1ODSal0MKI!N-5$40J-A} z0J&iR0MMnN7Lg$U8Igd00Dw|r007W~9)MCpU<{ED{r{Ka|Nj>sAOqDw0Dw{f3;?+@ z`u~^Y0{|*#8cQjF0Dw|s3Q8&B0|2?>3jn!g007W~9e`4$pcau~007Wp007V-z!s50 zU<{G`{QsBp6F~X>dqJuH!vK-M17Fr3AOqC_1VFg~3;?-*egvt1f6@TC8v6g2WdH!sV+u+s;sXG=;|l<}VE_QorJxp(As`!(fB=9}VgLZp!ykZBLSPJ$La8AD0g?Yh0g>JL{}&Y*8!>Y0|BbI=L4zr0|KhL?*ys%0|P3y z-vX(-0RZs&0{|+Sd_a?X;~0?_0Dw}wF97)r000ypAOqDv3;eQTqRv^8)}X ztNZ_#e*yq;s{Q{|9}oa>pacL^A1nZIAPYl!zyknOv;hF{l;RkX;iHgJl|liLVLhsH zmJEPWg4RH}qk4I`LIeO2g4O`JV*~&Zqk1{GL42W`A^c_2!HNMJ!2h`QQT+D)S>i`TcuCssDolk-!6A)*m1P)u3+xxh?$v zm!NMzxts?85uXnLaUuHuRNepo7at%4)vEXZmtcNClavAgiyr_L>#6wvRPzHDD(CN&5enZwVgLZJVE_QI zBSe5wLSPJ$AwVgSzyN?!XOm5+OAa*n|CfLU0J&fSKshD*lcuOn0nwB7s9yqE_>)$t zG7;GJ|Citc7OJZD|Cjdz0IIB$nW>jDg7*KH;R6+_;sXGx-~$w@O8Ebmp#J|gpff=E z;R72g`U4y)Y5D(`;R66FUFNxdVWJ zQUU;hQlaAjk)D(Bsux0-`u~^n0{|)j0{}GL_y3pV0{|*w006Ln0Dw|q006KfRDe=K zU<{EVz$}ri`u~^n0~IQuA3*s)006Yb_WzfZrjtsmDt}S<|CjXx04ks(K>1(*0JOFC z|Chfb0QsOFK>1(*0JO38|ChfX0Qq(O|1`Sy|CbX0fD+{c04n4I6)Iu?0I*>I0I*|K zfKozW43Qy#Es=l#fKrJ1|CitY{}-I0I*|IfKozW43QxK zERlc!fPYeb`u~^I|Nj>sAOqF&0{|+a_y3pm0{|+s`Tv(5_y3pW0|2Vz0~IP_006LI z006L~Qh-uIU<{EVKr4}e0Dw|$`u~^2|Nj>o_Wzgv!xGV8006K6KrWH}djP5Z0|1fp zQvs>70RXV|TLP(JrbqcA06_VoR|5G00D#i?AAbP(cmM#f4EX<-AOQe0g7^QIrLVgLZJgrF~xLf|iv-~$z^r}_Vv z_J0EaDxU%XYd`}4RCoFRm*V#Sm-GVws$lp3m*xWiDq#Qsu;l|3s^kL}Dr0YeQUYTP zks$yukzfFTQZM@dmvg2^`2YZb(xO*F`C$S;`9S~y%E1W$wewR!sr6ezsr`FEsrVm2 z`TxTbk-!6A*0&n~`L=@rk++ipk>Udts(;`E7OLR`7pg4y|CjUw04iVz05!R51gRKf z43WDl0Qq?U0I<6O0MI$3GLd`Z7?HH20+9lMfYJc~fKuQC6sj)v|CgX+K)C{dfYRXu z6so57|Cjm$0IDAV0P*bu092X+0E=G(6zl2!|5Wn>6siaS02Ch}1J%I+P&Z)?K!3R* zKmd_*0s)JE0vhYW{r^;920*zX{~tDB+d#P=1ORd2{{K{M_Wzgl|Nj>sAOqFn0~M;_ z0~e~`0~V_P_y3n5{Qosz+d#P=1ORb?{r^<;|Nj>sAOqDT`Tv)X_Wzgj0|2TG_y3pX 
z0{|-J0~4y`0~)HsaDYNt{R04z^9um^@Am(f^8)}X+4YldtQu)9`Tv*T001=r0Dux-`Tv*m3qbh*0Dw~c zd%~zU`TrO4`2Uw(_WzgU0|2V!0~0D@006LI006K;U<{EV;4_iKU4T-60Dw}g`Tv&y z0Duxs`Tv*m3qbk(d%~yy0Dw{^`TrLmAOqE$lku!O0WFg)ttbJ~lUA)lEu!}SmlXhj zQsn~xD&_+eD&zwbDuZEwQh)$}QeprAuweiIutH!Aks+Wok#Cd5txP`x0g?Cg|Cb*y z0Pzg^|5Ts>0MGywK)HVc0Bef*|5TJh0g+*TK$C!aVYmPU01<$CA-JcQWfB=9$Qr`Igm$w^0`MU2!sjUD1 z7rO5Rska*d`H1xYmmeSl)%|u>Au7k@E`x`Lh84umGq>`Jtl#kz%HkzOot$ zRQCUuAOQe1bEcExvLb)^_y3n<_5YXj0|2Vx_5YXU0{|-I0~D%a006LI006MVb%0Vr zU<{EVKsk|M0Dw}l`2Ux*4?y|p|Nj@X4*>aH^#7Ord%mgt0|1fp3jq1E0RYf*;uw+7 z_5YV)8bP@~003)%0030e_Wzg4_5YV(8UeZU3qbk(d%~!|17CmEA0PwO|3d@O{d)qb z{eu9J_%8tY`Zoai{67HsS^z+~SQtRLz$pN*o8lOM_aR3yb8ej7OaY+dPR3HujG=UcYwBQ2-DxUQJm-7Pvs+{%z zm!U^M`Q`%vD&YeF`Fj8Wu;Bw7s$&2EuxFzdk>vv#D&zwbs$;-Ak>Udvs^9|@suT48 zm;3_&DxU`cG2kEoR38HXakC0Q`7H?mR22sRQFj0Uu(ccp0Fn1XKaq736^jD^02F@$ z73&}f094=u6sqf!t+X3|{PX{p`~v_g9|Qm~VgCPAi1+`O^Ys6h^8)~?arOU~G`ZqxN{69eX|3d?jzyn{_A0PvN)j$UTG~o#VwBQ2- zDpmCVm-7Pvs^9|}Dt7b#m!LlY`B3%$mwNyJu%$mh`Q`%vDxpU}`C|Y8u;Bv$`DddR zk>LXzs^kL`s$&2;k>Udvs^9|@s>k#Hm;3_&DzyLq7vKXFs;l$=mm&{AslfsOOC%RS zsiF@GsiH$b`J+RB0+FN_38{d>Ad3?8|Cjs&04g5@05RYg08|_RfKtr={}*8q0JNlg z0J$#p|CgkDK)L1v04kwJK>2$B0I=Z$0QqA80I=Z$9I9ud7Lnxx8Y*KTJ(1!A7pmX` z6so23|Cjs&04kFI{}%xZ0JNlg0J-1;8Y(~Y|CjRv0IC^(_5YW9006M0dqBD70{|+a zM?m>w006Mz0|5DFqZX0j101U50~4xaU^|iG0~e~`0~D%|^Z%Fp0{|+0|Nj>sAOqE; zdjPo)^#7NndqBD1_5U?sVgr$L+CaH`006KBm_R9|8x1Mo0|YAm^Z%FU0|2Vv0~#u) zI{^6}^Z%EBpg#cl`1Jpm;0pk`p+`XZV51h1r5jBt;ST`$r#nFTV*miKp+7+R+}DYqCpe`Q!r=Dq|o#k>Udvs^9|@s!j9%m;3_& zDmwrF7o>Xtxy|$cm!x|@xxn=QHDF=`k#pKWxqAQrkOi1PDWn$-DW-D(x!?mDD&O+| zm!Lm?0QtG}|Citk0J)?WO(|ml0Fb70K)GY!I+3A2K>6nb1S+6MK>6YW7pma{0Quwt z6Dr^j0Qq2}7Lni!0Qunq9ID_06sj!q|Cjs&04f>({}-ft0J*I5|CgkDK)GP<|21G@ z1CevuK)HJW0FVWkKq;h`4JoF30J)p=|CjoI0|2?CmrW_)3jn#MdqBBhqZW~&M?m@K z0|YAJ4*>aN005BS3jq1y101SjpgfV{0~e~~0~0FX0~D$c^Z%Fp0{|-g{{I&o0Duy= z8$kIc|Nj@6_Wzdz^#7Oh0|2Um^#7OT0{|-I0~4y}0~#u3qZW~K005A4VhoWZgMd+}DY^8)~?Y4rb>>C%Ih`uk!zwq5%M~;R66F-~$t?LadXPxhiF2 
z006M$0|2UH-~f^0e}K{ffPm7o^8c3~1OPE1{r^-z_Wzfw^Z%FR0|2Tcjet^s0Dw|r z0059-006K;U<{EV00EKv_Wzdv0Dw~F|Nj^Ld%mgt0|1fpQvs>-0{|-Y0|6?FlP$U% z4K(%tm*E2xD&PYXD$$cwx;O!*la9JJ0qv8;x;Ft+ll{6wA`aHE8nxm!JXw zHFKs%`JwMZ`CtM-`5*uQ%3vgumb@N+VgLY;VE_QI!;pYdLSPJ$Az%WLfB=9}nD+md z?f?H5mh=CY;R6^d-~$&bWAp!)Uu1OO2MFaf#9^Z%D&|Nj><^Z%Fn0{|)l0Dw|K0D)2<0Dw}pll8qTe`@momjDd- zH@Ra2k;4su(gFa0QsDyws^9||sz>qvm-_<%D!>N-H6Q?hQlPT|dEWE?m*N8es(}rF z(jW@}k$Um}mtY70HNp*mQsDy`s^9|_Dy{MVmp}smG?d~Pk-=%0Qh+K!xdH?LQC|Q7 zYe@(IR388Uao`u98h|PRf4LU*|CfRC|Ci(g0IFgD0FYq-0I&vXm{R2f8Y;skfKozW z43QyV1(ARNfKt!(|Citc7Ah71fD%0Q|CitZ05!WSK>6SU7AkD@|Cb;DfKo>F|Cf>f z{}%xD|Cej>|Ci(g0IKB!8Y*G{0FYq-0FWamfKozW43Qz=Gm(G*e}GcD_5YWu1OO2MECIQ3^Z%C$|Nj^7^#7My^8c6R0|2V#0~#t~0059-005BW0~;zMC4f>w zU<{EVU=xvm0Dw}c_5YW*|Nj@{0{|*w0059-006M%0~#vhC4f>wU<{EVzyy(i0Dw}S z_5YW(|Nj>N1OO3$e<}gF%>Vxv)%5?DK=S{Wad%fB=9}kM;kTmjC}3*FmY`|Nj@X*8!>3@c)^0k@E`x`Lh84kfIL}siYSYsiq$jsbZ#+hQdM)!1Mo?U;zL%wAVqY zbEcEl!frCp{r{J=*FmY`|Nj@X*8!^0k@E`x`Lh84kfIL}siYSYsiq$jsbZ#+X2e1cNb~=fU;zL%wAVqYbEcEF#BMTI z{r{J=*FmY`|Nj@X*8!>J@Bf$md#tJb0|1fp3jq1E0RWJrlM$(2f~ldi>X0U47W#upx?@Bf$md#tJb0|1fp3jq1E0RWJt zn-Zy~p8)w~rjxqNViv{n|Cc}n05!DNL8-RiLaDePK>2f~lgh?AHAAj|QbJ%1ks&|} zkzfFTQoZy4m$VN+`Ro7x7h(VakYNA-up_R3QbJ%1ks*KzkzfFTQnZsf$5R2lla0q8 ze@E~Cm%syG)*m1P)dCbiDFg^WxxxVeHT`?Psr>^0ktG5EG@twcaVP@-RG|R?kRjk0 zkpKXIQX&)^DJ0+kx&3>=sK5hX)*m1P)u90ZkRjk0kpKXIQX&)^DJ0+kxxfQo)?fgD zQbGU#@L>P|kOTmLQp2@?QbJ%1ks)9VMUi*&|Cay&fD-Qi{}*5YfKoyL0PtY|0FXnr zfKozW43Qxq4Uum1|Cay&fD+~Z{}&%11J$7c0FWT81Ni^|fKtE%U)GZ_$`}FolS#^B z0?Y4{uF4uVKmY*nc0VE_P-!@Ph}Apj1MfB=9}IP?FP>i_>2p#cDp z-~$sXGYkN^3-Xh$$`(FC008h|005BW0~0D^y?|0eU<{EV;0}?10Dw{z0DuxD^Z%FL z|Nj>sAOqDF@&6YeAOqD1^8XhfAOqC~?~_u?HWF;`|CjxH#i+moU)CQW1JzXTlbXvT z0j86_%SIC6@&A{A0RT0$*FmYa7eM)QrjxG4ZUXQ7lODzv0(2f~lgh?2K0*Kh@L>P|kb}j5QbJ%1ks&|}kzfFTQj_xk zm$VN+`Ro7x7eW94@L>P|kYmMwQbJ%1ks*KzkzfFTQi}4EI>%B1llzm7#~xN4?*Etl zd%UUr0|1fpQvs=y0RZsy0|2T30D#hArbqd|{{pEZ06_Vo9{~9R0D#go0RXh2qX3bB 
z0Dw}8@Bf#e0RT0D0zmno7eM&{00HX&P@kG}rjwq|B{`5`005991AtOOpbwED;1H2Y z^8c3r0DzM4|Nj^F?f;hm7z9&c0Dw{e1OQPV6aaAmFo7Dt17FsX&vY)^@c)K7TOp|CjRv04hT9 z|Cg=q|Ci+h0IK8z6Dnc=0FYq-0Fc87fKo!B50N2Y5s`oZfKn3xfD-%h|ChKQK>4)S zL8-RiLaFHg{}&$b|CjUw04gx?|CgKY|Ci+h0IK8z6e?l>0FYq-0FXkU50N1t5|IN6 zfKq?}fKux5|Citc7XzvU?vrBBV+I2gkvXFelgrQ{GVSjFmj-H>Qsn~xD&zwcsv`}6 zQsM&{s^J3|s(=81QoZs2mjHc$QqS)Hm*4{ws^RRD9?>oWk?xaD(G~)q@RMfIGA5Gm z|CjRv04jh205#wOK)D9-|Cit7r^lUmn-i7m-Pbxs;Tai{?QgH0}g;vLZA+JuR0t5h2;Q@kDUjPAXC<6dgpaTJsA50&(u<-wv^aB7Y9|Qm~ zU;_YDpf^Cd0|0(ldX+Cy^8YfD$S3 z|ChBBK>6?g{}*5Y0FVISD3N&V|ChBBK>6zb{}*5Y0FV>_fD!-zDv@&R|ChBBK>6hV z{}*5Y0FV>_fD!;eCy{LI|CitY{};6r0Qu4E|CjxHy{Y{J0Fm=k0jZM#0Pyq!0IFi9 zNBKhlK>4E=0Qmy|fYOts(;O;r@c)+|1OPDr0RU8>CqTIa0Dw{g0)kTE0~D&I?*Eq` z1OPGM6Q5dhrjzQ^Cx5^vk?-*TmjD2O67&E67hnJYkN`j@kv{DImjD2O67K*17o+U| zmw*8P@Bo-c`M?8T)*m1P)lTpK7at%4)%|^0k@E`x`I7+v@Su|bk#nX;`C$S; z`Jop8`2hfc(nIk7mmdTGF`ySf`C$S;`2hd{%Ao)Nwet%=`IC^;3_6MI|CjxHy{Y{J z0Fm48;0Qms`fYQh9|Chi405qVJ0FmhL|Ce*7lh)KK z3IOc?m-7Gr7jEp63e`1N@Usg5`SSw+s@Bp+`2dN4(zhD``PTwK`C{+?m-Pbxsz3_> zv;&EN(gFa0QsDyws`%^wm*N8xs^JHK(%=IXsxa*Tm*Iba((?lVD&hkas-OY^kt^(z zmeoEEBO!oNLSQP9ApkCsi13r*)gynvERn+?fKq?}fKp-r0FXjpDv^5d|Ch86K=~j5 zfKvT?#HhdnU)CQW1Jy$yfKq?}fKp-r0FYq-0FXjpDv=?8E0J#S|Cb;DfKvT?#Hhdn zU)EzFfKq?}fKp-r0FYq-0FXjpDv==|ERkgJ|Ch86K=~j5fKvT?#HhdnU)Ga^))iG? 
zDv=@JFOkC{fKq?}fKpcQ|Citc6{>LT|Chi3fKs;`K>4#%L8-M{LaECC{}+ny|Cjab z|Ci(g0II_wfKq?}fKp-r0FYq-0FXjpDv=>TFOfy?lkV190%GiwO4nW*gC~GefB=9} zVgLY;VE_P-LSQP9A)qsn5b%@2*D3GUmU)DnGUmU)CQW1Jwxc|Ce*@|Ci+h0IK8z6DoiJfKp-r0FYq-0FXl{fKozWDv=>TGy{>s z@00r2A`U|)fKozWDv==oGm*CMlPcLKf8rgH5AFY#9|Qm~pacL^l>q?IpwdVA002Pw z-~a%$vkL(E6CgtQzU%*&-~$t?v;Y6KvkO4^fZ`pIt?vJq9|Qm~U;zMBl;Rzc-~$w@ zc<%p~wbDoVpE3ZszXAaH^w&YD{d>cxzyn{_A0PwO%SY7AoKa6{_Xx|CjRv04ksi05p#7|Cdw%0Py(h|Cf|vDv{*_0IK8z6Dr~Z z6skc00PrDTHjzU?fKq?}fKo^A|Ciw69g))jfKpWMlk(as9zp;B@Ie3o@a2DiQbRz1 zQbJ%Vks$ysk$?bzQZnz8I@=y5KmY*nfB=9}GXQ{6LI42p zLSQP9A@7rZ+ZumkKY&tz0Dw|60Dw|L008ho008hpU@DOzfGd#`@Bf$J0~0Fq3qbk( zd&H=~17FtQ;vJE50)SE+?*Eq`1OPE1`TtZ!?*Est>;IQyLV!|2008jh0|2T*U@DP8 z008hIKsAwo0Dw{e@Bf#y*FmYa-$JR*|Nj?*KY&tz0Du-!GXQ{6LI42pK>z^oLSQP9 zA;2|}^^+~!9B1MkkrM!bQvB`zmmdTGF(LK;R3+~JmyqlKm%~ATQsDpp7at%4)gbQw zmy7HFm*fKgs)Im)QbGU#@Ie3o&_ZA;ks)9Xk$?bzQrqtTmv{gF7ZvXRmw@a4m*fKg zs$)TbQb7O!@RP9IOn+1W0PvIoIg#@N0IIcH0;%Bx6sosl1F7@r|Cd$(0Pyt#0IIE zfKq@005sSD0JOm(Ig#K47^)ua|Cb>EfKtE#05kyrfKrnI01LF?LOGG(0~4wj?URPx zK1QQNfKq?}fKoyL0PsNo0PsR!Dv==oIFX6&|Ca#(fKuQC0V@4_#HhdnU)EzpfKq?} zfKoyL0PsNo0PsR!Dv=>zH<5nslM3EO2SWe=@MB;(lULq7KEps z_U!+c^8)~?zy<)cBLITZ0sw$g;R6GzjOqWEzXAa9KmY*n0Dw7>A^?I?;R6$@005Ml z{p$ahKm!0Z-~$t?!s-8);R66F-~$t?*Xom2-y(lN008jh0|F{R008g;U@DOX0D@A3 zOMp@#z&eqD0Dw{x0D#gN?*Esy6F~X!|Nj?K?f;j(>i?JJ0|2Vz0~M-5008ho008hp zU@DOzAUu%+O@LB>0Dw{q?*EtI0~4wf0D#gZ>i?It*FmY^|Nj?4?f;jp>i?JI0|2T+ zOn`qz^oLSQP9AwVyYfB=9}{q6skwAVqY)c^k%z^oLSQP9 zApkv*<4u54fB=9}^X>na-~$t?699nH4C?=vwAVqY$N&EqA0PwOLI42pK>z^o!%Bcs zLSQP9As{-DfB=9}699nHHnA1006WC0D@BC0~4y?0~D%f>Hn8N0{}I(69D-F0D@BC0~4y?0~D(5 z?Ejbb0|2UE1pu_*0~4y@>HnAD0~M-4008hIz&?=`?Eja*0s#4-1^~3+0~M;->Hn9r z3qX?-;UPlL>Hn8N0Dw}n3qbkw4?y|$7eM*_d&8)}17FtY?EjZj>i?JI0|2Tc8-P-P z0Dw|L008ho008hpU@DOzKrfM_?UQ!lA|62i0PsR!Dv=?;J&^+)fKq?}fKrw1|Citc z6RNN2leXb19zg&A@IqiJks;tdkpmupQh)$}QiJXPm*4{xs-5YR{^79{(Dxev`S$|@ zs)VLT`SSw+s-U1tlf&XI7G&xFm*E2zs-pt|k>UdtD&PYXsvGQ+2IC?E#^{qO<4yv$ 
z=#zTmQZk|F|Chl2|20AY0PtY|0FdMZ7OEkj0FeVOfKozWDv^KyfKmkQ|ChKQK>1+; zfKrsElm6p60=DdvI^-Hfd+7g{0RVtf^8)}XpaTFkxF10I<^TT|A0PwOAngB_i|PND zz^oLo9$&A%H)TLSQP9fB=9}+LM{&97&)7kwY(lQbJ%Vk$?bz zQqS!FmjMERQn(*L`OE+R7YFSBmvZUP|kOMG)QXyahkwRc9 zk$?bzQo-z#{^S}0YUq<1-~$t?%j*A^ z022T;AOL_;U@`!?;R6_|phE$Xwd<44i?I31^~36V^TR20D#gL>i?G?8~}3R{{K{f zYI8Wl=>L}=8~}1*1^|)Z0030;0|BalYI8Wf=#x(78Ups`lWOK30;1@Xisl+#m+AkP z^8)}X;QIeGkn8`K`{@6d;ISA=>M1H z0|2Vz0~0D?0059fH-J*&0~o3yAO?|u0Dw{}?Ejan|Nj>sAOqE8>;IR}=>M1H0|2Vz z0~0D?005A~H-J*&0~o3yAP13v0Dw{-?Ejab|Nj>sAOqD{>;IR-=>M1I0|2V!0~0Dj z008h|005BW0~o49G=P6nLSQP9ApiuCfB=9}a{z!+5bXb#&j0@xxobqJ%>VxvNbCQX zv*`bq<^uq#F58KyZ`?eA0PwOHtYYFa|VD?py>aXP|kV0T8ks&|> zk$?bzQt<2lm#+W+7rAQ$sh#Klm;HOasY3t&@L~V}kYj*3k^KVzk@HglsgwZ#&_W;y zkpTjN(vzA;`SlY3`Sk+;D&PYXs-Nlqm*4{vss-o&mjD6)GywvFQs4s6SU0V?xTL8<+F!>GUmU)CQW1J&RI6RK0_ z|CiwY|1@<005kyrfD+&X0V?$qK>71iL8<+F!>GUmU)J*I|Citc6RJq)|CfOM|1^P` zNBLj?0FVGc36Zbs|CjX>K=}axfD+&X0V?xTL8<+F!>9zn17FsY@aP$T;vJC@>Hn7> z1OPGM1OQZ&0RXUo(nt9K06_Vm006WTAVT@U=l_@B0~4yV|Npg=(ntB=0~V^E0s#3Z z=>M1V0{|+Z;vJE!>i?G?1OPF>0030r0~0FqQ$eZqTSBS*d&8)}17FtA>i?HJ=>M1G z0|2UHJAhJv0Dw|L007WVK>z^ILSQP9AwV^ei|hZF-~$sX^HV{o^;<%z{d>cxzyn{_ zA0PwO!0P{(E9n21z@-LSQP9Az%)XfB=9}d+U?d>8c(hRDe=| z0Dw~c0|1di007WJU@DP8008hIKnjsb>yw`99)B6<|Chi305$XG|Cay(05$!4!KlCk zU)CQW1J(U|!Kgq00Pp}n43U54{}=sx!Kgq00Pp}n3z2)~{}=sxzp4EL0Fhwo|CjxH z!Kg;){}&%11J(U|ys7;I0Fm?$0QvRi|CgW$05!7<0Qr*v0I>7}04j6lNBLj^K>5G` z0Fz7W83EIiW$Pb*XXgKxbpk;7;pzXEK>z^o;sXGxAwUh0<061k-~$z^i|GHCb^<{8 z^#cGZ*y;b5;sXGx)?x&aK>z^oA%G2$<061k-~$$_f$0C2b^<{8wE_St&guV`;sXGx z*1`yp<061kK>z^o-~$$_A;1lhcj*6@wE_StbpZf0U;;n~`Je#+v<~N!3hYuUKmY*n z000h=A>>E-Apk)6KL7yn004oSzyn{_fdK%pKmY*n0H6+$A>@;W>>dh0008g+Ko5~2 z2f~lUnUI0yyZC zlI=nwLjVBq-~$w@W8e>wq6Pp_C+7c`v=2b}+yDO;v=0FJC+3s(?M@Bz<^Pue0suAO z0~D(ClTq#$0_x?HYVJlV4Cw!t-~j-%0RTWbp#cCigz5j6U;zL%66yb!AOQfh0RTWb z;Q#m}lS1!4M-b@$m!UU6`CtM-`5*uQ%76d>wet%=`Tcv+sK5hX*1YBa 
zmmeSl)%|<1sX+h$@cjb-k@E`x`I7+vu=W=K`F5s9`5~l1lgIBJ0*~dB=IHn9&2LLs*=l_=iG=Ne;2!K+^>HnAX0{|)j2lzL^0Dw|}=#zr*CVxif z|CgeJ0g>SY0IC20fKpEA|Chi4_&1?TK>6T*pVHt7pz=Qe0P8IQ094=u7pj)#|Ccr8 z|Ca$IfKs>V|Ciwd7pmX`6{;lX|Citc7pl_b|Ca#)fKuQC6e?fn|Citc6e=O<|Cfm8 z|Cfj6|ChNVK>4)SL8-T6L#cD7N0TSHnAW0{|)j1Nb(0001=y=Kq(#0RS|%-$JRl=R>Kw zCqVhNrjttYG6F^ElYa6V4pQX*m*4>aHQ)mls^8?3rt%^(vgrSp-~$w@ROkPf(f|J! zdg%X`<>mjE;R67wKmY*n0DusYE$RQ4-~$sX699k`rs$LU@+LCe3jq0(0RXT94FahI6$7cJBLn%TCjb}0|2T7000ypAOqE54ng=K0RXh%0~D%# z=Kq&~|Nk}M0~4yW4?y|z3qbk(d%~!|17Fq?0D#i84?y|g0~4zA3qbk(d%~!|17Fsc zutNbD5+CpYap(X5RDl5iumF5~lOfoXlJ_M6(380LBLUEp-1iv*;1QGe_ar~Cw4(r# zW2Q&>-~$w@WB@?UXCKmY*n<9~os;sX?_K>~nM001qK!{`5(fB^tB z004kebEcD8_$n&7=Kq%;8~}1a{{K`$0f15g0D#iq0~D&W7WpFq?Xx=hDFFn~AwV3HXZn%>q?7vkVgjq;lV1BG1y1Jwm%)>T`!){H zK>z^ILSQYCA;2$@;FHPwA`Z|&007WJU@eg$ATN>Blll7|63{^a0MG*zfKozWEs-IB zE|JKSDf}b>rjt(mCJJ%p|Cc}k05o%^lYabG0(It-*8CoSZRY=%^#cGZAOiR|wOc}| z-v9p>%;o==H{<`8<^uq#z^IgBXBPLSQYCAs{f3fB=9}h35a4 zwOc}|(EtAzzUBXyDdYc_<^uq#z^IHVbNv!Irbqb!06_Vn9{~9Q0D#hT006Xr0zf$+0su8a007WI007W~8-P+m zU@eg$KrxYk0Dw|t=Kq&-rjx$?Hfa&&|Cb>C|23ftMk%!u0Qukp6sn-(|CjXx04jh0 z05!D}K>6wa{}&%11J#D*|CjON|Ci zF_C}(fKor^|ChBBK>6DL{};6r0Qu{aUH%*arjvU9A_3=7W9!l-@W{}&+ufKuQC z0V==)U)CQW1Jxk|fKu}dK>6SU0V@4_!l;!H0UQzW;QyBZ0RT1O0Dw~90~4yrl|2C$ zS^wn!m-7PvDnS4LH2CEImt*4pm*N8es^J3@D)S3K`Tcvss6zk%&|qLQk*MYW7at%4 z)xZN^)*m1P)$HW|msjHdm-7oi`Tcvss6qe$(BT6BszAUpk)4&T0V`(u0syoE5J36h0~D$#;s2NO0{|)$1OPSU0~D%4007WI007VfE`U-(U@eg$05_4q z0Dw}C<^Pwo4?y|g0~0Fq3qbk(d%~!|17Fr+006My0~D%405Opj;g>!E0UCcJEPzr% zU@eg$;53nd0Dw{x0DuyC<^Pwo4?y|r|Nj@VsAOn}z0RbKbAUKhd;FtOV0Uk1nsAOqFn0~D&^0~IPj007Vx0Duwzz&nxYy*CRLSQYCA>cHTi{zIk1pz5b(8D}{QbJ%Yks-i8k$vR`2YZb60=i5sh|J< z7l-5jm-68Mm*)cjs^$X~D&+$cmp29h5e|brfKozWEs-Gr0g*rCmsSP=HWJ0(|Cize z0IJ~w6RO|?6snEkm!1XzHW{`M?8T)*m1P)qnv2kN~_#`M?8T)*m1P)qnv2kN}XE zeg^>>9+Lq8kdy%ckYJ`q`2hex`JpEQ`2hfc()AMn`IX9-wg&+oD*xaAmp}jj@Z*1g zQsM&^s(}H3QUCx1k=^6}mp}#pHMbi8`5*v-(v9Mm{s#dTNh9C?mm&ayQsDy^s^9|_ 
zs+ZvZmjDO=H3JBM(&7Ubsv!t~(%=IXs)^wLmp};sHQ)mms@~uKm$w^0`IO2>`Jn)S zQs4s=s==3C2mu!wV@QBfLSQYCApi%FwB!GmwiiJ8004kebEcP$2mvjB&_VzJuwy_3 zkwPE|ks%0x(%=IXs$k*&m*4;Y7r+33QbGU#ut5L-&|^e^QbJ%Yks%-hk(lHEmjD2O zQsw{u7at%4)nEXCQb7O!(BuOYssl)XQbJ%Yks$yEk&fg4m$nx``2YZb659X&7slfM zmoeb~m*oQhs^kL}szLyN0I)#-0MJ5UEs-IB1d(G#fKq?}fKq_t|Citc7pj`y|Cay& zfD*RfLaECC{}?`f0aO6+;~#+1;sY0| z0U&_Vz`jTMVeK z(g7fV(yQM8m-GVwDjx&@F+u$QR5#-Pm!jYQm*N8eszU$((BT6Ys$d`pk?G?9m$u(R zsign^7cS!emzm%Hm*N8es^J3^DnI}L&;S4mk>KM0m*4{zsuTc#5(VG?m$u(RshIzN z{}&?S|Cf#5|Cize0IEX(0MOwB7OG$%3z5{~|ChGkLaC4c{}&nJ|CfZ{|Cize0IJ~w z6skY~0MGzn36aa<|Citc7pfEhfD-WD|ChGkLaBrQ{}&D7|Cba1fD(7#|Ciwd0IEO$ z0MG#72$8?y|ChGkLaBQH{};C30;zWa-j_BE0XjZR-v5{P0|2Vv0~M-C-v5{20sxC2 z02Av-0svIv0~D&^0~4y?0~V@2-v5`N0RT1P0~M;_0~4y?0~V@L-j}Wn0aIP#NBMvP zKsmqw05o9YNBQ$pL8<+F!l=LlU)CQW1J$_Q|Cize0IER%0MG*pfKnj<43Xdi6siW` z|CfON|1`q{fKq?}fKoyL0I)#-0MJ5UEs-H043U1~|CeCmmtGA4BA4J80UQ=N;{TTd z1Q#;Eh>21G0D{us>8Nrr-96`WgWmDtq1kmwy5PYvTa` zRHH&Ri{S$kDnbANutFdSk>CRqs-NHgm*4{xsyW@4J{ti^7x18y0Fhy)NBJTEK>4B< z0Qmv{fYK%5m!2B|779P$|Cc}k05x-_m%bYTA~Epb|Cd|d|Ciwd0IEU&0MJ0-50Rtc z|CjOq{})}||Cc}k0JQykzo`KLfYSW~0FgYGkqrSW0V$Wg4FOL9m6r_;0XPAsmqrc& zGXbWTdJX|D0UDR74gn(pk(bR50Y4R;-2az=3P8Ex0~V^FGXS~Z0~M;kmmv=UGacvL z|CjxHy{Y{J0Fm=k0jcx@0jl*A0QvO;0IDpPaSs7c74Y8wmmvQCG!p=T67v857vKXG zst1?V4*^08@b=#VsrLf_s(_}KA`k&W8Asgzm*4{vs?FU0mjDC+HQ@sms^9|?stuQY z5CIn+%ijN&^#cGZ-~$t?q}~6QzXAYpvkO4^_5+_6SUpIVfrm#Pr~LM2h%|Cay&fD*F{K>6(d{};0h0Qo%J z|Cb*i1J#n-{}&%11J#h15fT9x0n(Q#5&<&-6_;HS0UIGk;QtpNAOqEq-2WFJAOqFL z-2WFJAOqFh-v1XLAOqD(mzfd)qXDUxkqrS8mrxS{AOXymaT5VI0lt^169Fp$KbMgW z0TY+d4FMDZnV0<&0UrSgmmL%V8v&}9ITQgS0nwLT6ag0zf8GBVAOL_8zyn{_KmY)= zkC%lM0UrW`-^0k^SA5ZWaMD0eF|876C&6 zgO}wN0Y3td+?Q?^0UDP;7Xc6fbeB;V0V@F+mwgujD*-B(sTTno0@K@5P7y&E+rk7qA0WtxYmys9&8v;Svm%a@F8a*=H{}&%11Jz&w0MG!$ zNBIB%K>5G}U)CQ00P!RkKze`y095^Zzp3H_0V?4G0IGli0FeCy0FeOTDUoNFkqrS8 ze*gf062Jpr*8O|Dsr>^0k@Z^wsr3T_D)$=z`St?=s`mo`s-b28kstto(jVIYmmmlL zw6jwIsq+H>svjr-d4K?bQsV;^s-Xq|5nu`cdGrGns&!TYDdGbbs(%syxnN=dktN*! 
zm-PbxD&PYEHG*OQ5#a+9s^9|`s{7slf0y+G04g8>05#wP6RN-3|Ch5x zLaFx~K>7W9#HhdnU)JOU04hQN0I)#-0I)&;5s@Jv6Olt0fKq?}fKrv-|Citc6RNM; z|Cje0K>6SU6)Lk+L8Q$eZqTSBS*d&H=~17Fs) z-T#*r0Dux7+y9s30|2U{7JyPh006K-006K;01=TP;1Q940Dw|&-v5`)|Nj>sAOqF? zd$y_l0|1fs-vX(30RYhRQvs>-e**xj;HF3Uzdr!^0RTYxp-%w$0RVu~Vgf+9U;F@Z zs1N{DlmP&cfZ{2Uq2mFO0C+_CUflneAPE39pg%zQK>z@-xE}!d0tJ9lAwU(8kN^Ld z_yYhc;1K{dxoZTexq||cyK@Dpx&s1{VFEz8TL1vCwG#mOU;`MDRsaC7f1o!2`M*N| z`Jf{Lk-rBKDZi5dxj#=GxS%rt`Jg{Q`BeY_u;T+6sUdys_WYSmw*ERHM~PW z`MY;Ox%>zKwA%;(G@&;@`N9N%(%=Ies*>CPmmmxPG@>&=`QZZ`s(|7tk-Likxn$k{ zmtYA1HK0R4`CtG5v_#tff0w^Q0Qq48K)DzIfKs48K>6bX8LGc`0J-7=8miFR|Chl0 z|1{(S04l-)5s^Uv0I(sT7LmgnfKq?}fKnLV|CbmC01 zPeA$M0zmme00GK@2mrP7Q$eZr-$JSVd&;Q517FsF0Dw|K006M%0~;#j101S@9Dq{7 z0uhlRKo^ne-T#-Me?vg|U;qHL0^0wVze51|zjpw+VFEz8qW}LFA0PwOLmq%q!U7SI z;R6+_fB=9}+};0|vH$-URsaC7z@-;~9Wb!U7SIA>b2{fB=9}lmP&c(%t`; z%K!ftfB=9}K>z@-Lmhxp!U7SIA)pwM%iaH%phH0UAOZljf4JvEsk-k(skIY8`MfVc z`Mv-D7at%4)m8uiut5L-u;c?1s-qf!Qo;feks;s}k$?bzQoG&%m$)B5`Lh527u4DR zm$ef>`MBppslP)2`MU2!sk|>h`K$l`7r5sGsk-k3skIXT`MfUx`CZxnm;HOYsr>^0 zk@Hglsq_N?e=2|h0FVISDUpJHdy@d*Baw3f0MG#d0*hp(NBJWNK>39LK>4L70Qm&~ zfYKuZ0JK5?0I)#-0I=i(6e=SifKoyL5s@L_B9VXqfKrv+|Ce*7NBN;AK>1(-K=~j5 z0Lp*_0JZZ|L8<+F#i+moU)Dna0I*|#5RqpA0MJHYe*lq4Kmd^j00xU>6hNsa1OWMI z7(lsd2tfJcut)g=Xd?KcqX3aK7y!8d00@humjbD(9{~BOLjbwr+y9rK{Qoqx-2ay! z+W(j30|2Vz0~9Jk006K-006LKA%Ids01=TPz$1}>0Dw|%-T#-_|Nj>p+5eXxAOqF? 
zd%dZGe*pln{R04z^HTw-a{&O*p~C=?^aB7YW2Q&>K(s;m;sX>aWB@?z@-cxzyn{_e;*(N)u0nV`9S~xu)+j@QXv2sk%0dH zmtX+^G(rFXut5L-u;c?2Dgz^cQbGU`ks;s}k$?bzQaj!Mm+Ali7at%4)u0nV`8(SG zm!SOrG(rFXut5L-u;c?2Dq|ynQbGU`ks;tDk$?bzQZC*9m)!sV7u?wYmmeSl)gSx- z4{<02095^Zy{Y5_04n1Wm%=CkBY!|9kyQWyu%ZGHks96qm*4{vD%1f0HB;OFm*oQ# zD&zwbDxv}rk@XWm`SVjjsX+h$u>E_(sG}2rQXv2*k-z|eQViYy7at%4)#lp&m-GMs z7eW94ut5L-u;c>(Dx(sBQbGU`ks+WZkzfFTQUTrn7a-XG7at%4)%|`2YZb z65s#-7qt@r`E%HpzAXV994e9b8vyzA0|2U5006M}0w|Fc0Dw~90~D&^+LvxD0WubY z2Y^z50Dw}a-2a!g1^^L&41iL%*_XB~0Vxa6^%DU3^#cH^V5XPqECDhC4cwO=Edd(= zrk6S`0YVPL34l^U04R|mfGd%1+?R?i0U8>k34l^U04R|m04tGX-2ayV0DuzU|Nj>e z*q6R70UR7Gk@p(_`Sb$-s#gF2u=fHekre=dQs4s=s$bfd_ALQ276S}`Qh)$}QbgSU zm$e1}5r7PUQdQZPJ}v<&0*=?0YAyj9Dr?*Sm)6+-m*oQhs^kL`Du4ihQbGU#ut5L- zu!9qTQbGVIks+WhktN)ht}X!@G6NHUQbGVIks$yrksI9qmjD2O66pW`7yWy^sUH9U z@%;k;k@HglsdE7U(DVZVs$r&=`Yr(;CK}oQmtX<_HJ}qf`QQWqw8Gl|mmmWGHK7wg z`2YZbQh@>h`E#b1J}&_}f8z~+QbGVIks*LCk?-68mjD2O672u~7vuu~DnbANut5L- zutESRks+Wjk%JC^Qh)$}QsvwKm*4{ws^{4MmjD2O65#*;7k~hOQbGU#ut5L-umcW& zQbGVIks%;2k=on;mjD2O64(F#7l+sXm;HOZsr>^0k@HglsdE7U3()if0jl8x0xDvr zmjW;WGAbL`|CjXx04iVu05zZ+K>2&x|Chi705zc-K=}XwfKs&+K>2|J0QqyKmtHUd zEq{sAOqFn+W(hL*#DR10|2Vz0~IQO0Dw|L z0059d005995r9%c04R|mKr)e>+y9pU0Duy;6F~Xk|Nj@{0{|+c5r9&F0Dw|L0059d z0059e04R|mpf8b;+y9rK8$kJ!*#DQc6MsPY004jz)Bpb$fB=9}LI41eK>z@d0}y~x zLI5a{ApkLvh1>s^004jz%K!ftfB=9}LI41eK>z@d;}3vRLI5a{Az(0(eB1w*004jz z!TKs{R04z^HTw-a{&Ob^aB8@V5Ud;0RTYxp%(!80e=91 z(li19w4jp!k&D^?mmmWGHKZp%xt|08ai{?RQ~&^gQiK8kxpSsR`Jxv<`C$S;`9S~y z${_;)wewR!sr`GysK5hX)&Ky268HcA7k~hOQbGU#kU;9-fK>4E=0Qmy|fYRUr092xr0Fj;9|Chi405zc}K)C<_ zfKq`10J(FfNBN=`K>1+;K>0xc0m=aa0JZZ|L8<+F!>GUmU)BHsfD-rr{}{ z0Dw}n3jq1Eg8`AZ8vyyBe+()20|F}G0~4ws+5eY7696@~7XbMH|GyHpXF$2Z1^`h% z003)F2oV5Ov=0FJ;R6;bAOL{UVb=eb^aB8@fD{0&IoSV~;1K{dpff=Ez!aCVGXXXNSlE~9GXWYsjoSa000;mz zp))}F0|0Tszy$zQ-vSeB zgO@=x0V97o*#DRH0{|*O{Qori+5eYi*Z-H~0|2T*0059d005999)MCp04R|mfH;wW z0Dw}p+W(iJGeG$j0Duy}0syqp+5eZkFF^Ua??kEK0~D&s*8i8YQ$eY=-$JQi0RXT8 
zq(}MS0~IQwH$eFz06_Uc00GLO3;?zG=R>LW6F`6Y{d>o#zyn{_A0PwOpff=Efc^iq zy6;4(ye~lc?En84A0PwOz@dqaJ`#LI5a{As{)CfB=9}k=p;4-2eX< zA0PwOz@dBOicLLI5a{Apkm&fB=9}huZ&_(*OS#A0PwO&XMkfR@fQbGVIks+Wuk$?bzQhnP0m&pJB7Zm`2643wu7vuvLDnbANkU;u9m$M5%`L^Fesl@;P7vuu~DnbANkU;A-vX(+?*ys5F97-L)t8Po0U3X=_5%Z|p>F`Wq#8i_LZnCe z#2P^QLjXYeqgMd=0|0>1_ZtBDAQ=EuKl}i3=okQ0p~C`^xE}!dx+ehnA^?C=zmovD z_yYi{Ka3vuY1jXk`U3ze;1B>cp-({hp%VbKphE(Yphp1tpi=^ovr_@7phE!pGys4S zpo0RDyK{d9sktKn`Jg`l`3?X86dxc1)t~}E`9J^wv?|vBm!MBT`Md%E`O^XbxqjCF zm-+($Du5FJHJ=6maiLE@`5+PiRHR2h`6U2=QUd^h(gFdXQs4s^svFqK+ zHKLD4sR00hQlL*j`M;L{xxZ5Y`8(DBm-+($Dqt8505!W)K>0fj0JLEvKsg`)fYR3j zK)HFBy*B|De@)f@m;3_&s^A9zw4wq)`DcDWllKD`Dk1>@wBrL8s^J3%s;4^u`B&Hf zmp}*rHP-?_xp@OXxu-io`G5l#i{k?sDq<%9k>LXes!rGcm-hnzDuDR^G!NPTmwML! zmqRCjQse^wszLw&kU;1(*0JP=U z|Chf{0QsN-K>2_M0JPoJ|ChNVK>4#%L8-fQMXA360Qp~M0C6A>090R-0C8Xq08|$M zfKr+Z05!NDK>4~SK>1(+0I&h1NBQ6b7b>DxK=~m6K>0xc0m@+x0JZg7LaFx~K>7W9 z%c#Hue_z%gAOqFp*#DPL*8i8}0|2T+CV)~x0059d0059e04R|m;1Q940Dw}T+5eZ< z|Nj>sAOqFd*#DP9*8i8oCV*1O|Nj@{0{|*Q0059d005A~Du7Z#04R|mU^kI~0Dw}F z+5eZ+|Nj>sAOqF2Q$eY_b497SBS87j|Nj?4e*gfGK>z@dz@d<0^nsLI5a{Apkg$fB=9}7XW}#dD;J$xF10I zx+g&S!vFslA0PwOfdT-u-~$1wlGy*3vr|E-xaUKuy6;4(ye~lcx&Qwch`KbT@7o~4Nx#R;HDnbANkU;na!m$)B5 z`MM`S`J4a$7o=}Mxk3N{kU;4~S zK>3gV{})040FXfd0FWavfKoyLD3Kw+JdvbtK)Lk){};1U0jap>1F5?21gX3)0Qu+C z|CjxHzN!5K0Fm5G`0JH!AfKu}dK>7W9!l=LlU)CQWe*@J) z0059c00597z&?=xKtGWt*#DPc0s#4-{{OXL0Dw|L0059d005A~1AtOOfIpETARdt& z+5eXS0Duzh|Nj?a0RXV!0|2T*xJUUy06_VI006W=006K6fB=y|03X%>0Duy}17Ftt zd%me*0RXT-006K;0)SHe0|1dBfBZ-J0H6So^9um^9Mu1ppaB3i^8)}XLI41;K>z@- zBL;v{LI44gA%FsrzyN?!|JeVR-~$sX^9w-v{d>Zwzyn{_LI41;K>z@-!v%m+LI44g zApioAfB=9}699k`^Vt8F^#A`CA0PwOWB~xMg#1VOgaknOWB>rPfB+xXe;*(N)%|Au7kwO3gkwE|eupuA=k>c3@mjD2O68(F@sK5hX)?onvup#_M z`5_EI`C$M6v>*T<)*m1P)%|Au7kwO3gkwE|eupuA=k1_<0JMMrAJ!ir1J(U|zo{b$fKq?}fKoyL z0I>Z70Fgof0g*uf0I(q-1ChSi|Cay&fD-+C!KlCkU)Es(0I(taNBJQfK>1++0JIZ70Fgof0g*uf0I(q-1CgxQfB%;N0Duzxd%>u{ z17Fr-0RXUL{73m?BtZFN006XL03X&LAOqF?d%vk83xHC90Dw|L006N40|1di00EIf 
z006KdAOn$_*#DOR0Duzxd%>u{17Fr*0RXTe{73mAEI|2T006Wg03X&LAOqF?d%vk8 z41iLA0Dw|L006N4e**xKLI44gK>z@-As_>hh}i#^004jz{d>Wvzyn{_VgUfKLi|Vh zLNq}6VgLZNKmZ@sA0PwO{d>QuBMpF3fB=9}LI41;{R04zLI44gK>z@-As_>hcG&-y z004jz{d>Wvzyn{_V*voLWBfAu7kwO3gkwE|eupuA=k!0BamjD2O68(F@sK5hX)?onvup#_M`5{C=`C$M6v>*T< z)*m1P)%|Au7kwO3gkwE|eupuA=ky61++0JIZ70Fgof0g*uf z0I(q-1Cc=3|Cay&fD-+C!KlCkU)Ey*0I*~HNBLt^K>1?;0JLBLAJ!ir1J(U|zo{b; zfKq?}fKoyL0I>Z70Fgof0g*uf0I(q-1CcJ+|Cay&e}EGGd%>u{17Fr;0RXUs{73nO zTtN9`006Xr03X&LAOqF?d%vk85`a>G0Dw|L006N40|1di00EIf006KdAOn#c*#DOR z0Duzxd%>u{17Fr*0RXTe{73mAWI*|0006Wg03X&LAOqF?d%vk86M#~H0Dw|L006N4 z0|1die*giIK>z@-As_>h3fTXb004jz{d>Wvzyn{_VF3WJA^b=AA#6bTVE_QMAOIiM zA0PwO{d>QuBNTvAfB=9}LI41;{R04zLI44gK>z@-As_>h_}BlJ004jz{d>Wvzyn{_ zVF3WJA^b=AA#_0bVE_QMAOIiMA0PwO{d>QueAu7kwO3gkwE|e zupuA=k>=O`mjD2O68(F@sK5hX)?)zxuw(p3`D1)Q`C|Y8v|s=q)*m1P)%|Au7kwO3gkwE|eupuA=k<{1!mjD2O68(F@sK5hX)?@(yu!Q_a`GkZ( zfB9qp0JMMrAJ!ir1J(U|zo{b^fKq?}fKoyL0I>Z70Fgof0g*uf0I(q-1Chel|Cay& zfD-+C!KlCkU)E*;0I20>0JOjWAJ!ir1J(U|zo|nQfKq?}fKoyL0FeCy z0Fgof0g*uf0FWUd1Cg-T|Cay&fD-+Cf5E8017FrZ008ks7(jXi7(n`92LM$4d%LOq z0|1ftV*{!80|2V?3jq1^0|Bb_69D-k0Dw~U0|F{@)c=?G0|2TZ1^~3R1^`iDLI9Du zYXqr50059#0058`P&JD=;|Y;EAPkYT4*>bL7XbNm0zmoT0~o5|0~D&10RXguf7Ab$ z_5%Q_zybiYK>z@dizzyn{_A0PwO{J`SSw-sswdR{m-GVws^A0wwBZ92szcNNf0sf40Fd+p z0II+N0JK2>0Fc87fKo!>36UY-43WS9fKo=+|Ch51K>4*3K>6SU6sq(OK>7W9!>GUm zU)CQW1J&gN6{_R|6RHCVfKo!>36X#RfKovK0FWUd43Rh2|Citc6sofeK>4*3K>73! 
zK>7W9!>GUmU)B@=fYRUte-x_p4?y|-d&8)}17Fr3AOqEA*8i8$)Bl&{0|2Vz0~M-5 z0059d005992!K*T;0ci-;0lp|0Dw{;*Z-HZ3qbj`6F~Xe|Nj^Ld%mgt0|1fp3jp~O zOaPJK0~4yN(*Kv?0{|-E0~4x{)Bl(A3qbh*8~_sid%~!|17Fr310Vy{myksP7=LpC z0Fbof0Fh&+NBLv`K>4H}0{H|0fYS970QvO;0IEO$0FVH{MtPwY0J#AGfKs4y0C}M6 z0FfHg|Cc}j0FdKmYwQ64RG=CFx$2iKMFAH|BVT}0LLeiNApj_ma@PNs00N*= zpc(+V-v9p>z@dE`p)Bpb$ z6VjKKNdYEO(bWGJ{d>Qu0|tOnfB=9}LI41e{R04zLLeiNK>z@dApk6qP}cvK004jz z{d>Wvzyn{_|HA^&{d)qb{eu9J^BVy9a{&O5^&jQh)$}QbGU#kU;FOl5UfB%;t0Dw}o z4?y|R|Nj@X4*>ax(f^nId$y_l0|1fpQvs=S0RWKn0|2UHrbqd|e+a2y06_VmPXPG< z0D#iJLjpO!KLGjE4*;~b7XbP80|Ba`1OT+RV*{zT0|Jrw0|F`|007X0k_3@sAPJEI z0)WyX0)W!s0~e}k(f^kpe*^$AU<3eEwG#mO^#c^Dx+ehn0Rn*1-~$(`J=Oo0zeWN0 z`U3!}-~$z^+0g%&zyts^wOc}|004kew_`)8x+g&SbEZf6qEA5iVFEz;K>z{Dp%VbL z^HV{o{d>x&zyn{_A0PwOfB=9}LI41eK>z@dBMyL4LLeiNA>c5Pf1K6-m$nx``5*v* zQtto%7u3}Mmp{_~m*fKgs$&j-Qh)$}QbGU#kU;s^9|^s>Ra(mjD3( zHR1ymsv!t~(%=Ics_@X4qD=uJ0$0(O#!Uetf205Z7at%4)qntiQbGU#kU;=sgnQy7r7%q`9c5ykU;Kr)dh)&G|u0Dw}pTSBR~V?(LB??kD0|Nj>sAOqE0)c=>q z(f^m_0|2Vz0~IPl0059d005AK5r9%cAS00>z%!A60Dw{!)&G~dBS87P|Nj>O0Dw{w z(f^kK0Dw}pTSBR~V?(LB??kC$|Nj>p0Dw}=|Cjbo0TLdz(EpboAOqF?d$_6n0|1fp zQvs=T0RWKm0|2Ulrk5s90W$%fms(E&A%BoLBO{UbqBW5OkOdn$*kt(O0~o3R1b|YD z)Bl(C0{|+Z2mm!^27tNc0~o4coo$3K&4S z9|Qn#%mDyYw%fHaYS0Dw|80Dw}d)c=>{|Nj>sAOqDx0059d005BV z7JyPhAS00>ATW`D0Dw|80Dw}T)R(4E0V03W)Bl$}(f^m_0|2Vz0~RVm0059d00599 z8GuqkAS00>;5Cte0Dw}A)c=>Z-$JRl=R>KwCqVhc|Nj@Z-vX()=L4y_Cjj|0(Epbo zAOqF?d%mgt0|1c#1^~3P3jp~6G=Ng`0|2Uc)c=z@dBOZWKLLeiNAs{)C zfB=9}Wz_$d?f?H5A0PwOp40!A9|!<(!2JJIo74Z72GReQz@d zLLeiNAz(O>108@;fB=9}Rn-5N-~$t?CRrs#wtfmox?dw1$>IDc=AEYl;H^RE3^@ zK>37H0FfgA0FdG-fYM{&FOdN%fYRUt6{@A!X8707|CjUw04g5@05O0C094=u6{=Fu z|Ci+h04e|gfKr7505pIAfKoyL0FXfd0FdMZ6{=$;5m^I)c=ATW^t)c=sAOqDn)Bl&F(Epd^0|2Vz0~9KN0Dw|L z0059d005AqC4f>wAS00>;5Cu*)Bl$s0Dw~A|Nj@C&;OVGd%CFsAb`^S0|1ft8vyxt z0RWKn4*>b}0|2Vxrbqc80DxT5LjXYeqb~sY0|0>1M$iA3U<&}WvkL(E^8)~?-~$w@ z70~~epeq3RAPfMswOazIwL<}sRsaBy-~$z^!D2j-Y103fAO`@nxaR|@IRF5VSO5T! 
zIpZ&p7=t{Q?^6LCDs#{OmmdTGF~9-ssq#im-GVwDj){{HNj#$k>CRrs#4Pb zmmma}HdFyBe_qf3mmdTGF@gR6RO{0Jm(&1&QdQ9Zm*fKgsv-aY@F4&I&_W<1ks%;F zk)tz!Qh)$}Ql-=Xm*4{vs<`JvslLzum!K;^`9J{xwY6J9sj~|}`3e946u9R@sT2T! zQs4sJK>6VUK>0xc0m@;Q z*;D}^f5OuLmo3o$m*fKgssk~AQh)$}QX&8V@F4&I&_W<1ks;s_k@N!qDty!bm+1fh z7q-&>m(&1&QXtU(m*fKgsv-aY@F4&I@IoLXks-i4k;5^7Qh)$}QgGA%m*4{vs*2D5 zm$M5%`Pu*f7qwdgskr9@sj~|J`54dtmmeSlf7Shay{Y{J0Fm?$0QvL-0IG`4|CgWu z05t#rfKv1iK>7W9!>GUmU)Hk=0Qpk^0PwXF0QqymNBQ6b6RI21|CjXx04ksa05oC& z0MI~qOSu6A08u{}191rh08}3U0CC_KpBg}T3%Q8W|CjU6|Ci+h0IKE#6)NNd6RHOp ze}GaV008hI008h~H-J(?AS00>pgfUa0Dw|J)Bl$s0Dw}n3qbj`6F~X&4?y|-d&8)} z17Fr3AOqC_1OO4U3qbiG0Dw}p6F~X&4?y`rcni7xd&8)}17FtQ0~D%n(*Kw90{|+Z z0su5&0RYe;;4P6L1VFh01OQQA7z1%>V*&tF9{>Px;1{17AOrxpU()}V%Fq9ob!Tm(W%LC^7>gfYJg0fKuTD1FG`_0IJ{v6sl9s|Citc z6sqI{6{;fu0I=dAfYM_i36TLJfYPMSmnv5QDjKEH|CeB%0Qq1D0JP%+6siIMfKuTD z6skMUmu^=9A{sKz|CeA2K)JsH0BdRr091CKK=~jDfYOlBm#S9*9wrRZ|Ce{q|Ci(g z0ICB-fKq?}fKnm=0PsNo0FXi;BatCs0+Gkkm+n^q8Wte{0Pq4JBauTvfKq?}fKnlV zK9Rf9mmXLFAP^&wA%FmpV?uyZfB=9}uhN%JSOFS;z@d z14DpPLLeiNA%Fpq@c;i8A0PwO13`dNfB=9}A^-sJApijILLeiNAs{f3n9~23AOL_; z{d>cxzyn{_BSC;tfB=9}A^-sJApijILLeiNA>cicj?({^wG%-3AOL_;{d>cxzyn{_ zA0Pvl=U4$5QohmumnqNxm*oQhs^kL{Dk1;?@F4&I@WVlXQbHgjks*LTk$?bzQh3t; zmxKTR7yWyxsUre_()|Mfk@HglsZ#*}@by~)sT7a|k@W)rs$iyG52kz?R5k>VkM(g7lX(%=IWs`<_TmmdTGG2j6JRN^6kQsDy> zs^9|^s_W4Imvg2^`J%5tm-kr#A%A$$|Ci&=|Ci(g0IFk1fKq?}fKnm=0PsNo0FXi; zBatCM1d%h+|Cb;DfKu!K{}=QF04i(I|CiRz|Ci+h0IK8z6e@rKfKnm=0PsNo0FZ-6 zfKoyrBatCs1Cb@t|Cb;DfKuK6{}b} z0|2UFr$_k!06;k+06_VozX16H0D#iK3;;B<3jq0(c0~D84FI&YTLP&S0Dw}rV*{zS zqXCh*YXqrS0059V008h8DS*;i0058}g9VW}V?2?(F97)=000yMB7o8X0Dw~90~V?w z&i|L-0~IP$&i|L@0~o631AhQ2LjVAfCRysxQs| zm*4{xDlN|cm*fK&s^tR!Dnp|Mk>CRyssaLl(jfwX(tgeVm;3_&Djx&@F=6@tR0`4m zmvzqnm*oQhs^kM4Dk1;?@Ie3okYm4qQbHgjks-hZk$?bzQpC~!m$h3$skdW8skv)J zsk|>h`Lhc^`5*v*Qh$4=NBN?^K>1+0xc0m=af0JZcFK>7PCK>7W9*r>n*U)Hr- zLaDc7L#erIM5(h2K>55cK=}XwfKu`Q{}&%11J$)#LaDc7L#erIM5(+lK=~p70PsNo 
z0FZ;efKoyrBatBh1(ARNfKsB-|Ci$b{};0BR^0Dw}J(f^mcFF^UVTSBR~V?(LAYecEh|Nj>sAOqFp0{|)_ z008ho00599!GKaiAS00>APA9w0Dw}5(f^mVTSBR~V?(LAYecEMFF^Uj|Nj@YTLP)K zV*{zVYXqsY3l0GJye|OxEY1IyA0PwOm)KkZ96F;$fKq?}fKnm=0Py_-0Fgo z0FWUd36X5k|Cb;DfKvT?!KlCkU)J*e{}&%11JwWo0JH*}mo8laJ_d2k|CgekmvUVJ zA_iy8|Chg>mzrGxVHWW80|2UErbqb!06_Vn9|HLS0D#hzml0k86llOkdBT5y(g6T~ zQlN7Hd9~vJk)Z1UkwMM>mp}jjkmG-VQsM&@ssRFkQUCx7kss0jmp}plG$H^1@Ie3o zkV8~}QbHgjks%-rk$?bzQWMetm$h3$sdJ`B`Jo>|`CtM-`5*uQ$^e&Pxtpfm5AD|C$tN{R2wOc}|fCd1$ z>Hq&1A0PwOA^-sJApijIqfvlTLLeiNAs{f3U;uzp^3ea6;Q#*@z@d1$}^0gHwP~LLeiNA;1d~k?7F>mjHc$Qngz`selFmxzm?9UjZ9515~nM000h=x6uEWKmq_XA^-sJK>z@dV_bkzLLeiNA)pbFfB=9} ztI+?KwG%-3bEZf6p&vr|U;;q-AOHZ$0GE+o0TUkJ0~M;L&;OSn8~}2F{r^-#0f15g z0D#iq0~M;F%$IIp0UBB9|Nj>sAOqDR008hI008jAS%6YPAS00>ATW_&0Dw}8(Epd< z|Nj@{0{|)_008iS0Dw|K0058$eSlJ01%OY(EpdU6F~U@eSlJc3IMs& zmpNYn8#E(ZfKoyrBatCs4v})u|ChBBK=~j5fKq@80J+8g{};6r0Qnru|Cb*i1JytP z0JO3B{}-3wT>%>`@bm)!s$ix^`2hex`Jo>I`2hfc(v$)Kw6zle`5*uQ@L>ReQh^Qt zxd6aMm(O1T6*h6q|Cc}j0FdK2f~NBN;2 zLiu02_U0J-S@{}&%11J&dM04gE?0PuhSfKovK0FVWJfKmfvfKoyr zBatD16p<#-|ChBBK=}ZDfKq@C0J+-#{}&%11J&dM04gE?0PuhSfR{^O0SSMDVt`UY zAS00>027fI(EpdU6F~VO0Dw|}4gk5#|Nj@Y69Dj04)%>RFvKmY)c<9~os;sX_`fdYV1000(|`p^HDpa1|h-~$z^ z`_BKD9~=O3VFCbDA^-sJK>z@d<7R+TLLeiNAs`u%fB=9}>d*g|wOc}|bEZf6p&vr| zU;;q-AOHZ$fCT`x^HV{o{d+*E{{s<`zyn{_A0PwO0RVu~LIHqM-~)dZs^iQ5mjV92 z5+48oYmEW`R29geQXc>nYpehORJB_|sem2;x$6J_7at%4)#U>fD&zwIDk1;?@Ie3o zkRxV*QbHgjks-hrk$?bzQpnH$mz5p>xwTtDsonqo7v%#MD&zwIDk1;?@Ie3okfUXQ zQbHgjks)9fk$?bzQoMi9|Cg2?0J*hWLaEXJ{};7e0;y-q|CjxHy{Y{J0Fm?$0Qock zfKng?0JO6U0Qn&Rg3`4U0QvI+0IDDWfYK1l|CjXx0II+P0JH%Bf>P|v|Ce9~05#6c z|Ci(g04n1H6{;ct0PsR!8<7DZfYNi40Fe{`fKuQC6RKU!|Cbuz0~M;b%m0@^0RYhA z0~4wNAb`>Uz(@He%a`I|0VaRQ&i|J*0Dw|5%>S3<0|2U{aDY;P0Dw{=007V-008hp zAS00>02+~k&;OUR3qbkc0~9K?6F~X&4?y|-d&8)}17Fr3AOqF5&i|Jo%>S3=0|2Vz z0~0DD007V-008h~bAVDpAS00>z#EZ(0Dw|(&;OUR3qbj`6F~Xg|NobVVgU~vBatDX z8j%BWfKq?}fKp}8|Citc6{<77W9!l=8-{}*F)fKq?}fKnm=0MH=-0MJ4p 
zBatB>Fp)&hmpWqsDkdq<|CjUw04iVs_&1Wx|CjRv04hKN05#wP6e{ymL8<+F!l0IGndNBQ*w1FH7l0+%CX0XYJ*&6i$e0U9Dm%Kw)l0D@BC z0~4y?0~V^h%m0_)0su6>0s#5o0~o5z%9omC0U8bg%m0_N3qbh*0Dw}IrkBEG0V+Cc z%Kw+)0{|-E0~4xH%m0@j8~}2F1OQY4008jd0~4wtz&?>8&HtD70|2TZ{QtC}BLR`% z0~4yT%$Fu*0V02_%Kw+(0~4y+%Kw)j0Dw}n3qbkV|Nj^E&HtBS%m0_;0|2UHf`C$h z0Dw{=007Vd008hpAS00>KrfN8&i|Ky0Dw}>|Nj@|0~4y`0~V?x007V-008hpAS00> zfE|&8gMdQu;sXLI-~$1w{R04zhXjDRB>(`> zhMYk8BLD#KrC>afV_+VUK?DF1girvH#&~D=F3SIx9|Qm~fB*ng004ke{d>Wvzyn{_ zxXu5UBg_An;W-@IoLXks*K{k#o-fmmmOuQvG|usK5hX z)*m1P)%|;-sr>^0k@HglsZ#*}(EKj|`TPR_s$ix^`LrXKu4Vxi8UiYS(g6TK`JwLs z`2hfc(!WCh`QQT-s(Z?p-ev(N5`E48m%s%8G;^j$`QQT>Dx&X~7H0tsAOqDR z007V-008hKDS%Q!AS00>Kp&BS0Dw|B0Dw|2&i|L+|Nj>sAOqDR007V-008hqDS%Q! zAS00>;2)8I0Dw|B0Dw{@NzVV5)&Kt&A0PwOw;KTYqXPnw;sX_`;R6|}-~$}08O#5d z_X7YbU=9E^AO}FXqC-IWffGQv0007u9{>UCtqcHEApn36eY8mc4XFOlK{8>#^+fYRUt6so0Y zc=(se|Cb*G05PBg08{}afKuQC6sqFQ|Cb>B|1==Y|Cb*K0CB+l|5P2#|Cfl$|Ci+h z0IH%tK>6eY6sjQr0MKJDfKnk~IFWz=fKu4a|5KOS|Nj>v007V-008h~DS%Q!AS00> zfFO~80Dw|B0Dw}@&HtBw|Nj>sAOqE)cL2Eu&HtBj%Kw+;0|2U|cR;zrEr3#?H$eG- z0Dw}WKS24z&6hrD0VXDy$^Vz)0~RXb0~M;^101U1%>S3S8$kK^0{|*x|Nj>sAOqEa z1DA?v0WLaYD}Yi$AS00>Kp~NU0Dw}U&HtCT8$kI_|Nj>sAOqFl101R&$^Vyt135eamp*C%8WU{I|Cjay z04jhC_%>hx0I&eyEthL*0U<7{%>S1b%Kw+;0|2Vz0~M+U8GuqE007V-008h~pny_B zAS00>U?h=X0Dw|t&6m1r0U8n`o`6z9AS00>z#@@=0Dw|i&6nnC0VW<}pMX+e0Dw|6 z0Dw{=007V-008hpAS00>z$1}J&6g@`0UjWuoq$q+0Dw|60Dw{=007V-008hpAS00> zKp&Ag&HtB|@M-}V9>bl0Qh)$}QZoR6QX&8V&>;W-@IoLXks;t8kt@xYo@)UbAmg2Y zQh)$}QZoR6QX&8V&>;W-@IoLXks%-=ksZzdmzOYW0T`F?YXJx&^veI2_X7YbfCfOh z{{jGO-~$#aU2$HQ)mlDp1V-m$cVGskYxjsfYjn7Z}X{ zmxIZd-fRIDe`BJ6QbHgjks*L5k$?bzQqj!+m+$}o7at%4)%|;>sUiRX(ES4dk@Hgl zsZ#*}(DhpasrCZ^s$ix^`Sk+(IROAb`JvAM`2hfc(x4{+k>1Gv zmo@?bv=jhVZ7pem)fKuTD6{_F^7OEJ`|CjUw04f1AfKuQC7pj=d|CgWw05o6$0I&ey zEtltQ0WT0Eks+WckwZU#Qh)$}Qi;r$CT;;HA}`4Qm;HOcsr>^0k-!1~wBZ8)sv-aY z@BrWhkwV}pk@v`#Zf*e@LZhUBQh)$}QX&8V&>;W-@IoLXks$yok!sBUmmmOuQvG|u zsK5hX)+3~VQh)$}QX&8V&>;W-&_W<1ks%;3kzmZ1%5DJ~e`U!3mw*5OHT`?RsK5hX 
z)*m1P)#L*JDuYUZQh)$}QX&8V&>;W-@IoLXks-h;kxk71mmmOuQvG|usK5hX)*m1P z)uTy(Qh)$}QX&8V&>;W-&_W<1ks%;3kwVP>mmmOuQvG|usK5hX)_?#2v;g1)k(A2+ z7at%4)xZN^mt$`M8BWK`|Citc6RPtIK>7W9!l(ltfzkp1fKuTD1FC?>{}&%11J%F- zU)CQW1JytP0JIay{}&Fp+=&fKnjLm&$Je9x|lM|Ci(g z04e|gfKp@t05$!4!KlCkU)IB>fKq?}fKnm=0MH=-0PsQ}BatD1E0GM$ml|*ZDgqhE zmkxXZ8kb;j0R%?l6o68I0Dw{=007V-008hpAS00>;4G2z%m0@k0Dw}n3qbk(d%~!| z17FtT6M#~H0Dw{=007V-008hpAS00>KrE5y%a^Kf0V4q(m(Fkj7Jn_r|Cg0U0yqO8 zfYJg0fKuTD1FGNy6sjl3|Cb{G0I(te0FYzA2a!S`36TLHfYRUt6snBL|Citc6RLj5 z|Ce9^05#wP6RH!+|Chi705#wP6ROO}|Ce9`05!8yL8-M9K>7W9!>GUmU)CZ30MH=- z0PsQ}BatECEs+CxfCo~50Dw}s%a=}A0TzD)cz{xX0Dw{=006Kd006K;AS00>ATW`v z%m0@k0Dw~cd&8)}17FtQ0~4x~$p4oj006Kd007YAdVo?wAS00>ATN>V|Nj@@0~4x? z$p4oj006Kd007YAd4N(vAS00>fG&~X|Nj^Ld%UUT0|Bc20|1fp3jq030RXUJrbn0m zaRDs`^T+>}bEcOvase$m(Br3oQbHgjks%;4k$21gmmmOuQt$u&7k~hOQX&8Vups~d z(8H&IQbHgjks)9(k#5WXmmmOuQs$Szasd-ggQtK}LLeiNA;2$@WXu1TAOL_;-2eX< zA0PwOfB=9}A^-reApiidqo;sULLeiNAs{f3TFd{JAOL_;(*OS#2gm=HfB*or000V+ ztjU-9aseX^01A;|QVNj*2tc{i%9kp00V02ct$;4zUo z%m0@k0Dw}n3qbk(d%~!|17Fr-tAJ8~0Dw{=006Kd007WJAS00>KrE3i%m0@k0Dw~c zd%~!|17Fttd#b4dB7o8X0Dw~c0|1fpQvs<|0RXV`0|2Unrbqc?06_VquK@W30Dyne z_ZtBDw<7_O-~$(`Jjef+^9BI4VG2OGBLaZZU>ZQV0sw$gxE}!dcmfKMwOazIb&?K| zHUI$7)&et;0Sf@R;R6Gz022VY-~$z^V8;KK-~$z^BLD!<+qSupVkM(g7lX(%=IZ zs-(vMm-7PvDjx&@F<=D%RN^6kQsDy^s^9|{s+GzAmjDI;HMLtpskk3N`IM%YmUICw zf5WbTQbHgjks&}Ik>1MxmmmOuQt$u&7at%4)fUSCmx0Ism*oQhsv-aYups~d(BuOZ zDncM5ks)9+kpr`UQh)$}Qqao(mmmOuQn(*L`L$a@so($q7at%4)dtG{mvP7cm*oQh zsv-aYups~d(BuOXDuc6tQbHgjks)9-f06M2{}|Cb;DfKs?0K>4*>LaE6A{};7e0;#wk0Qq9Z|Cb*i1J(U|zN!5K0Fl51 z0JH-hfzq=J0Qunq1FG`_0IC82fKvR$|Cb{G0FVLz0PthL2a!S`36TLHfYM-+e*lqT z$p4q%0~4xK$N!f=0RT1N0~4wU%Kw)j1pqbR0~4yT$^Vx?0{}I%3qbk(d%~!|17FsF z0Dw{=006Kd007WKwSZDWAS00>fHski%Kw+M3qbiG0Dw~cd%~!|17Fr3AOqE-w1858 z0Dw{=006Kd006K;AS00>ATW`Ff6D)tAOL_;{d>Zwzyn{_-~$t?vC03JfB=9}gSLQD zA^-reApii-LLeiNA>cQWcFO;kvkO4^AOL_;=>Pv0-~$t?rOE%7Zwzyn{_A0PwO{d>Hr-~$1wm%w%b3j?rW 
zrkBok0Ur?n3X#7L1u3E%0Qtbi|Ce*7m;QDEEf&zDw}4VYAS00>U^$UQ%Kw)j0Dw~O zm%(xY6BWa^fKoyrBatCMACWrB|Cb;DfKug`!Eyl;B7?VpQbHgjks+Wskub{tmmmOu zQrrLk7v07Gmw*ERv;lONo_7H*5_ib|mtX=wIg|zf5w#)(kzsb1-gf~Oe=)}Ymw$Ey z`SS}v`TcvtsK5hX*1!W_)*m1P)%|^0k$?pNw6jwIsS^Nz(zO!+`Sk+;suTc# zQUCxzIRFFzG`1H2`2_#~6rch?Ig|zf5gz~)Yb^r+RHAl7`9c5_i*q6ckycOuk>CRY zs^J3`szL}rxr537mw^8NLNw+B04n7J6e{Ec7OLaCfKq?}fKnm=0I(qd0MJ4pBatB> zI+5(j|Cb;DfKszlL8-M{LaF_G!>GUmU)Hu4K=}ZdQCI;JQiHpIQh)$}QX&8Vups~d z&_W<1ks&}Ik=n`smmmOuQvG|wsK5hX)<6INw6*vD7yWy`sRO=%Qh)$}QX&8Vu>Au7 zkwPFNks$y8upuBYk<7`LqIdx#0V$WpcmW=NbIAXffOZ7=pa%f7;{y|_;R6$@0sw$g z3B~`H-~$w@XU6}Rzykm@fB=9}A^-reApii-!^415LLeiNAwWEltI7YDGXQ{6fObUr zU;_ZO!^QuXpLPWKv=2b}3jhEVA0PwOA^-reApii-gTjDPLLeiNAz(X^fB=9}oXP)} zV|f7~e}~5Zm%s!7HQ)mjs@TZ?m-7PvD!>H*HQ)miDzpzk`SS}v`TcvssK5hX)&s(T zQh)$}QZoR6QX&8Vups~d&_W<1ks&}Ik%7tom*4{vD)S3K`TcvssK5hX*0{+3moor> zQX5i3{}57?{}=sxys7;I0Fm4H}0Qm#}fS03s0V*kUMEO7m0JH-sfKoyLK)InC0QsS#0Fi(Iqno6o06C&B z0J)^a|Ce*7m-c!AEk4jg$AD5oAS00>U_X&4$^Vxi0Dw~O|Nj>sAOqEa0Dw{=006Kd z006Lq#(+{nAS00>ATW_0$^Vxi0Dw~Em%(xY6BVPzfKoyrBatCMK9Lm3|Cb;DfKuC+ z!Eyl;6~o4WQbHgjks;tdkqXKGmmmOuQqh-TdjS(7Zwzyn{_W6FS1fB=9}A^-reApii-LLeiNAwVpV*q4!W0TUP_ z007Wqpaqfr0|1fpQvs>;0|Ba20RXU0RVu~Ap(HXfRY4}X2k!O9|Qm~00;n7xFZ3P0wRFY0RVtf z-~$+{)5QOm<^vU~B>(`hBLD!<=K~iirC>af#F7A!W1t3+;vs<20V06X-~$+{RK)+6 zfAs?ZDjx&@F`xnfRN^6kQsDy_s^9|@szt~Dm%s)9HQ)mjs^Z4~mp}smG~fdis)NP< zmw*6(QbW&xQX&8Vups~d@IoLXks;s(k(9{)mmmOuQV#$C6dxc1)y&8Lmp8@#m*xWi zs^tR}Du4ihQX&8Vups~d@ZPX7pgwQ|Chf*0J#DwfYJc~fKuQC7^*qM|Ca+Efzkp1fKuTD1FGNy z7pk;J0J*j&0QqLa|Ca+PfYRXu1F8Z5fKuQC7^-2z|Citc7plL+|Cay;05#$R6{_I_ z7^>g{7pii@|Cb;L05u{2fKnQv0|AlX0~o5JLk2l)#Q&G{0{|-E0~o61#FyTG0U9Qw z(121xAS00>zzLCn0Dw~R$N!hL6F~Vi0Dw|+rbqeU0~9KvN0%Oe0UUoSk-!6A*5m^e zDk1;?ups~d@I%mmQbHgjks%-mk$?bzQZxX7QryS?m+b%l7a{-vups~d&_W<1ks*LK zkz>+;Qh)$}QZxX7Qq#x(m$ef>`L|<3sp0?s7at%4)gk}@ups~d@MF+`QbHgjks$yH zk$?bzQZxX7Qpd;tm$iQrK>61H{}&4As0Qms`fYP%E0JLES zK)E9VfYM+VK)C_{fKs;`0Qq;~1d+8{0;zR!36VAc0MOQg3Xy-|3IMs`0|Tny5&*g2 z0~M1prjFBLR^D 
zB7o8X0Dw~90~M;X!~d6Lk^qq<00599006M&0~V^KU_6myU>=d;A%M~WB7oB10~M+@ z!~d7_0{|)?1OR_AfCT_l;vs-i;R6+_-~$(`CdU7lz{DVFm!T^HV{o{d?A^zyn{_fB=9}A^-r8Apii7gVcahLLeiN zAs{f3ZpZ(ZAOL_;@c;i8A0PwOrpEu55ybzO{}&%11J(Ze z{}&%11J(U|zN!5K0Fgig0JO6U0QvI+0IKQ3|Cc}k05#wP6RP^f|Cb;I05#wP6RJ1F z|Cc}m05!77W9!l=LlU)F#CfKnm=0FWU70I&_i*nm<(AS00>;4P6Pml1XW7#P@q zQh)$}QX&8VkRbp7kU}6Mks%;3kr}j0Dw{=00597006K;AS00>fG&{$ml1XW7zqIYuwwu~ z`E16Q%7X#6xA24kUPgPU^0000n000000000-Oiw~V zOkYe-M_)`uRz*wzm!Y8n6A5W&XmlWDZgwD-G>ic|mw37X6PJUG0o}K_jsb81w`h$4 zPyrGpb2=qtIv`_mWnpw>Aar4KYnM@x0UDQ}iU9<-V2}al0k>z80Ve~O{*?g?x6+jX zwhkCuS3y!vNlr&yAVY6%W@%?2aA9<4JtcFOPNV@C4;#t=xB)1aPs0IOm(arjAeWHD0U(#~!vRp2aK!<0F=T9PAZB4{Y-MCF3IHW@ zIwfQ}AZ%}8WFTa6X?A6DAY^Q8AaiwNV`XJzE(!nu006h##sM$_4<&OTb9G~5Wo2Y8 z3IG5Am+Qy@ArmEYAZB4{Y-MC1W^!+BASH7y3YYK50UZT%bz@^?w-CtzR|U6l&H)Dj z0%v8HfzJUJ79eI}X>4U=EFg7rWgui_W?^+~bS?^)ozDT{mp9@84wuQ#0S*>sVQFk- zWGo4U=EFg7rWgui_W?^+~bS?@200000m#5GH*_W`< z0naOEWpp5IVR#^6aBv`IX>4U6b7^{IAZB4{Y-MCDAa!$PAY^4`VRdYDE((`A+yPz| zC37!fb7N(0WG`%QXD=mW00000005V++ySfvC37WnmmbvtC6`~=0S%X6jsXX^kKF;d z2A5~q0S}i1;Q=5SWMy_~V`X1yWIZKhEFf}ab9HQVJtbr=3b#Mx0n-DQ8t4JCe{=}| z04)Ik0I0kE|L`#Y09d{L{}>Sf006%J|D-Vh08qgG|BML$0D!>$|DXu~0KmZh|Fj7J z00_bU|J(@x01(0c|L_R_0C2(n{}>7Y0MNny|3nG^0N}y?|5yqD02spl|7;2X05HP- z|9}br06@b2|C9;<08qmI|ELN8e*lQW{{OrR005Z6{{PSl003CT{{LhP005ZA{{N&4 z0002U{{Pqu00212{{JWp000=v{{OHH008*R{{Q?800216{{J8i008jK{{K)7000=y z{{LhR003al{{M^(008*T{{P$!002PH{{JWr007|8{{L7G003Cj{{O%Ze*ger)c*g} z4gdi7)c*ev4*&oF)&Bn^4*&pI*8cy94*&o#*#7_M4*&qz*#7?*5C8xe+5Z1P5C8zU z-v0mG5C8xG-~Rvb5C8zE-~Rtd5dZ)H;Qs%75dZ+V<^KQV5dZ)P=KlW}5&!`B=KlX+ z5&!_`=l=hw5&!_$=>Grke-Z!y$m#z7I1>N>xa$7@fD-@!xaTNE847 zIPL!bToeER;O+kZh!g+-@a_KpoD={6DDM9Mz!U%g0Pp_){1gBH2=D&?5ETFb5byr~ zBozPv81Mf7I28Z@SnvM-TonKSVDJ9_a1{UmVDSF`#1#Ml*zx}Ve>fHZ0C4jD|6~>b z0EqJb|9BPv0HE^z|BMy@008v<|L7I~008#>|0ov#00{T~|8y4s0KoVD|DYED0MPgT z|FjnX0ND5b|Hu~r04VtW|L_+80GRmx{}>nm0Fe6r|F{?c0Lc9Q|6CaW0I2=`|BM*` z06+l$|2P@|09XM3fB#Gx003|R|Nmeb005`~|Nq1q002+||NrP3003YD|Ns0N00004 
z|Nl%I005W;|Nqb%001Bc|NrD0005{6|NmSZ007tv|Nr0|006iQ|NkT%005{B|Nmqi z004Ln|Nn>`006)a|Np!l007ty|Nr0}000mW|NlT9001x$fB*kf9smGX5dZ&Z9smG< z5dZ&t9smHa5dZ(A9smHa5&!?}9smGf694}s9{>Qj694~D9{>Pg6aW8w9{>Pg761Pb zAOHa17XSZ%AOHY38UOzPApig<8vp-5ApihS8vp-PApih)8vp-jApiiN8vp-%Apiid z8vp;0Apih4e;fb**dYJ_z#ISnAR+(&=o|n4G$H^15FG#iNFo3LKpg-7U?Knj@ErgD zlp+8CNFD$G#3BFypdbJL>>>aFcp(4(NFx9Mup$5dkRt#9pd$bO$Rhv%I3xf6^dkTO z*dzb{AS3_)P$d8VU?czl=p_IDpd2s{}d$v0B|V(|BxjB0H7-W z{}d(w0Qf5Z|2!rD0FW#H|7<1z0H7=X|9~a{0I)0n|CA;G0KhB%|EMMa01z(!|0E{> z0B|t>|BNR904Op4|IjA@0H`tl{{$!i0I)It{}?C$0Jt&#|1c;306;ST|70it0GKlW z|CA^Ie*mB}|NrPH002-l|NkT@007uD|Nl@a000Oz|NnF;001yG|NodN003Au|Np!x z004kB|Nr1A004+J|NrzU006Ky|Nks1003Aw|Nm$z003Y(|Np!y003Y)|Njgt003w? z|NkH>003|~|Nk^A0059V|NlrU00782|Nm?&e*gg3IsgBFD*yoCIsgBZD*yoKIsgBt zD*yoSIsgB>D*yoaIsgCAD*ymMI{*LlD*yn9I{*I|EC2woI{*JXEC2xLI{*J*EC2u? zJOBTLEC2vVJOBTvEC2v-JOBU8EC2v_JOBUSEC2woJOBR#EdT((JOBR}EdT)6JOBSI ze=Pt2=sW-aKrH|O7(D;~WGw&yh&=!Qlq~=NpgjNos4V~hxIF*=ye$9#_&opr;4J_E zAU*&81TFvoFg^eO7%l(+P(A>gf0L8;64BUs4f5i5I+C^ z%q{=`7(W01;4T0FSU&&%1TO#pm_Gmie<&{i0H8kq|2!`M0H{9y|4=Ug0I)v)|70%! 
z0PsHl|A;RD0Qf%t|C}!X01!X_|G+N*060JY|Ku+K0PsKm{}3<$03bmB|1>ZF04PBJ z|41+Z060MZ|6nix0C+(E|Aa6A0N6qQ|MV~b0N6zT|6DQv0N_Rc|A;aG0N_UdfB&>H z004+b|NrnZ0059j|NjIt004kU|NlfY000l006L7|No#i z001CY|NqD~002l=|Nr1N0078WfB*joHvj+-SpWYlHvj-wSpWY}Hvj;jSpWZYHvjt+hIRF4ie_a3nNI3ui zkX--&Y&ie`*j)erkU0PV5MKZP2s!`&7+?SYcsc+8P+$N5pgI5ms9^v9*g60J0Am0D z5IX<>SYrSGG&=wQFl7J#d^-RDcxV6ra6AA2fNB5#{5${vaBBbmFg*YO&};wyU_Af; z@NNJ9;5`5UP;dYLI6eRXf3R`?|HM8300?vc|NK4x0HAgM|Ij}G0PuDH|MWiq0C;x) z|0F;F0I+ud|2RMZ06=*E|D-?w08n`U|F}Q^0N{E5{}e$00PuPL|0qEK07!fP|A0XN z0I+=j|L8#g03d$<|42dr0EmA7|9nCK06>5L|HMK707!xV|0F{Ie*n;d|Nl@!0020J z|Npo{002;k|No3c005AQ|NqoP000<@|NjU@005AR|NlHi005|p|Nl@$008KU|NnSJ z000n+|No3d003}{|Npo}008)l|Nrh%ENB{t6l>h%o zNB{shmjC~}NB{tsm;e9dNB{t+m;e9xNB{shnE(GcNdN#4n*aZFNdN$-n*aZ>NdN$# zod5qUN&oN&o<$pa1^=0ZW(K@&O_$yte-TEC2ui z000005C8xGEC2ui*tY)vJOBUy00000Bme*aKmY&$1he=q<505Ado0PMQ{|7ZaK002P_FhQLJ zo&!M)9UwuI0#FLw-qQ>p0746H3f`3&1e61v2A&04-QLsG zPYfRb004vl006WC000QR{{P?s0000%4xj-+oerK2K@1&mL6ZzZl?s##LYW4b2SS?! 
zngv3a2$l&(oC9vv)IrzQLD|?r+uA|gMBU!gmp~x`6*HOzn*^K#oerK2YSh)%*Vx(G z+uTBw3zZ6%36}_%2bl(%1)Bt%0{{R>0000y0{{SEzL$3P0Y)l_0{{T@zyAL`00000 zK@Jd|0-gaw4IN6}-3%W90000i000220{{RJz?UBP0V*2Y0{{S+!2bUv000000000a z0002+0{{T5z?Wk80U`z%1ONccz?X{m0ZImF1ONaO!I#?i0Y(P21ONaK!j~ra0VaQB z0002w1ONbl!~Xwx0ssI2K@PA9L!FiZo|OSX4IM#~lmV2J0Y{aR0hW;gn2!N^3fU000000000u000L7U?;Kly`1ONa400000 zU;qFBumu1B)W!b)RF}>80a6c6)YAX}TmS$76b1kQ$i|m0`2ki6EC2uibOrzbK**OC z`T;6`qy_*0xXAwhJOBUy06`8Qodli(K@A;2lLAZL-O~&o00000m;e9(%mx4e(8&J( zpalQ`06`AW0YRM>o)tk19k2mIm=Br_oC`#k5Ji*|lN3gl5tR}|nGQvp3<5&b(?QnN zLDxaqLEA>%-rU;Q3?BeO3>~lmlN6K_l@gYhzxn|YQ3BA@)YaD6+d|htlN6K_l@gW_ zmk^l_n+#Xe)YaD4+1o;s6qFN{5|$B{5Sb2}3=jYS0B8pQ0MyO?|40A;0000003-ka z0DK1k00_?h|EQNe`vDt0EC2uiqz3>1IIRBvr~m)}06`8Qodli(K@A;2lLA=^-rds- z9{@oMM&8}i3?BdhFaQ7m)CT|nSkC_cRF`4@0TY+N`vDjU2nYZGe9r#=w3p%g0WLao z3f+Sl0C*3;As9{@oM0-u|*0e0I(o3a5++eMqQ0cP7vo3a5x+W-InFaQ7ma0mbZ z%+vn=P?ur<0TY+N`vD7=XZ!&T2BZi80O-?~di()G2J8p`0Fcy|di()G1~dr(065i` zdi()G27Czs0N~Y^di()G2E+*f0EpI?n*9Mn3JeMW05I47{{R7(e*6Izm4E^;3UB}b z0ALCL0NmG=x&msKp#A{_f7A*90N~jE{{#R40000004M+e0O$$;0Myw2|D*r_002Y| z5S;>^0YMENN#5NI9{@-U9T1%Yo&f*=01yBG02B)V08rWf{}2EG0000003ZMW04NIp z08H8b|1^0YMENO5WWJ9{>OV05|{u089%20AShvm$&}`HA09B008{i z{{I9300000000mG005i|008jW{{I9300000000mG006KH0089K{{I9300000001BW z006`b007w8l`aAv9_R}I0QA}Z{{#R40000008jt`0Q?I80O;BN{{R7(e*6Izm4E^; z3UB}b08k770F>I5x&mrmC;$Ke#0&rcnA`sUm;e9(06`8wod})>K@A;2lLnLpSPI_V z)6@(f06_}?5C8xG@C*O|1l$1=SnQ|%{}=!O0000001yBG00a#H00`Xv{}ccK00000 z09XJ302mDb01Vuhh5-Ur26PPo01(}m;sF9W3A7CW0MOq4{|J|X69N{OI|2eN1^^BK z0F2+4ZUO=(E-VfJ02tu@{|o>C0000001yBG06-1^02JW<{{#R40000003-ka08|bD z00`ih!U6&!27C?x05IT}>H-2H2BZ!E0C?b+5(5I721E}40MOx=vI7F12Ivm}0A%Br zRs;en1{e?k02Jhxh6DnpF2oQ3008Fx{}ccK0000001yBG0N4-!00id#{}=!O00000 z0AK(B0PGL|00`!nS_J}L22c?I0Nm!6z6Amv8gvl=0AT0-{|o>C00000089V?0EiI) z09@yn_5}h;2G|h*0IcYjMg{_Y8e9?p0Kn<~|A+tp000000CWHV0B{lj05s~C-Ub44 z2Ivw107UGURtExlPCx(v0BjQg0Pybq|KI=s002P_Fr5aT1w;)UL6ZcO14WerQwrYQ 
z)zj1r9{@rNNebTG)zj1r9{@oMP2Szr)6@(f06YKy0I(AP0F3XK5(ol1UhoqD06g&i z|Ih#c003tW5S;>^0YnWQNZ#EH9{>OV05AXm02CAe0O;`k|40A;002P_5S;>^0YVKO zMGD^C3?BeO3qcCr-3%W9LkmIP-3%W903ZMW07w)70Q~Tmst5ua3S0mH0BjTh0Pyga z(g*@tLd+BZ0LbzF{~!PW0000001yBG0N@k=0MzmR|0Dna0000001yBG0Q3|90O;}l z|0Dna0000003-ka0000i=vmI(qv26z--U$LS2Cx+X z05tQL7779;2HX_@0ATZ%LJ9&V1_%}a0NnGJZVCd92AmcE07&(h0t*6a1`HPf09f{y zati`_28C0000007L))02~+q z02ulH|AYVl002P_Ae{uB13?WPL6ZVTlmSW#)I#3f(+nQ~K?_3Ee?#8g(+nQ~K@1%r zlLDOto&!YQ-O~&o002Y)003kd003C|{{Msk0000%4j`Qbo&!M*9YK=tU0wM-@82|vR{FkN;0#*jp82|t% z{g(m`0zw8H8UO$o{+CJ)0x||v8UO&${+EId0wxB48UO%1|Cg=~0wN7O000228UO&W z|Nj4Um)#Bm8wf|<-O~&o07aKD4+0bh>>2<7=>L}r4+1&|EE@m-Kmh-jOAi7$2W%Sv z04M?fmx~VqEeD_*0049W|ChfH0(A!*8~^~=0{@pg5CVM%pd0`IgarSWzYqc;3CJ7( z0JH@E|CpCi4FViclmSrG0000G0002^8~^|u1^@pH000000000G0000C9RL6r1^@pH z000000000G0000W9RL6n1^@pH000000001Z0000q9RL6j1^@q0mm&258JBJm0$K;K z9RL922LG4O5dvWc7#;us+z9`dHxdFp30xik08k14|9qEW{{a)1kP-qIGmstt0GtW` z{|Ep80000002BZK0HhuO0FVj){{#R4000000000005AXm0K6Um0DuYqm+KhRS3ICT*8UjrQbRPf!unL!=8Ui*3upa;bBny|`8Ui80E`R&mwyuiF$uUJ004Xp|No?yaRLGp zm&_9aAP4jy008I=|CjR<0!;@~cFOb-A51ONa4000005C8xG ztRVmZKo0-^1ONa4000005C8xGz##wtG!Fm&AOHXW00000AOHXW)FA)>Ko0-^H~;_u z07VWEodTW#Lk%56-rWoz000005C8xG_#prQR1W|D3;+NC000005C8xG3?cvkP!9k9 z1ONa`0000001yBG03ad&07MS|{{#R40000001yBG05l>105}f+{{#R40000001yBG z07xPL04xsw{{#R40000003ZMW09+yf03Z(kms=GA9S49S002Y||CfIi0v!jWA^-qr z4*!?`5&|6u$RYp$Y!3gI>lFeW2kasM0Dunvm+KV*9S0C2005K@|Cj#~0v!i5BLD!H z4*!?`5&{{QU=soa8(1R%0Gtm0{|Ep80000003ZMW0Bj=w0FVy<<6`7ytkO000005C8xGG$a53 z@DBg~7ytkO00000AOHXWNF)FN^bY@*M;8Je2W%t&01yxVm;Vw18JA!a0t6zEBme*u z5C8uV000000000W0001_Bme*q5C8uh00000L6^=K0u~3zBme*m5C4~I76KWU;1>b} z2kayO03Z+lm+Kb-9S0C4001lx|Cj#~0v$gzB>(_05C8u#00000MGg?10-gau4IM+? 
z-3%W90000G00013B>(_C5C8uJ000000000W0001NB>(_05C4~s7y=oW;1vP{2aqKI z089`6myZ|%9YeGw003wY|NjU800000000mG0077(003MM|NjU800000000mG007)2 z002-A|NjI400000001BW008hM002Y}|CjF=0v#I^CIA3n5C8uJ000000000W0000e zCIA3b5C4}Z83G*#OeO#TNDu#){}KWj3n%~p0B|M%089`6mv0#YB?p`)007tz|CgH? z0v&(ACIA5B5C8uJ000000000K0002gCIA4~5C8uV00000000000000W0002&CIA4? z5C8u_00000L=F(00-gax4IM?^-3%W90001N0000GCjbEK5C8vM0RR91K@LEj2%ZN) z4IM$11eOCsmjXtV1(OC>)YC-X-PhID3?G+rFai^QO$yZ0Lf+li)z%Cj06_~-)YC%V z-PhID3?BeN3>`p~1eODr0-Xq+2LP-9004j|007(&|Nj630000%4xj;@6rK}74IM$2 z50?%?lM<8>QkV@!nhTi>LYxVk3PhC zmJgR{4wwy@44Mm@3Y-a@6rK}S)!5nE+uTZ(5SR^_44Mm@3Y-Z^+1Nqb+CkMp+(MWQ zN!US^5SR^_44Mm@3Y-Z-)j`?VLfhIw+(MZQm<>Uj3YrT+l@LLk2><{{0000mC;$Ld zjQ;;T0+&t>0vmrp3ul!CPSscnl>|r&LJE}xNDDy|r&LJE}xNDD!g1OR9N004X_ z006Lw{{OH60000%4se|go((|_9YK@}NR|mt3f|q;)C?a0K?^~c2bTy!lMF(d1(^mz zn*>Ca3SJ7>*FoCZLDNCiLEAwKQq$Gf*xB0KOPB|j2tj|E1(^mxlMF$X3PGC$0000e z0002!C;$L-694}M0RR91K@L!z3Z4l;4IM#~2$Tn03f|q*)C?a0K??vV0000KDF6Ua z6aWAG00000K@KpT2A%~%4IM#~1e60@3f|q*)C?a0LJI&m0000$DF6T@6#xII00000 zLk=*V2A-E29s(gE000005C8xGd?^3`kQD#_1ONa400000JOBUykSPEFgcO$=9|AfC z&?x`_G!>Ux9|AfC5Gnuwj1`xf9|9@{Kq>$LOcs~Q9|AfCfGPk0q!yP7AObE1v?>4q z_!gHwAOa={;3@zB)EEE%e3yX}0uz^jAOa)_AS(a>6dC{jz?X3X0ux163f|q*)C?a0 zK??vV0000?D*ynf8UO#B00000K@KpT2A%~%4IM#~1e60+3f|q*)C?a0K??x10001Z zD*yl-8vp;0mjxjL9|T5}3saXUAp$K5ngyE#oCBQ>o(-2^Ap%ei0Qdj^05B{70Q4OH z{}7kGAp#pBnhi~q6O$A~oC})_L75H$RM|n)(?Q(ZNZ3KzO5WYq)z+7=`T-y&P}9`e z+uTZ%6qFO04x0>|3qcCh(?Q(ZLD@kIO4HQY+uTXmLE4u^0YnWQM&8{F9{>OV0CWHV030v?06-)E z|1<#r002b}P@M{%2|*1VL6-!W145GsLzM{FD2$Tnv29^bv1egPv0-6Dx3Z4l_3?Bf~KGfCL*Vx(G-QGeB9Z-`9l?Imt zm;;>(o(XIK005LQ0000a|NjU90000%4ltbto&`e<9YK@>LX!kSl>$+hEiM8W7E04W z)k5Cg)C?a0K@1%*lmndxo&`jgF)jiX8q-150000003-ka00c1r0GK5I|1@1}rfE0H`FFDklOQ97mOe0b>f?-qq983?BeO3jhE#0001FF#rI_C6_8E0vsGi zm4pFf3f4lo3mo4?~y@mkvyo5JH*@N!Hau*w;bY zN8a7j)C?a0LYJ^80=NeJG5`PoE0=C50)_^NGXMa5ESLHz0xkx;GXMZMEtf7T0wxW7 z0002&GXMZ+F8}`=mtiUb7k@^U2bBm%nFN>xaM{>I*407NL6ZuV2$lz!1(^gv+1N+Z 
zLDtnt-rd*K3?BeO3>{#T3X}000Ou|NmeC0000%4ltbto&`Y-9YK@>Mhf2D)ISU#06_~ulLSJQ z0$d8yLDfRu-P8;p0745w3e!T>L*Cug3?BeN3t0-&LDfPFLeoLj0001p0002QGynh) zF_$(g0)_@qH2?tQGMBmE0-#mDVZpmCz~dmC7m#yFG1GTLD<(p+Sx(VLEJ-=E|n~nE0=#Nm?@bknkSqk zLDtnl*w;bY*+JY$l`NJkmnxVknJAhkoFz>P+(FjWLD<(p+Sx)2N!Hat*w;bY*+JYv z(?OOil`KJ+DVHiinkShkL6a^)oFxDNAOHXWTs8mz$Tk1}OaK4?07niGodTW#L=7EA z-rWoz00000EC2uifHnjG0Q5DNwk!fF9k4b408BRj|12wGBE-!8pt~U08B;y{}ccK0000003-ka0NgtO08mAjdNBeb8U#E50BA-3|0Dna z0000004M+e02n*~0DMK4wlM-G21Gmn0I)@u;xPg`2828S0Qg0hA~FI}2HZRV0MJI4 zdNKkc9Rxi90I)~@{}ccK0000006YKy02n<00JKN{|CpEkDFPgq)iMGb23S1+01QZ% z_A&xi2DCi@0GLUaQZoWh1_(X?0CY;1rZWN}1~5JV04z(F&NBige@p-X0AM}<0CY?L z|KI=s002P_Fr5aT1wjoRLX-oO1VWVpMV0|t3e?j<*407Y-3%W9K?_0FLe)Xk(?Q`3?2A%~;-rWoz03ZMW0I)s)00d0`|2O~u002V{5S;>^0YnWQM&8{F9{>OV z0B`^R0MtGJ02oaFmq#=LZ3aj`004ANm%20pA{uZ%0068`|NjgC00000002Ay004wP z005{?m-aLQItIi)008(-mo_y5HU0000%4v-Z? zozeoH(E>pY9YK@O0+i1JLYB<}NR`e4MVQM1m&^ia*w;bTLY2+}NYzUU-QL#I)C?a0 zK?_Qi&H|Ur0+`DJN!3Bv*Fu%f0!h_DmCgc}%mSFp0!G!>*g=@f0+-AJL6yz|002|~ z006i^05Je;Qvd(p0RR91K@Q*ooh+U!K@A;2lPZ)cOqV7?mM4`cY}VC5*GCH8-P6000001^@s6 z00000&00000(0Tv>00000(0Tv>00000 z(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv> z00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>004gg z0ML2>000000ML2>000000ML2>000000ML2>000000ML2>000000ML2>000000ML2> z000000ML2>000000ML2>000000ML2>000000ML2>000000ML2>000000ML2>00000 z0ML2>000000ML2>000000ML2>004gg007W>00000007W>00000007W>00000007W> z00000007W>00000007W>00000007W>00000007W>00000007W>00000007W>00000 z007W>00000007W>00000007W>00000007W>00000007W>00000007W>004gg0002c zdH?_b0002cdH?_b0002cdH?_b0002cdH?_b0002cdH?_b0002cdH?_b0002cdH?_b z0002cdH?_b0002cdH?_b0002cdH?_b0002cdH?_b0002cdH?_b0002cdH?_b0002c zdH?_b0002cdH?_b0002cdH{a_00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv> z00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000 z(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000(0YFW000000ML2> 
z000000ML2>000000ML2>000000ML2>000000ML2>000000ML2>000000ML2>00000 z0ML2>000000ML2>000000ML2>000000ML2>000000ML2>000000ML2>000000ML2> z000000ML2>000000MLJW00000007W>00000007W>00000007W>00000007W>00000 z007W>00000007W>00000007W>00000007W>00000007W>00000007W>00000007W> z00000007W>00000007W>00000007W>00000007W>00000004i`dH?_b0002cdH?_b z0002cdH?_b0002cdH?_b0002cdH?_b0002cdH?_b0002cdH?_b0002cdH?_b0002c zdH?_b0002cdH?_b0002cdH?_b0002cdH?_b0002cdH?_b0002cdH?_b0002cdH?_b z0002cdH?_b004gg(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000 z(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv> z00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>004gg0ML2>000000ML2>00000 z0ML2>000000ML2>000000ML2>000000ML2>000000ML2>000000ML2>000000ML2> z000000ML2>000000ML2>000000ML2>000000ML2>000000ML2>000000ML2>00000 z0ML2>004gg007W>00000007W>00000007W>00000007W>00000007W>00000007W> z00000007W>00000007W>00000007W>00000007W>00000007W>00000007W>00000 z007W>00000007W>00000007W>00000007W>004gg0002cdH?_b0002cdH?_b0002c zdH?_b0002cdH?_b0002cdH?_b0002cdH?_b0002cdH?_b0002cdH?_b0002cdH?_b z0002cdH?_b0002cdH?_b0002cdH?_b0002cdH?_b0002cdH?_b0002cdH?_b0002c zdH{a_00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv> z00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000 z(0Tv>00000(0Tv>00000(0Tv>00000(0YFW000000ML2>000000ML2>000000ML2> z000000ML2>000000ML2>000000ML2>000000ML2>000000ML2>000000ML2>00000 z0ML2>000000ML2>000000ML2>000000ML2>000000ML2>000000ML2>000000MLJW z00000007W>00000007W>00000007W>00000007W>00000007W>00000007W>00000 z007W>00000007W>00000007W>00000007W>00000007W>00000007W>00000007W> z00000007W>00000007W>00000004i`dH?_b0002cdH?_b0002cdH?_b0002cdH?_b z0002cdH?_b0002cdH?_b0002cdH?_b0002cdH?_b0002cdH?_b0002cdH?_b0002c zdH?_b0002cdH?_b0002cdH?_b0002cdH?_b0002cdH?_b0002cdH?_b004gg(0Tv> z00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000 z(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv>00000(0Tv> 
z00000(0Tv>00000(0Tv>004gg0ML2>000000ML2>000000ML2>000000ML2>00000 z0ML2>000000ML2>000000ML2>000000ML2>000000ML2>000000ML2>000000ML2> z000000ML2>000000ML2>000000ML2>000000ML2>000000ML2>004gg007W>00000 z007W>00000007W>00000007W>00000007W>00000007W>00000007W>00000007W> z00000007W>00000007W>00000007W>00000007W>00000007W>00000007W>00000 z007W>00000007W>0012T0002cdH?_b0002cdH?_b0002cdH?_b0001pml6L0>6fr+ z0|tNp|Ns93000010000300030|Ns900001hlmP$$00000000000001plmP$$00001 z000010001xlmP$$00002000020001(lmP$$00003000030001>lmP$$0000400004 z0001}lmP$$000050000500026lmP$$005PT1DSuglmP$$0000G0000G0002MlmP$$ z00000000050002UlmP$$00001000050002clmP$$00002000050002klmP$$00003 z000050002slmP$$00004000050002!lmP$$00005000050002^lmP$$0000600005 z004gg0F?m%000002LJ#71poj52$cZ<000002mk;81poj55S0M{000002><{91poj5 z7?lA4000003IG5A1poj5Ae8|C000003jhEB1poj5FqHuS000003;+NC1poj5K$QUi z000004FCWD1poj5P?Z4y000004gdfE1pt2l09cg)0000001p5F00jU50BDr~00000 z01yBG00jU50C<%F0000001*HH00jU50DzSN0000001^NI00jU50Fadd00000022TJ z00jU50GO2l0000002BZK00jU50HBot0000002KfL00jU50H~D#0000002TlM00n;l z006L+0RR91000*N000F5006j^0RR91000;O000F5007990RR91000>P000F5007XH z0RR91000^Q000F5007{X0RR91000{R000F5008Kf0RR91000~S000F5008in0RR91 z0012T000F5008)v0RR910015U004gl00008mH_|&0000V000050000OmH_|&0000W z000050000WmH_|&0000X000050000emH_|&0000Y000050000mmH_|&0000Z00005 z0000umH_|&0000a000050000$mH_|&0000b000050000;mH_|&0000c004gg1poj5 zP?iAz00000CjbBd1poj5Se5|*00000C;$Ke1poj5V3q*@00000DF6Tf1poj5XqEv0 z00000DgXcg1poj5aFzi800000D*ylh1poj5c$NVG00000EC2ui1poj5fR+IO00000 zEdT%j1poj5h?W5W00000E&zW300jU50Fage00000051Rl00jU50GO5m0000005AXm z00jU50H~G$0000005Jdn00jU50I-$;0000005Sjo00jU50JxR`0000005bpp00jU5 z0Kk?30000005kvq00jU50LYdB0000005t#r00jU50MM2J0000005*RB000F5007vQ z0RR91001`t000F5007{Y0RR91001}u000F5008io0RR910021v000F5008)w0RR91 z0024w000F50001&0RR910027x000F5000P=0RR91002Ay000F5000000000000960 z|NsC0|NrQ)0RR91004gg0000000000u>k-800000000000000Ou>k-80000000000 
z0000mu>k-800000000000000;u>k-800000000000001Bu>k-800000000000001Z zu>k-800000000000001xu>k-800000000000001>u>k-8004gg0000000000u(1IE z000000000000000z_9@U000000000000000*s%cs000000000000000@UZ~^00000 z00000000005V8RP000000000000000D6#( z000000K|F#0GB`p0~iVna{&MV0000KbCUrE6O(WO6PG*(12jp{3jhEB0000000000 z000000000300aOSGXMYp0000000000000000000300jVaO#lD@000000000000000 z0000300scSm!Ss(6@Lc+Kv4hy000000000000000000000{{pBm|Oq=0000000000 z00000000000{{sCuzCOh000000000000000000000{{vD(0Tv>000000000000000 z000000{{yE0FM9w000000000000000000000{{#F1akoZ09*h7000000000000000 z00RIG02p%t0000000000000000000000RIH09exj0000000000000000000000RII z0MOh40000000000000000000000RIJ0AQC9{{j;Q0{{^Kc$X3X0uu!T01^O*ml6L0 z69oeR69D*^@dyK70+1}1QV9btN|ciT000000000000000073u&00ICD0F;ve00000 z02BZK0000006zc#000090I+%h00000000000000006zc#0000C00eWF$_WE5DZF|B z00000000000000006zc#0000C01R^h0000000000000000F9Sn2?HO0kP`s_00000 z0000000000n*aa+0sspD&=Uaw00000Tmb+800000KL7v#000XBSdjn#0000000000 z00000ssI20000mGaPk8H000000000000000KL7v#000XB0FM9w000000000000000 zssI20000pHfbs(X0001g00000000000IC200000D03f6R0000000000000000IC20 z0000L0C*Y%0000000000000000IdK30096M0C*Y%000000PqL^000000K5PI0096M z0B9ow0000000;m8000000LcIV0096M0B|D&00000000O80000p00620000004FEu- z0RR91000000000000620000004*<9W0ssI20000000000007pPVF?2uP?P`w00000 z0000000000KL7v#000XBu#f-%000000000000000ssI20000mGXz~L90000000000 z00000-v9sr0sspDShD~C00000w3k^710NX-0RR9202BZS00aO400004000000000O zmyrwu7a1-A000316aWwa1ONa4000C400000002Rk!3+a00acgr3I0RR;M zXa)oT000001ONa400000c>w?b0RR;MYz71X000001ONa400000iI*V_0~ddu0RR92 z02Kgu1_S^A0000G00000000210RR9202Kg;1_S^A0001h000000002J0RR9202KfT z2Lu2B00004000000002e0RR9202KfX2Lu2B00004000000002z0RR9202Kfb2Lu2B z0000$000000002>0RR9202NvQPzMA6000001ONa400000{{a910RR;MSO)|E00000 z2mk;80000069NDL0RR;MU=A3-q10000000000000000062000000 z4FKqa0RR91000000000000620000004*-zq0RR910000000000008xup$-EdVOYrk z0000000000000000IC200000D0MLa20000000000000000IC200000L0C)!k00000 
z000000000000sj90096M0C)!k0000005AXm0000001^WL0096M0H~K?4g(h{(1`&6 z000000000000000ssI20000jFfa(DN000000000000000A(!zF10NbN;s5{u00000 z00000000200000001W_0lb1>n11>?x7X$zR0000000000000200000001W_8lK}t# z0000000000000200000001p5-@c{q;00000000000000jm+=n+9|?f%000000001& z`4Iyj0Y8@+5(6J8c$5JE000000000000000ssI20000jFnDhYv000000000000000 zOPBEv10OS(?*IS*0000000000000180{{R501E(^?*IS*00026000000002000000 z01W^TmjRb!5d$tLn*jg-0000000000000200000001p7T^#K3?00000000000001a zm+=n+A3+cW0RR91000000000000620000004FE8s0RR9100000000000062000000 z4*-b!0RR910000000000004!T@eczZK`;mb0000000000000000G9&*00ICD0N@P) z0000004M+e000000IC200000F0I2){0000000000000000Irwu4+9@T;2Hq{00000 z0000000000ssI20000dD0IC51000000000000000ssI20000jFNCpA`0000000000 z00000xR>z{10PukDggih0000000000000200000002KgmD+B-l000000000000020 z0000001W`JuK@r60000000000000200000001p882?78B00000000000002Sm+=n+ z9|@o;0RR910001&DHQ`BF{%In0000D04T5l0000000000000000IC200000F05}Q) z0000000000000000MnQ84+9@TFfaiC000000000000000ssI20000dDc(MTi00000 z0000000000ssI20000jF=nMh?000000000000000;+OFc10R2IG64Vp0000000000 z0002+0{{R501E)9G64Vp000130RR910000B1ONa601E&BGywnr000130RR910000e z1ONa601E&JMF9W+0000~0RR910000`1ONa601E(MMgaf-0001#0ssI20001W1ONa6 z01E)$_9004gg6aWAK00000o&*2@0sspDkZS<|00000)Bykh00000ssI20000dD zV6y=L000000000000000ssI20000#Lcq;?|000000000000000yaWIM0RR;Mcq;?| z000001ONa400000%>)1d0RR;Md@BS1000001ONa4004gg0Nn%t0096M0Dvn500000 z00aO4000000PF++0096M0E8<90000000aO4000000R0320096M0EjCD0000000aO4 z000000IC200000D0I;|L0000000000000000IC200000F0MHEr000000000000000 z01gEJ00fuq3j-S|_;LXN000000000000000ssI20000jFSRMiZ000000000000000 zP?rH110Nb}9s&RW00000000000000W1poj601p6c9+x^8115hi1poj501g0H(*Xbg z00000000000000%1poj602BZK00aO400000000000000^1poj701N;Ga{&MV00000 z000000000~1poj700{uFdH?_b0000000000000151poj60P+8b@&f<>0000000000 z0001E1poj602F@!Fa`tw000000000000000aRmSX0sspD_;LXN000003;+NC00000 zZv_AV0sspD_;LXN000003;+NC00000fCT^m0RZv;82000000000000000mIVL+5&#PTzzhKZ000XB00aO4000000HTu-4;KNq 
zlMxRW0m_pR4;M+?1pojN01E(+oB#j-0002s0RR910002;1pojN01E&xFaZDn0000q z0RR91000011^@sO01E&RvH$=80000;000000000Pmw_1r7k?}U000sI3jm-h0RR91 z004Xd00000002V<000sI3jkob00000008g+00000003JC000sI3jp|W0RR91008^| z00000003+T000sI3joLt0RR91000O800000004Ie000sI3jpZ;00000000aC00000 z004*v000sI3laeEl>h($0002M0RR9100020mw_1r7fHJY000sI3jlZy0RR91001Ze z00000006}X000pH6aW|i1ONa4000C400000007bk000sI3jjz>0RR91002}000000 z008Khff)lAG5ZDp01^NT0Fdqg000000E7Sl0000001gKL01^NT0Fdke000000PFw& z0000004JBB8Uq($JO=;(5&#PTD9Zo<000001ONa400000N(TS{5&#PT$P57h00000 zAOHXW00000RR;h75&#PTI4%JI00000NB{r;00000YX<-T5&#PT2=xE}00000@Bjb+ z00000dzYaa0~axj2LJ#P01E)v2mt^900008000000001<2LJ#P01E&Z?*IS*0000e z0000000026m!TR17XibUp&A1hNz4ZT01^NT0Qf5b0000007w7;000000OAJ#01^NT z06+}^0000000aO4000000PF_<01^NT0Dv9=0000000;p9000000Q{Gs8Uq&r6_ZgB z7hxy}000sI3jkQc00000000C4000000021%000sI3jpw$00000003wK00000002%1 z000sI3jk@5&#PTh~oeN00000Bme*a00000tdmg@ z7k|?T000sI3jlDH00000000yK00000008I+000sI3jhcq0RR91006)M00000008?4 z000pH6##G;1ONa4000O900000000OH000sI3joMH0RR91003YC00000001Hh000sI z3jlBt0RR91002M$00000001!w000sI3lacuG64Vp0000q000000000xlK~AE0a}v* z4Hp4plK~AEe|8A~01^NT0Lc3Q0000005Ado000000F4O%01^NT05HV>000000GI#( z000000G0^=01^NT05}Z+0000000aO4000000H6r~01^NT0Duz#0000001yBG00000 z0IvxE01^NT0B{@u000000Pp|+000000KW+U01^NTS^)470RR91000O800000007Mi z000sI3jiPu0RR91000aC00000007zv000sI3jp}_00000005W(00000008s}000sI z3jp{L0RR91000O8000000007$(F_*>3zN|d7XcQN(F_+!GYS9z5&#PTFya6J00000 zH~|0v00000N(uk~5dakcum=PH000005D)+W00000TM7UG5&#PT$RPm$00000oB#j- z00000aFfvt7XgNo(F_+!uL=MF5&#PT$dmv800000TmS$700000yb1sS5&#PTaG?MI z00000L;wH)00000)CvFq5&#PTAY=gm00000ECB!j00000>66h67cu+_000sI3jjC~ z0RR91001xm00000000OJ000sI3jk;V0RR91003YD000000051bK{5ju1sn?i01=a6 z0TTf_mw_Jx9}zI$00000007_s00000002~%Q6B>re{u@|01^NT0MI%C0000000aU6 z000000E`O&01^NT00=4p000000B`^R000000HF&201^NT01&Pui4*&oX01E*4?EnA(0000C000000002y4*&oX01E(6r~m)}00004000000002` zlW`0e84C~q01^NT0J!A<0000008{_~000000CR85C8xY01E)P!T?~c0000G1ONa40001x5C8xY01E(E 
zkpKVy0000y0ssI20001=5C8xY01E(kQ2_t|0001J00000000245C8xY005IW5D^*2 z5C8xY01E(kDggih0000e000000002tlQ9q%0Roeu3>QfZ5dZ)Z01E&x4FLcE00004 z000000000N5dZ)Z01E)n4gmlF0000u000000000a5dZ)Z01E&Bvj6}90000~00000 z0000&lc5Y30acTs3>N`tlc5Y38G8``01^NT0LaAv0000002lxO000000E&~L3>SZ+ z5dZ)Z01E){Y5@QM0000C000000002P5dZ)Z01E&BUI73A0000y000000002m5dZ)Y z02Kgu83X_T000000RR910002!5dZ)Y02Kg`D+B-l000000RR910002?5dZ)Z01E)H z5di=I0000$00000000055&!@a005I`4H17W5&!@a01E(!X8`~J0000u000000000= z5&!@a01E&x%K!iX0000`00000000165&!@a01E(^QUL$}000080ssI20001K5&!@a z01E&xtN;K20000y000000001X5&!@a01E*49RUCU0001p000000001p5&!@Z005JR z5fOiz5&!@a01E)fUI73A0001p00000000255&!@a01E(6WdQ&H0002s000000002U z5&!@a01E&}69E7K0000m000000002h5&!@Z02Kfz7z6+S0000%000000002x5&!@a z01E)nECB!j0000;000000002|5&!@a01GYv2oeDR00000KmY&$000001`_}P5&#PT z*y8{I00000FaQ7m000007ZU&g5|d#86HP4>000sI3jhcT0RR91005)_00000002Z2 z000sI3jnaG00000007hh00000003VT000sI3jhET0RR91000C400000003$e000t` zVF43OgcAS&5&#PT==lHu00000+yMXp00000oD%>55&#PTNTC1#00000Bme*a00000 zu@e9Q5&#PTkP`s_00000KmY&$00000!xI1i5|d#86CLXl000sI3jkO(0RR91003A3 z00000008t8000t`VF43m2NVDR5&#PT@XG)I00000^Z@_>0000078C#g5&#PTK==Rv z00000Bmn>b00000BoqJu5&#PT7%c$+00000NB{r;00000HWUB=5&#PTs1N}F00000 zH~;_u00000KNJ7}5|d#869rHd000t`VF43CV-x@Y5&#PT0B!*Q00000^Z@_>00000 zeG~uy5&#PT(DMKQ00000Gywnr00000j}!m^5&#PTsP_N>00000n3n-710OZG6aWAc z01E&Z{{R300000`0ssI20002Z6aWAc01E(+zyJUM0001_000000002x6aWAclVJf9 zN%<5201^NT0AL6K0000004M+e0000000$KS01^NT07yXr000000AK+C0000002LJg z01^NT0JsbR0000000aO40000003(;-D+3oXHWdH>5&#PTSh4^B00000r~m)}00000 zMil@65&#PTXzu_300000EC2ui00000T$kZ10~cXs6#xJd01E(U;{X5v0000W00000 z0001V6#xJd01E)f0|5X40002E000000001f6#xJd01E&(>Hq)$000130ssI20001s z6#xJd01E){1pxp6000040RR910001(m*Fb|7d5IC000pH6#&2&1ONa4000O800000 z006ZW000sI3jnwa0RR91008^|00000007Pv000q_VF42X;g{hn0~deu6#xJd01E(c zo&W#<0002s0RR9100001761Se01E(!l>h($0000C000000000I761Se01E*41_1y7 z00026000000000W761Se01E)H(*OVf0000O000000000i761Sd02BZ)0R#X50002s 
z0{{R30000v761Se005U~EdvpMQ5FCI5&#PT2<`v?00000ga7~l00000WflMc5&#PT zkP-m^00000v;Y7A00000bQS;r5&#PTke~nn00000xBvhE00000kOu$&5&#PTsMP=f z000003;+NC00000ixvO?5&#PTP@(_;00000L;(N*00000qZR-F5&#P-0H~D!00000 z05kvq000000J|0d01^NT0BDo|000000AK(B0000007;V}5f^{U761Se01E*4{r~^~ z0000C000000002m761Se01E&xW&r>I0001V000000002+761Se01E(M$^ZZW0002E z00000000087XSbf01E)1P5}S_0001J0RR910000V7XSbf01E&>y8r+H0002+00000 z0000s7XSbf005I?0TBgD7XSbflVJf9WpNh(01^NT0ARuZ0000000aO4000000D>0) z01^NT0I=f#0000003-ka000000F@U201^NT05BK<0000000aR5000000HzlJ01^NT z00<5N0000005|{u000000I(MT01}g70TVUL7XSbf01E&R5CH%H0000$000000002j z7XSbf01E&Z%K!iX0000C000000002x7XSbflVJf9W%w5W01^NT08kA90000007w7; z00000009^P01^NT0DuGm000000DJ%d0000001p@d01^NT08st_000000GI#(00000 z03a9u01^NT07$_A0000005kvq0000005uo@01}g70TTsM7ytkglVJf91!fok01}g7 z0TUg17ytkg01E(!9{~UW0001d0RR910001q7ytkglVJf9f2J4!01^NT0EjFB00000 z07w7;000000KFIh01^NT09eNW0000000062000000Ld5t01^NT0J!D=000000Q>;} z000000M!@(01^NT01z($0000007w7;000000O}Y301^NT0O&6P0000007w7;00000 z0QeXH01^NTLIB7v0RR91002k;00000000gd000sI3jhF*00000006K7000000012s z000sI3jo*~0RR91005W(00000001tR@h}4ye?S=k01^NT0B9=#0000007w7;00000 z09P3R01^NT0N@V+0000002lxO000000AU#b01^NT0Envq000000H^=}000000EZa> z01^NT0N_yp000000JH!A000000G1g501^NT05Avv0000005AXm000000HhfJ01^NT ze*j<|0RR91005K#00000006Za000sI3jp~200000003A300000006)l000sI3jomJ z00000002k;00000007n*000sI3jjD70RR91008^|00000008D0000sI3jmPz00000 z005i-000000090O000pH6#&o|1ONa4e*gec000000000F8UO$i01E&BYXJZN0001x z000000000i8UO$i01E)HkN^Mx0001}000000000w8UO$i01E(cT>$_90001Z00000 z0000{8UO$i01E&Z;s5{u0000K000000001I8UO$i01E&(O#uJ^0000C000008~^|S zeHs7&5&#PTND=`6000001ONa400000jF&+&0~ddu8UO$i01E&}NdW)=0001l00000 z0002H8UO$i01E(65CH%H0000e000000002R8UO$i01E)92LS*80001p000000002d z8UO$i01E(EF984m0000;000000002#8UO$i01E)H3IPBB000040RR910002p3jhET z005U)9|I9F1{(kX5&#PTxaa@?00000d;tIe000005E}ph5&#PT00jX6000002mk;8 z000009g{H@7XdGmF%}mALX$BT7fDnb000sI3jjzB0RR91000C400000003Vb000sI 
z3jnyG00000005W(00000004L!000sI3jp{J0RR91000;O00000004%QF%}mAnv*dW z7a6S^000sI3jmmv00000000aC00000006|3F%}mY*Bbx;5&#PT(Cq*K00000C;$Ke z00000>XR`R7Xkc}F%}mw4;%mh5&#PTs6qh%00000U;+RD00000EF1s;5&#PTur&by z00000*Z}|l00000KbJu@0~aw-8~^|k01E(!5CH%H0000S00000000178~^|k01E&J zN&x@>0001t0RR910001Qmq9fH7k`2r000sI3jlyF0RR91002k;00000005sH000sI z3jm<-00000003A500000006BV000sI3jioT0RR91000~U00000007Ax000sI3jp|U z0RR91007_t0000000871000sI3jnaE00000000C400000008zJ000sI3x5C*-~a#s z0002&00000000059RL6l01E&J{{R300000C000000000L9RL6l01E){aRC4T00004 z000000000f9RL6k02Khp7X$zR00008000000000u9RL6l01E)9?EnA(0000a00000 z0000?9RL6l01E)H!T4gmlF0000u00000 z0001-9RL6l01E(s!2kdN0002g0000000027moYX27cs^i000sI3jh$g00000002+` z00000008A3000pH6##Gt1ONa4000O800000008}$F*XAi0T7o_Hv<=8E*<~?5&#PT z;0OT#00000C;On01E)1Q~>}00001l000000000m9{>On01E&xyZ`_I0002+ z000000000;9{>On01E&h&;S4c0002o00000000109{>On01E(!VF3UD0000K00000 z0001O9{>On01E(s%K!iX09OD2YybcN00000bsqo#5&#PTkZAz`00000U;zLC00000 zj2{315&#PTP!a(E000007ytkO00000ogV-I5&#PTF!ule00000bN~PV00000tsejY zAd_JM6MxAc000sI3jiop0RR91003kI00000007+|000sI3jnBQ0RR91007_s00000 z008$N000sI3jp960RR91000C400000000Oe000sI3jnA~0RR91004{u000000012z z000sI3jk;$0RR910058!00000001c<000sI3labb0000005|{u000000LmZ$ z01^NT0B8~c0000004M+e000000NWq{01^NT063KZ0000005kvq000000Qn#Q01^NT z05}=}000000H^=}00000016=h01^NT763pj0RR91002k;00000001B%000t`VF43O zG$8;05&#PTh&KTM00000L<0Z-00000Q6T^T5&#PT2;=|&000000096100000XdwUq z5&#PT=v@H-000006aWAK00000gCPI_5|d#86CITy000sI3jo-00RR91000yK00000 z0062X000t`VF430z99er5&#PT;FSOX000003;+NC00000&mjN+5&#PTKwbd=00000 zBme*a00000-yr}15|d#86B+U$000sI3jokb0RR91001-q00000000J;@jL?;Wg8*@ z01^NT01&?b000000C)ia0000006iiA01^NT03a&?0000007w7;0000008=6W01^NT z0BFJh0000000aO4000000A(To01^NT0PrmV0000007w7;000000DB?;03efL0TUUD zA^-pq01E*4@&Et;0002g000000001)m+?FU7k{rJ000sI3jpxg00000004*p00000 z006oo000sI3jly%0RR91000O900000007S-000sI3jj#)00000003A300000007`3 z000sI3jk>O00000004Xd00000008nL000sI3jpB700000000yK000000000Z000sI 
z0GD?@0}%xiBLDy(lVJf9e=Q>b01^NT0GLYw0000001N;C0000006`-F01^NT0QkiK z0000009*k80000008k?U01^NT0B8{b0000000aO4000000AM2k01^NT09cy<00000 z05kyr000000Cgh(01^NT05A^$0000001yBG000000DmI@01^NT5&&4q00000000O9 z00000005PjaXteV8K)xv01^NT0GJH{0000007w7;000000JxWNJ_8qj%Od~)5&#PT zcn|>q000005C8xG00000)*}D_5dahb5CsGP00000AOQdX00000ND#OAO#{~flP@+GxA9K{ ze*%|_QUhg|15*Q2mv2)8O9E$KmypB(8MnGq1B(Kea#jPimpNAhpSSK;10(^r9$5oN z0k?Qs1Ns8DgIxn?0ha_{1G=}`UjuIf6ijbnWKwl*bZKp6L~LwRX>Mn1WtT`|1IM>0 zV*?Wcm)%hV2$xu818%q7Wdl|Lmqcd+)(mG~WO8;!b#7xxZf3U_Xai;emw;&lU;^L? zm*QyyE)viS00000007Vn00000002lQmm+EdE)p0s00000000;>00000003MXmttxI zE)jH100000004AN00000002gpp=tv!3cyeR000000Kia}+G+zf5kOG@000000600000(0Tv>00000Fcg>KYXdG40FM9w000000FM9w000001mu?@Yy&O|1akoZ z000001ap^KYy&nD7;^yt000007;^yt00000Kwy`mYy&P5SknOj00000SknOj00000 zbP1Q@Yy&P5(A)t400000(A)t400000ph1@+Z38X}VDbY100000VDgt*Z38w6c=7`P z00000c=DH;Z38w5i1Gsf00000h?m)I12+-)^aB6@0002^^a20?00008mmzKge*$18 zm!fV1ZUT@jmlAISPYgEz0000002u%P0JE2EZv!j|J_7&%0000BB$uLZ12zKlVwc)) F118zhvjqSE diff --git a/third_party/prebuild/aarch64/liberror_manager.so b/third_party/prebuild/aarch64/liberror_manager.so index 759d8e30accaaca6dd3cd9dcecbc97d03f1cb20b..6358365b0c05acca93660d60e3cf70a0a3444d73 100755 GIT binary patch delta 529194 zcmV+W{{#TApf<4JMvxd2pwI;X00000KmY&$00000u&5D{D>Z*K+zJ2y0000q+zJ2y z000000096100001000060001hy9)pS0001hy9@vT0001hy9@vT0002^L;wH)0001Z zO#lD@000000096100002000060001xz6$^V0001xz6<~W0001xz6<~W0001x0ssI2 z0001x0ssI2005ESNfUs(3jhEB004ly3;+NC004ly43j$nB_fE42><{9004-I2><{9 z004-I2><{9000y`00000000y`lY#+Ge}KCS000000D!v;000000D!v;000000DwdQ z000000DwdQ00000009610000000aO402BZK00RI307p($0G}vuQz|^ig{J33Eo*t4 z*6o)xSpWb400jsD01yEH0003101f~E0Kfo1011Nth#3S2gxEkdApo$Tfq(!2f7lGB zVG>hFlnzM?03krYgx~;x1PmY$!5{z#08ju5gdl(elMKi~F0udsAVdrR;3x`&5(kN_?a0ZRoiN)WUnBrrF?AubXT00}@r06+kS0D`C}Kp=t9 zMFH{{3JkdcLJ$B_KHyGC#vlNce+?KR5CAABU;r`>Xo3L)1(-Y&LI8jOngoCV6bKCj z1ORXZMo|O+Kmb5_2yqB9fS@T!%`l<_fFubdf_f+*qzpmuQlJ2UC_(`c#Rby=AOM2^ 
z24xK>>`;LKL4chQ1P~wr5C8xGkN^SzB!UPNPVmtaNdX7~AT~)LBTbSpe}FU+0y6jj z7!?K*073!890V|nLP7wb0I>jQ000CC0|F8tEF&xk2rvf)l0*fF2*3~kAPh1>AOHXu z+A5I200IJvv;;5!cpw1?RS=*d&=?~i0YM>AC_n&;045Ot0RRGk5DMf36#!t805CCO zK%7Ak03Z*VffyiYK>-2)e+a=qK_I{oA`vJNaUcKy5C}-ZBS{bg7$`t60f0gY0s_E6 zlmJvnQVYNkfHDF=N&o@~h%ToI0u0aqx-tnM3^)J;mHl@0;U880E}rs0LTJN2><~^C`l)P$bbMGG(Z3me{p~f00smA000OK zBmp7G1A>c`1PCGr@hAY0M8^mbBBB@o2oeCv013009^PBLPAH z003f;01%)AC>bLWf59U{5dye1B+BHF5JUn7WU2`Qx(Wb+2~-Hsy9n^C0))Vj0>F?M zKoCGp5E1|)o&XS31j8WEfKUN|Kr0|13cyYmBNXR3=kk(2PhDb z#sf$IR=y_G09hcINC0p{LBIek2mlBI2mlzegX)D)MFa>8e-xk~EQASCU_?ML0N?{C zGzfqyMBzjdU;vT;1P}y(C=dXYKtKQi1;T)W5WoUP43=n&0*Gn=21p@{MGR8_0s;|% zB|;iRfG7zZAP@j0WD5Y$jfkiMngT!}0#X79gis&=AOX-q8$#G1fJPADqyV4^0zeP} z00;m85CI8Le?UMO003|ZKmdUNS`q^V&p{4A282ie4G4i1F#sST2C)PH001HY3IKo- z05AXm00Pkfhi#x6)Bq#|Lqvk$5CA|B2mk5sVa&fG99#0tNwta0&=yX5kNp7H9+KI2{@V*hzJ@0Okg463B(|R0VqfilPE|?Dgl52 z0U!tfe}DiGz&9@l6a!9B0GJTM0hAyjfIxr)0s#PkWf1@Y0{|FW24ty1R3#AzA}~-8 z0{~Ei00IgEKmiC81ONep2tY&tAp``90!auU00aOCfI|cjFa+Qc*$`9!fhYlz1ON;G z000350000W0B9os2vG`6W?%pa2$BFm1ON~ae*i&X1p)w2NDze$s*ofQ6o3Q>07xK0 zAR!=>2%rD}7(_!20RX5Gj0Tk?nE(nA&}mYH1OlXh2EqgY00I^Qgaji2000Vu5Eu{u z00BS%h5$r}LTC&Uf`M=V073)`kRkvB7cc;ju%HA&00;;H2mk;u6p#QQ2t_1HfJA@* ze*pn>G7E_fK!gerK!}8phzNvN2yg*V1YiId7$AWd0U!V*01Jj_z*tt2{0t}nAV5(= zkTehg0E9pU0XTwCAyk9_3=jZmC=?XvfB*yn0007E;sFRC2moLJ0FokL0-y*50Dxcs zLI_|RU=#u*fk2H(BPOd51HhCL2PUARfA9qcphyro5TpQv1pqMtG6@KPGGQR01T+u; z0GP2G2w4Ckpm+f>08Rn`BLo@|3?LDpAhiJyKq3$X0da5u5ikId03lW-FflSL06<0n zMl1o4zyJUM002n@SPcmR5@ARP000yg1tc+GAw;PP0LUmuN>E`KFTfO{fJ#7se;}eM zSPB&bgNh=x2r_^H004q28H@w~AQ4Cb0%NI2Y=K4$2p}K~qA)lC01$MLA%ZOch7te> z1R;?+DQGpQv%r1a}WdIQ%aR4wx zpa=*FVk;DZf)a!Rn*;$wx17OQe~2amWDGzc0)!$A6d*1ELjVW_0#E>eoh$Nm3jh%)A&}4r14LjTLJ$BXC>ekv z007D%Km?))BEl$Q000;OAO-*s5HOGxfL;Iq03a~{074iGC=^HoMZ*#xe-RY?$^aT< zN)SK*h>Z{d07L?y002P(Qvm=7Ge`nR!VoM50000=5Rh=75(Ofx5Cl|!1Ox(t0s_z= z1Q0YW>7auEqSH002w@002z^002(`00000002+{0Dk~d0RR910000~0RRA20RRA30RRA40RRA5 
z0RRA60RR91000170RRA90RRAB0RRAC0RR91000000001C0RRAE0RRAG0RRAH0RR91 z0001I0RRAL0RRAN0RR910001N0RRAQ0RR910001S0RR910001T0RRAX0RRAY0e=7h zc>w?bdI10cdjS9dd;tIeegOag00000fB^sig8={lgaH5mg#iEnh5-Nohyefq00000 ziU9xsiva)t0000000000i~#@ujR61vj{yJx00000kO2Syk^uk!lmP$$l>q<%mH_|& z00000m;nF)nE?O*n*jg-00000oPPlT0G|N>00000000000H6T?0HFZ@0HXl_00000 z0Hgr`0Hpx{0H*-}0I2~00IUH30IvZ600000000000I&f700000000000I>l80I~r9 z000000000000000000000J8xA0JH%B0JQ-C000000Ji}E000000Js4F0Drjw006oH z00000006xK0000000000006!L00000006-O00000006`R006}S0074U007AW007DX z00000007GY00000007Ma00000007Pb00000007Vd007bf007nj00000007tl00000 z007wm007(p007+q0000001p7*0RRBu0RRBw0RRBx0hhtZ2qu5%0RR91000000002# z0RRB%0RRB&0RRB(0RR910002*0RRB;0RRB=0RR910002=0RRB@0RRB^0RRB`0RRB{ z0RR910002{0RRB}0RRB~0RRC10RR91000020ssI50ssI20000000000000040ssI7 z0ssI80ssI90swyi0000000000000003IYHC4gvrG4*~!H5CQ-I000000000069NDL z00000000006aoMM0000076JeO00000000007XknP8v+0T90C9U9s&RW000009|8aX zAOZjYAp!sZBLV;bB?15dCISEe00000CjtNfDFOfhDgsjg04o9j04)Ll051Xn05Ado z05Spq05bvr00000000000000005k#s0000005t*t00000000000000005$>u05<{v z000000000005}2w06CX20SOZ&I|2XzJOTg!Jpup#J^}y$KLP*%00000K>`2(00000 z00000LYFZC2@`)r0ssI-0ssI;0ssI20000-0ssI=0ssI>0ssI?0ssI20000>0ssI_ z0ssI2000000000_0ssI|0ssI}0ssI~0ssJ10ssJ20ssI20000000000000130ssI2 z00000000140ssJ70ssI2000170ssJB0ssI200000002S&UjhIC0000000000VFCaE z00000WC8#H0000000000W&!{JX955KXaWEL00000Y61WNYXSfOZ2|xQ00000Z+00000000000Gk2;0Gt8005r?00000005u@005x^ z00000005%`005){005-|005=}005~1006530068400000006B5006E6006H7006WC z00000006iG000000000000000006oI006rJ006uK006xL000000000000000006%N z8vp>o0ssI20002P0ssKU0ssKW0ssKX0ssKYmoWhe6O*t32!G20007Ja007Pc00000 z00000007Sd00000007Ve007Yf00000007bg00000007hi007nk00000007tm00000 z007$p007+r007XX00000 zA_D*b00000CIbKfCj$TgC<6chD+2%kECT=lF9QGo00000FarPpF#`Yq00000GXnqs z00000Gy?zt00000HUj_vHv<3wI0FCx0000000000Is*UzJ_7&%K?48)LjwQ+L<0Z- zMFRi;0Dk}g07nA=000000000007wG>0000007?S@080Y^08Ik`0000008s+~08#@0 
z08;}109FG409OM509XS60000009yk90000009*qA09^wB0A2$C0AB+D000000AK?E z0AT|F0Ad3G0Am9H0A>RK00000000000B8dM0Dk}g003zN0000000000003$}AvjYGCv;zPDwF3YE0000000000 zwgUhFw*vqGxdQ+Iy8{3Ky#oLM0000000000zXJdOzykmP!UF&R#{&QW$^!rZ00000 z%mV-b&;tMf(E|Vg(*pni)&l?l00000000000000000000*aH9n00000*#iIo+kXQ9 z0Nn!s0Nw)t000000O11w0OA7x0OJDy000000OkV#00000000000Otb$0O|t(0P6z) z0PO<+00000000000Ph0;00000000000Pq6<000000P_O?0QCa^000000Qds{00000 z000000Qmy|0Qv&}00000000000Dt`h00902009330000000000000040003500000 z000970000000000000C8000F9000OC000RD000XF00000000aG000jJ000sM000vN z000yO000&Q0000000000000*R00000000^U000~W0015Y0018Z001Eb0Dk}?1ONae z1ONaf1ONag1ONa400000000000000d1ONa40000f1ONa40000h1ONam1ONao1ONap z1ONaq1ONa40000o1ONau1ONa40000t1ONa40000u1ONa40000v1ONa4000000000x z1ONa$1ONa400000000000Dk}gJp=#%KLh{(00000Km-5)00000K?DE*00000LIeN+ z0000000000L<9f;000000000000000MFaoUL000000B8gN000000BQsP000000BZyQ z0000000000000000Do@;0043X00000000000046Y000000049Z004Ca004Ic00000 z00000004Ld004Rf0000000000004Xh004dj004gk004jl004mm004vp004yq004#r z00000004;u0000000000004>v004~y0055!005B$0000008Rh^0001z1ONb&1ONa4 z0001$1ONb*1ONb;1ONb<1ONb?1ONb@1ONa40001^1ONb}1ONb~1ONa40001{1ONa4 z0001}1ONa400020lkpQ1Zm$FY0I&oA0I>uB00000000000J8)D0Ja1G000000J#JJ z0J{VL0K5bM0KEhN0KNnO0KWtP0Ko(R000000K)_T0L26V000000LKIX0LTOY00000 z00000000000LcUZ0L%mc0L=sd0M7)Ip#dI$&;$Sg(F6bh(gXki)C2$k0000000000 z)&u|m*8~6n00000*#rOp+XMgr+ynps00000-2?yt-UI*u00000;RFBx;sgKyz}(>jVG*?*sq<@dN+>@&o_?00000^8^3@^#lNa z0QLj`0Qdv|0RIF4000F5009L600ad90000000jjA00spB0000000#vC00;#D015>F z0000001O2H01gEJ0000001yQL01*WM01^cN0000002T!R02l=T02&1V0000002>7W z038JY000000000003QVa0000003Zc_001Ec001Hd001Th0000000000001Wi001Zj z001im001oo001uq0000000000001%t001)u001-v00000001@x00000001`y001}z z0024#002A%00000002D&00000002G(002S-002Y<002h?00000002k@002pU1poj` z1poj{1poj}1poj~1pok21pok51pok61pok71pok81pok91poj5000181poj500000 z000191poj50001A1poj50001B1pokI1pokJ1poj50001G1pokM1pokN1pokO1pokQ z1poj500000000000001M1pokl00000Z3O@TZv_AV00000a0LJWaRmSXas>bYa|HkZ 
zbp-$bb_D0G$N@0G00000@dW??00000@&y0@00000^#uR`0000000000 z_XPj|00000_yqs}`2_#~`UL<0`~?62{sjO4{{;X500000000000tNs8000000|o#9 z1O@;A0000000000000001_l5C00000000002L=ED2nGOu015^G000000000001O5I z0000001XBJ000000000001gHK01pNL01yTM000000000001*ZN000000000001^fO z02KxR02T%S02c-T000000000002l@U0000002u}V02&4W0000002>AX00000038MZ z03HSa03Zf`001Ed001He001Kf001Ng001Qh001Wj001Zk00000001cl001fm001in z001op0000000000001!t001-w001@y00000001}!0024$0027%00000002A&002D( z002J*002M+00000002S;00000002Y=002e?002jS1^@s^1^@s`1^@s}1^@s600000 z0000_1^@s6000000000{1^@s6000000000|1^@t41^@s6000101^@t71^@s600013 z1^@tA1^@s6000151^@tC1^@tD1^@tE1^@tH1^@tJ1^@s600000000000001G1^@tN z1^@tmXa)cPYX$%SYz6=TZw3GW00000aRvYYa|Qqabp`+cb_M_d0000000000cm@Cf zdmfCc~nfd&8oga!Zrg$4isi3R`wiUt4xiv|DyjRpV!00000 zjs^e#kp=(&00000lLi0)lm-9*00000mj(cT00000000000GS2=0Gb8>000000GtK@ z0G$Q^0G|c`0H6i{0HOu}0Hp>10H_840000000000000000I3E50ICK6000000ILQ7 z00000000000Idc9000000ImiA0I>!D000000I~)E0J8=F0JH`G0JjDJ000000JsK! z00000006oM00000006rN00000006uO006!Q006%R006)S006=U006@V006}X0074Z z00000000000077a007Ab0000000000007Je00000007Mf007Sh007Vi007el00000 z007hm007kn007qp007tq007$t007*71^@uw1^@ux1^@uz1^@s60002u1^@s60002x z1^@u&1^@s6000000002z1^@u+1^@u-1^@s600000000000002&1^@u<1^@u>1^@u^ z1^@u_1^@u`1^@s60002>1^@s6000000002?1^@s60002^1^@s60002_1^@tm`vw32 z0000000000{00C3{RRL4{ssU500000000000000000#g71P1^B2L}KE2nPTF00000 z2?qcG3kLuI3DY000000000002~JZ038Pa03Qbc03rtf z03!zg0000003-(h03`002e@00000 z00000002q{002z~002%0002-2002@4002`5002}600000003480000000379003AA z003DB003ME003SG003Wq2LJ$I2LJ#70001E2LJ$O2LJ$Q2LJ$R2LJ$S2LJ#70001N z2LJ$V2LJ$W2LJ$X2LJ#7000000001R2LJ#70001S2LJ#70001T2LJ#7000000001W z2LJ$f2LJ$g2LJ$i2LJ#70001d2LJ#70001e2LJ$m2LJ$o2LJ$n00000fd>Ep00000 zf(HNqga-fsg$Dot00000h6exu00000hX()v00000iw6Jzj0XS!0000000000jRyb# zjt2k$j|Tt%kp}<(lLr6*lm`F+nFjy>00000ng;*?oCg2^o(BK`00000p9cT{pa%c| z00000q6Yu~qXz(g000000000000000000000Hg;10Hp^2000000Hy~30H+540I3H6 z00000000000ImlB000000I&xD0J8@G000000JaAJ000000JsML00000000000J#SM 
z0J;YN000000KEqQ0KW$S00000000000Kf+T000000001g0000000000006=V006@W z006`X00000000000071Z000000077b007Ac007Ge007Jf007Mg007Si007Yk00000 z007bl007np007tr007ws007$u0072LJ%?2LJ%^2LJ%`2LJ%}2LJ%~2LJ#70002^2LJ#7 z0000000000000000002`2LJ&42LJ&62LJ#700000000302LJ#72mk;92mk;A2mk;B z2mk;C2mk;8000052mk;E2mk;I2mk;8000000001g4F~`L4hR4M5C{MO0000000000 z5(oeQ69@nR0000076x05k{y00000067Q%0000006GW&00000 z06ho*0000006qu+06z!-06_=<07D1>07M7?0000007eJ^07nP_07wV`07?h|080py zp#c_uO$Y!0Q3wD4QV0M5R0se7RR{n8RtNw9R|o(A00000SO@?B00000S_l9D00000 zTL=IETnGRFT?haGUI+jH00000Vh8{L000000000000000WC#ENXb1oR00000Y6t)T zYX|@UYzP1V00000Z3qAWZU_JXZwLSY0001g0B{HZ0C5Na0CETb0CWfd0Corf00000 z0Cxxg000000C@-i0DK4l0DcGn0DlMo000000DuSp000000D=er0D}ks0E7qt0EGwu z0EY+w0Eq|y0E!3z0E-9!0F4L$000000FMX&0Fnp*0Fwv+0F?*;0G0><0GS8?0GbGY z005f^005i_005o{0000000000005r|00000005u}005x~005#0005*2005>4005^5 z005~7006280068A00000006BB006EC000000000000000006HD006QG006TH006WI z0000000000006ZJ006cK006lN006pw2mk;8000000002I2mk=R2mk;80000000000 z000000002K2mk=T2mk;80002Q2mk=Z2mk;80002S2mk=e2mk;80002Y2mk=j2mk=k z2mk;8000000002d2mk=n2mk;80002h2mk=s2mk;8000000002m2mk=w2mkIeV;>j(e?g#(??+5?@00000 z0000000000@CX0^00000@dy9_00000^9TR{00000^aua|^#}j}_6Ps~`3L|2`v?F4 z{0IO5{RjX600000000000001g000000R9L700000000R9009XA00IdB00RjC00jvE z00s#F00#*G00{{I0162J0000001XKM01pWO01^oR022uS000000000002K)U00000 z0000002c`W02v7Y02>Ja02~Pb0000003Hbd0000003Znf0000003r#0001Nj00000 z001Tl001co001fp001os00000000000000000000001rt001xv001!w00000001-z z00000001=!00000001@#001`$001}%0021&0024(0027)000000000000000002A* z002D+002M<002P=002UN2><{?2><{@2><{9000000000+2><{`2><{{2><{|2><{9 z00000000000000=2><{9000000000>2><|02><|32><|52><|62><{90000}2><|9 z2><|A2><{900000000122><|C2><|D2><{900000000162><|pTnPXGUI_pI00000 zUkLyJVF>^LVhI2M00000V+jBNWC;KOW(fcQXbAuS00000YY6}VYzY7WZ3zGX00000 z00000ZwUYZa0vhaaR~qbatQzcbqN3fcL@LhdInehB~ofe8Qr00000 zf(ZZsg9!itg$V$E0Eh_y0E-C#0F6rQ0>2xY?aNBZ2PJW{cu|hnRJ3g`q|1J=;BZ0j z*G}&6QUjvUklYbBXlwMNsV5*D(GW}rKb|eiRqrvPZ!<(J^?JgK?j84-Q+a%*t;m4+ 
zZcAV~uStY8oky&*TwwG!5v^-!i+O{)1e*t<$BraclO-*G^<1B;Ph!`(1y_wiYmujd z88^d;6qw3-{J=M;00DY&JJ$lRZgj!NTa@fZAtcwppu*IO5V&`7j}E^)@_k-ra?d3M0tU-#`FUe!oWa^*YWz9e@#?Q82BAS$-4ZxvC< z{w67ZXn_)cRS=0a*9P0w$>=C9+w1~&!Bc}mc)3R_O%d@Z{4|}RHIM8e2EiNM5m|Qa zF@6oR2ua4&&Tl96f%z__GSQH*aJT1Q^yqYX-7rB9pB}{1($C8vX2c99AKhBNBwP(R zxvn4ai9FaI`synm$3qG1sa@nnu$n*T&SLbcWaj^WZOKbH+5`~r0AOEShhq@*50!8& zFy*HTR`2@o$2DE5NqOD87kiF5>v_}Y zrGG4cuy|3S!kH?|*&%$ftO~^t@eA;K+)fzM>cLz3@%&Vn9if)tIb52Mh$!% z5SsfXhhceTC5H8SB z`?Xb7+A^eIPHFya7iwpIiuh@dKBej>T&mFk4FFg)Z)@*D=G(s`8| zA@RqX!a<@C^b{FKD6Fn z@jp??Ud~zlK^{V%KxJjEvrx>%Sc_zTw4np~G>A{C^{%3hxGN`~SD685UVM7H0UlHH z@f;b#V(Eh!^Bft%y`^%iqNt2g&B)b>5e(cU5&!zh*DQR0ect_j`WzX;dnp;B){9*e znlTcB`y3g<`Me-1{lln>N!5Dy?~1AcanxgR&AscIopdmUaezCFBwO zT@C9*k~K33McvFg0&VF`gO z*YxRd@MS~N0^cJ*beUQZrc&5{^!jN@l0~R?DGQwx0*X!01~y8#?O)$R*1$KJ3}yxG zTqIke7@|iqyJn~!ZbkB1#Yv2L*3DZX zO#IF{OB&22$=n}5z5urT`$?ho#{lP~KTI;|_dU14V%{W{vebZX3%UULzUiM)pw3zD^l1%Tpkmw^h5)d5A8d zMVh(7ads3i?m2X-YSNP~k=&fWVrD@VpO4YItI)fo!@s)>6!UPHzJAjDJbV5}J7y)f=>z6qarPpJl2jr*vCCPLUAE z$kd8Eknzr!;!6=w^T;!iekia+SmCsF2#{~?GI%cM6`h|>)JbrU?6iv;og3%aK^{4GQ6cUROeUde4 zjX5JF0=dn+u9=D5Wz?DQlb0q$)Dnns)fL;E?V@OZQ^>E0xrqOf@Uwm>utDJ-ac<%U z3f1WTXG5ltc9vzAZWWZ7+!!$3GcjAj12v;!BJkZSSYV2=Y`q*b<7dr3%6I3z^&{ zVi{7Ymd`ean-l)St-v+I1@sKj9n~1qTXKp#NGR&;(@+J=uWdD--5S1479=E`wX@G>Pm=z4a3U>8P4IIDNtd7}ij(=j;oP>& zozOX9kt&-%sqHr?$lnBbgUfq|G|0f0-QU4!kzU)Ct^dBMe&A1OxhZ|G@4-~dP%2FhDy~o zWg1WtioGofmW)cSup4?V+x+qH$~pabrUO)DUtgZR21i)6Mp2pw$F=tDKEgI3L>dS z+pokVh2$Y6NS4rPTi{7 z1fK))2ZtXtbT+ZGsU}%}z_;2BBw;QlMj4egTQOr|+Helc-HTb{N zPlo7V&ZtE0PYcv=T5$nbi*qT^;f*qzvM7F3BzqcXWh3NFUSc*cufz~PwCZQOx{`gM z?~~-RW6SOfyZ4I*aEO0^z6(V|wRDND&qMo15xF80Q#gTIRTmdmN_|B4n><(|v1FcU zf4NSUs_+MS!nrOGnH1!IAn)V%f3SoK?gv2!cZCS@ysivteDE2`$=mXa;3VLHG2RWg zc{N)BXt9DRY-{JEl=?SB4#VP*ZTSd0q4pOS7%-k+Q*%*5z_1O(^_5W0{`M-;)t3)q zx~taSCx}u&ktZ21d2iF4lLV)mv6Dz0UQt-^y1UkbVIR>BTFn1{I#SnX-eF$JP6x-S zPO6!3vk-bSmZc;itZZws{}kya!9p 
zZdX0cSRzWm$N;;4v!;%Ga@S;$m-B=^eb$);7(bSm^eKH)6$|ctl%cIrOT+Oi9rWNUA8P5E`toWn5aG=J@*O?$;Ul1Y{ef7HNdFyFcK$L1NiOrFHA7zmD z2qSb<<#s0xICOO?D?VOQ;!5@U?a9W8KLOGWH^#0Sv!$^~Ye6_wy-7Ikd(2eV zr}r-TlKJ6(%8;5{1p$3$wioEAUMTDNA23TTAXzv7!I*KZ+Zi&xq!`1N$)_vhU|Tv+ zF^`17dK3mZY0G`8Qs-?vnC|9?fMOX%#$T`?_3?GbfHp|2;08bn(wnfh#R>SYEL<=+ zv|G?Y?GIdUl@*vPbE@PSBK%00}eM@GRu`{2( zW{CLj$M?D0y*d4S5<|HUdlpY(b(U?Nw0NNAyax~0$Qhnhvc~6kh2NTt(vuOUVZ=n@VIurPJzcz z9d$hQ0C|gyuy*h9K`vuKi<|nsKVHg_Y{1K$PqptkNHib2QH{qt#BKT`$-_@|#Ro2NJJm-7+!;ru+ih8i% z_-_9n(xxtzI`lG*{NYetX4lIyUn4aH9SvSVa}ezJDW#=< z$Sd33{orzO~mmu0~Oi@~}qqnGTKDNO+CX;mJv(_>j=doC+oV8kv zeT{cZts%K+A2_U=DZ(1$Aw^Nx~+5V%`n(_lOHhZ{RJM3aULZXeH) zd3N-6)f$VJ#?FhP$xuNE-eR7pxp|`eI2@3q{y7V1qp67WL#b8!Bxd53NY?m&hCQwS zDOP3!+FF8Pk7y6PHMeNRI`e5+Ds1oW*6aekQt>?vb?t*H*U1N*;H7Slp5CSA_cWdU zFtu^J0u9m666giLGvqqwAG2H;{qKh?3$fso%5Ntw!Hq_J1PAP8F42(9njXaaF$wk{ zNaQbxT1mIR?=cA`ivVi(w5}h231|X!1+1Dsr&ou&;LHDJKfXE}pCUI3Y@2I^Ywr8N zTxhQYKlG>g+qG)sbN&yw1>)bcjrUwS%8zJuukU>55r@(NnnLpUl7vx~=_QYP{WGct zvt=l_+f=?J2{3CCPz^L?t>R7xM!3mBo%2H%+0!1L%r>m(Nb@0?WV< z@sgHu?|sT1u|BDe?9*}3n*}kFPAxvHe=FGFrS@`{W+LP&MfGtnq!Y>~tVO2Kkc>!1 zQ7u&(dWrCoJl?Dt;;d4`twCT_lfl`;41cf~nC`EHqMh~>zN zkTy3CO>Ap!iuwFih?A**hyaC(p<~oV5(Rc4#B>Gpbdv`mkV_d9q&gu{I8MEFWyo}$ zPAPxH&}`3DOgT35?AjSnFVX@77y-BLjX71uXC_pNGUmM1Ki)Hi`h>t)F|V5f%Yant661H9oYJ z+8hc5qVbFVCM<+^MS%T7db@QMdHS$U}CkC4P-~gH_7gq5bV!4}O4n05Hqe zS$~c>PWLCtF?GtbT{_qUunD+|BK*re9bsz7b-yozF^vs>02r5bWC39Ms;~kw@1Wvgc%vD)Fo-7sDgcQZQ%^K_!3wZ% zv+wqA?!6HO4FDiHHY2i*o>i|1{l0<^5xI4Z5ti7KIGdA5No-m@K@MIoLsOla%%X2PIX1R-FP^Gu1jXgBQ9HyH7_8+j*b# zg2oS%bQkoL7Y$58pW<5kVHydp`GX?^x6uOP6`{hjf!9LCsg0Ql@cFyQvu6bGxw?p9HM zpnKGdHEts_y=M#qi$G8S(YK?mnat0i&fE1&tLix<#c}3CS$>7x;WfLulcAPa6>Jz@ z>*p~tbJg{YYi&)=NFas3uE3m-u`&oDfgx-BMoR@#V&S9?;aXL=_-D0Yl4NO#Kbhz) ztZqdFYcn-ojG<>xA3q>f<={t90y?XIG@vz3i9v`-62&9}c>_9m2X;{{{*TbHG`U?M zy9>SSNmKQ~Pvl59R!|tcBk{R`t#ue{et7)SF{Ap7J4@tQ7ThYOtDq#EuBBo;Q-1jw z&j&`?;YRidxXLpOC&+IR#=cHvSklh4ZA|rRc#n*^M!~l-#kDiL{t}Eme#u 
z3!A_&lD;Y#Gg%hskO+5Cjms*KL!5~YNc`mA{WFCZnLB9IgJy79ZqFaNji=-Y5i-@ zF}3fph;5aL-I-8cDWhOSJ)?d7VhgK-O5%@8Ujf}7anjEk^OWTNXALKRD*5lzrM%&u zX#$n9YO{E0{6furzE3>9CT2adB{6$fVn>vCplZP%*$yw{5UD{><&DsiqeVJ6GeXVSP4Lv~>reO85i(_MbY3$rgKpzGgb@jsmzGuR^u&LXV; zl2=vZ{#KHlq1~T6YuI#uL?pR)`tU;YO+L0L4TEX}-Gmn}lg#Z(a=+_C=tePRFCuS^sqlUH(zSNu0#BVxNNH+BDK z6|YNTl1(_7a!3F;`JbQ%|RK}=2e`A&!_l&h1u{v$x2q3ncOeHxN3HX!e=Uo=w8H=HO%pCF3`)9omG)y`oh$~$*(nv3JJS1cg4H|+fFvU~r0{7X^c^bxl?w;KF)7kEBHINN*|78f^hs9U*>ki{peivwh}hQ!(e;Q@q@T6h3pMiH|x7a2-cC z?X7)eG6Pl@Zom*ft*@6iErX}76<~6T;0)jH!|Z6mZVh+;EjcSwDu{gyvpD|5vpv`J zHQdI3cOml*oFg8eM^*2^70Z|Tzu(k3VbcpwA9(6%VlMk!U_caStAn*J5K8x?VyvJ? z)DOj7aZfaxF$V44hXIki!3Km`zilB&z(vEJcZf=0J~ohCKA#bPG;&5AZQ$3@<`X0R z`tO-vQ<27u&B0GA1?OAWr$o^uU#qDOVjVYsTz;-$ZmFdguL`&wuPTWt6G?LIn%aN(1#>O}iaoaOfHgGf*7_F#BUquntUmrrRn zYW*=aMeeXHA`>{tUz6(pDbT$A0azIa(dW6HVvK$fL}m<@IC0DD`}Bohayk)F5kG!^ zk>dSl=fec{l=c2=_Ri6#W<&h0s|3-RLfwp+=R>dOkcPWvCxmJTmFs@*uGNVP zIAe;_o1En}4v^oRfiQ0u=?fmw0Ec4h4G~;o_dv^=_JAnGhYUAoYWly`(|J zY)_q&l=nr%Y)?1_kih&KQ8>37(|JKlUQguRgX=fK7$DIf`ql}Z&V#>y0HfH#C?iQU znwI`-L3yY~`Nh#8#EH;fAq7>jaf4BGOhh;OS#BB*it-?hAJ38++B+Kqf#1F8NYKul zg!fhpBQL@H(>(+o;np0IX+QiL9#yg;^?4(xm zI?jl>h?O%fj9u^~#!^au@2H%R`yjU#8*d8}oHKL6r1%yMPyRvE%v8jP?pfZs9eo<< zBVHM&FNQI>nNel2-z8}=xY)a{Ylj7dlkGoiows=&sZ#~?@g`ga{tcM-)qBqE<3!;S zT}!p^drz0i6wJ$(WaCVluLEJuiprjQBas^4f&WWo10sw{Z~rxaLabeS-Dk`S()r8c zK<@$aAhEt#0;E#Uv~3--Eb)4W&?mufNRWhV5y`fO?mG!&3^-zD#?lEA7oE|t*f8KE zpo1B1Sw_=5{JeUHliR)EII*P0$`S|Cgk3YxOj0wz}gs$YM;OU@7@48cNf4^ zS3%S1v^t5=_~oei4fE*at$Bauie{xwP+F8Q;WBepjC~(xmyKG-mdW-2CfpIKHzxOV-jok^U%7j#S$Gx`s>D+^f*1tHn;q9i*NYTCwDSD5Sw zke0k;Q@l^x7d9brI#=Y-+v|i94ABqd42P?MXP%PQY@ic-R6^lXdnXG7{u-U-sR+FIt95Dj` z2>{Sl1poj5000000000000000000935deU@3;+NC0000000000007)c0{{|}p#c*G z!AAoC5|g0;69rof0RR${p#c*GyF~*45|g0;69pkP000t`p#c*Gmqh~r5|g0;69M^? 
z5j__LMo9wz5|g0;69wHx0{{|}p#c*C0+#{r2^R&Z{r~_Hlc50<1;Jqe01}g-0TTtt zM*{#7lc50<1ua7W01}g-0TTt~L<0a4lc50<1@A)x01}g-0TTs{LjwR3lc50<1zF0{{|}p#c*GnMVTv5|g0;69t7u0{{|}p#c*C zQUw0{{|}p#c*Gfx`g+5|g0;69I~oAwL%dFEjuE5|g0;69rww0RR${ zp#c*GY#IOn5|g0;69uzY000n^p#c*GRzm{-5|g0;69s=X000t`p#c*G6Gj675|g0; z69pJF000t`p#c+<$T=7VR%`(P5|g0;69unG0{{|}p#c*G{VM0{{|}p#c*Gb2I<|5|g0;69tI|000t`p#c*GwnYN~5|g0; z69qp=0{{|}p#c*G4hsPQ5|g0;69xAh0RR${p#c*G?mhzm5tE?-69rpB000t`p#c*G z!@~gp5|g0;69xQ40{{|}p#c*CC6_V62^R&WM*{#7lc50<1w2Os01}g-0TTsNMFRj5 zlc50<1%^fg01}g-0TcvC0{{|}p#c*GT1Nu_5|g0;69xB00{{|}p#c*G>qi3s5tE?- z69uth000t`p#c*GCq)AQ5|g0;69q*_0{{|}p#c*G+gAVp5|g0;69s)o0{{|}p#c*G zjzt3i5|g0;69qv@0{{|}p#c*CKa&wU7X?ru000t`p#c*G@<#&z5tE?-69rK_000t` zp#c*CDwDxD7X{=;0{{_|p#c*G3q}I~5|g0;69p1T0{{|}p#c*G?nDCs5|g0;69E{L zaW@wQrbGh(5|g0;69t<_0{{|}p#c*GU`GQ05|g0;69sie0{{|}p#c*G*+T;W5|g0; z69p6-000t`p#c*GYr_Em5|g0;69wr-0{{|}p#c*GLBs(75|g0;69xH40{{|}p#c*G zb4LRJ5|g0;69sfh0{{|}p#c*Gt@!}}5|g0;69pSd0{{|}p#c*GD@X$X5|g0;69u;# z000t`p#c*GOh*F%5|g0;69sHV0{{|}p#c*Gmq-Hu5|g0;69wg-0RR${p#c*CtCJBu z7X^7q0{{|}p#c*GW=I175|g0;69xDi000t`p#c*G9Y+HI5|g0;69qF^000t`p#c*G zB}fAR5|g0;69o-P0{{|}p#c*GU_}D}5|g0;69v9V0{{|}p#c*GX&V3l5|g0;69Fre zp*I%=r$_?;5|g0;69G??0XP>0Y)AtD5|g0;69q(t000t`p#c*GJVF2f5|g0;69FQV zaW@wQq%{Bl5|g0;69rmH0{{|}p#c+<;5--wenSHQ5|g0;69q{|0{{|}p#c*G2}A<` z5|g0;69v@l0ss<|p#c*G?N25|g0; z69tha0ss<|p#c*GpcnuE5|g0;69okv000t`p#c*CPLlyR7X?j80{{|}p#c*GfJg%X z5|g0;69rN<000t`p#c*C;g=x-3Ks<~Ndo{9lc50<1v^Ot01}g-0TTt@Mgss6lc50< z1x7Rg01}g-0TYujJ{Sdsy#N3blc50<1vE(m01}g-0TTh$lW{i}1zto001}g-0TTsM zNdo{9lc50<1=L9c01}g-0TTs7LjwR3lc50<1lW{i}1xQ2#01}g-0TTt!M*{#7lc50<1;$7N01}g-0TTt{Km!00lc50< z1)xX+01}g-0TTr!M*{#7lc50<1%1x}01}g-0TTr<8UO$ilc50<1?5Qt01}g-0TTri zMFRj5lc50<1*bv)01}g-0TTs~NCN;8lc50<0mqYZHx~sgLI3~~lc50<1;axF01}g- z0TTspBmw{ulc50<1=U3Z01}g-0TTryNdo{9lc50<1zr~b01}g-0TTtOMFRj5lc50< z1%M#{01}g-0TTtGNdN#6lc50<1$RUP01}g-0TTfkmodT#7X^((0{{|}p#c*CGn1h= z7X@u50ss<|p#c*GC`JPS5|g0;69rZp000q_p#c*G-A4le5tE?-69w5w0{{|}p#c*G 
zVl)5%5|g0;69q+B000t`p#c*Gphp7$5|g0;6O#}?7zHvJ000t`p#c*GTpIuY5|g0; z69rI40{{|}p#c*G!bJlB5|g0;69wW$0{{|}p#c*G&no}`5|g0;69v;m0{{|}p#c*G z%2xmY5|g0;69pa_000t`p#c*G(nbRS5|g0;69o)N0{{|}p#c*Gxkm#45|g0;69wu< z0{{|}p#c*G$3+7G5|g0;69r~K0{{_|p#c*Gutx&`5|g0;6PIwz2n3V7KokY;Ndo{9 zlc50<1%W*P01=a+0TTspJOcm{lc50<1=dFc01}g-0TYwxKo|uyL<0a4lc50<1tP=& z01}g-0TTr_Gynh+lc50<1$;#V01}g-0TTgHlL0sv1+7B^01}g-0TTg-lMy->1yn}^ z01}g-0TTtppaB39lc50<1^Pe(01}g-0TX|W%mDx*01E)5`v(930000u0000000015 ztN;Ka01E)r{RRL400013000000001?TLJ(g01E&_S_l9D0000K000000001rM*;vM z01E*8L90RSQZ3jln62LJ#7003M700000007oi001HY z3jkz>1^@s6002w?000000053M0RSQZ3jp|Q2LJ#7001Ze00000008V20RSQZ3jmB> z2LJ#7001li00000005{@0RVp@01E(sBL@Hg0000K000000001?aRC4#01E)@KnDN- z0000e000000002>ivR#301E)Hga!Zr0000q000000002fYykiw01E(Yga-fs0000K z000000002r_y7PR01E(U#RdQX0001F000000000*4FLcm01E&Z;|6~K00000JOBUy z00000)aC*JA^-~j^ppqy000006aWAK00000u@ z0000S000000002T!2tjw01E)1e+K{n0000`000000000FMF0RI01E&#UE(ibs0000$004gg000009HRgLA^-~jD9Q!^00000AOHXW z00000qbvacA^-~jm}v(900000SO5S3000003w;0pA^-~jxUU8P00000C;$Ke00000 zJIw(AA^-~j===u&000008~^|S00000=LG-&A^-~jWI_f200000JOBUy00000ydi%8 z03rYj0H{<3000000F(d#0000005S6b03rYj0K7j30000007w7;000000A_{&03rYj z0Ca%{0000003ZMW0000003+7`03rYj05By70000003-ka000000G8|m03rYj0LYgJ z0000005kvq000000L^y+03rYj033gn2LJ#7000CA00000006;g001HY3jkPZ1^@s6 z001li00000004>p0stZa3ji3M2mk;8003|R00000005{@0stZa3jkOh2mk;8000yK z00000000^q0{|ib3joBR2mk;8001Ze000000071K0RSQZ3jj1D2mk;8004g^00000 z0002~Hv#}601E(+4hR4M0000$000000000}!2tjw01E(M_6Gm}0000S0000000015 z+W-I}01E&pDF*-m0000m000000000jE&%`{01E(k4+j7M0000a000000001DB?15< z01E)TFbDtu0001-00000004ggIHUjoA^-~jP>==y00000Bme*a00000a~A*rA^-~j zOh*O)00000KmY&$00000>$v~`A^-~jn3@Iv00000C;$Ke000007L@=1A^-~jNQedi z00000AOHXW000006dV8mA^-~j>`Vp#00000Q~&?~00000Ml1pVA^?9208|JF00000 z02BZK0000007kw703rYj07Qle0000004M+e0000004w?d03rYj06ZiJ0000002}}S z000000LgFx03rYj0HB2j0000005AXm000000MM%e03rYj0G#Uw0000005|{u00000 z0LTmh03rYj0FdGa004gg002w?00000001_A0RSQZ3jna72LJ#7006uM00000000Na 
z001HY3jma<1^@s6000yK00000001kO001HY3jjRB1^@s6000;O00000001kT0RSQZ z3jmbm2LJ#7001BW000000068C0RSQZ3jj1%2LJ#7000;O004gg0000%$pHW&01E&B z`Ue020000W000000000t$pQc(01E*0hX?=w0000K000000000B9svL%01E(U00#g7 z0000W000000001=B?15<01E(EF$e$v0000i000000001>mH_}F01E(A;s*c#0000` z0ssI20000u9{_&<0h<0000e000000000@ zBmn>-01E&F0tWy90000;000000002x`~UzV01E)*#s&ZY0000i0000000012c>w?- z01JNr_(lf+00000AOHXW00000dNBY1A^-~juv`WJ00000FaQ7m00000!7Ks*A^-~j z{51#w00000Pyhe`00000-a!HYA^-~jBo_z(00000NB{r;00000>dpcHA^-~j?28Bh z00000WB>pF00000YS;h(A^-~j*d_-6004gg03-ka0000002crQ03rYj05Fya00000 z04M+e000000LNzl03rYj003$R0000003ZMW000000D2t(03rYj0AOMV0000005|{u z000000IxX%03rYj01&eX0000006+i$000000B~3V03rYj0Q4*e0000002BZK004gg z0080D0stZa3jjQU2mk;8001Ze00000005!r0stZa3jhd}2mk;8001xm00000002e~ z0stZa3jloX2LJ#7000yK000000013O001HY3jlOs1^@s6001BW00000008h#0stZa z3jlmp2mk;8000yK00000005zm0RVp@01E&JUIzdG0000W000000001SwE_Sl01E)5 zf(QTr0000W000000000=Z2Re000008~}d+0000007psy03rYj03?A10000005AXm000000E;vL z01^NT0Jzo#000000F(d#000000QPAD03rYj0EAx%0000003ZMW0000009Dxn03rYj z00fT+0000002}}S0000006`f503rYj0DMRW000000CWHV00000055;b0stZa3jkD! 
z2mk;8001xm00000004#T001HY3jlyP2LJ#7007Vc00000001HB0stZa3jhF#2mk;8 z002|~00000004N_001HY3ji1;2LJ#7000;O00000001m9001HY3jp+71^@s6005i- z00000000(9001HY3jlu*fCc~n0000u000000002MTL1te01E(Uhz0-v0000`00000 z0001AYXATu01E(Mj0OMz0000K000000000LVgUdm01E)rGzS0x0000K000000001F z0RjLb01E)r=?4G+0000W0000000007K>z?E01E(geFgvk004ggFaQ7m00000ad-d# zA^-~j45tPF00000H~;_u00000_M!m*A^-~j{OAV&00000OaK4?000004XOYDA^-~j zAm;`E00000Xc7Pb00000aVh}-A^-~jd}s#%00000FaQ7m00000O6UOqA^-~jY!?Us z00000NB{r;004gg0DX`E03rYj0Pw#C0000006YNz0000003iPY03rYj0EnFk00000 z0H^=}0000006C}v03rYj0L*0w0000002BZK000000Jxk103rYj0LbMB0000004M+e z000000DWly03rYj0EB7=0000006YKy000000GVC^03v??3jnk)2LJ#7002k;00000 z006z80RSQZ3jhq~2LJ#7000~S00000005W~0RSQZ3jnNK2LJ#7001xm00000006Y( z001HY3jmZc2LJ#7003M700000007tS0stZa3jm;)2mk;8000~S00000008cf001HY z3jmbA1^|Bm00017000000000gjQ{{501E)*ga!Zr0000K000000001z-vR(401E&Z zkq7_)0000m000000002KMFIdK01E&-LI?l=0000u000000001CVFCam01E&JTL=IE z00017000000002HZU6ux01E(^ng##>0000e004gg00000@-YJdA^-~j#Hw?-01E)p00000XaE2J00000;NAiNA^?920Q`^$ z0000002}}S000000J&8H03rYj08m&6000000B`^R000000GF)*03rYj0O0-x00000 z07w7;000000LL5x03rYj0H`hq0000004M+e000000EasR03rYj07SG10000007w7; z000000Jqx#03rYj0Ei3-004gg001Na00000003te001HY3jq8@1^@s60049V00000 z006n%0RSQZ3jp*C2mk;8001Na00000005CG0RSQZ3jnZY2LJ#7004vm000000058B z0stZa3jkz|2mk;8001li00000001?30RSQZ3jiRS2LJ#7001xm004gg00003M*;vM z01E)bL01E)P1_%HE0000K00000 z00003ssR8Z01E&_>jwY;0000S0000000015F8}}{01E){TLu6C0000W000000000s zMgRaJ01JNrbYKPm00000AOHXW00000kO2b#A^-~jc%BFV000007ytkO00000a0deb zA^-~j450`B000008~^|S00000@(cq2A^-~jw4w+A00000AOHXW00000Z72c&A^-~j zI5Y?V00000r~m)}00000Ks*BgApjHr$h`~z004gg01yBG0000002sdj03rYj0OSe> z000000N4Nk0000004zHI03rYj01$Ns0000005AXm000000LB*p03rYj0I){}00000 z04x9i000000Q71B03rYj0H8ew0000002BZK000000GQJN03rYj0B|7(0000000aR5 z004gg005vb0stZa3jjzs2mk;8000yK00000004he0RSQZ3jj!Z2LJ#7000;O00000 z001l60RSQZ3jnO92LJ#7005`~000000020Z001HY3jn~v1^@s6002Ay00000001&o 
z0stZa3jo9+2mk;8000;O00000004C`004g?01E&xTm}FD0000W000000000HBme*+ z01E)5Rt5k70000W000000002Jf&l;`01E(Itp@-A0001_0{{R300015oB{wM01E){ zbqD|e0000W000000002!0000Y01E&-IR*d#0000q000000001KjR62601E(c;0J#I z00000AOHXW00000YPkUbA^-~jH1P)j000008~^|S000000`LF;A^-~jv^@s^00000 zOaK4?000008T0`FA^-~jR3Hcd00000L;wH)00000X=ecdA^-~j;5i2X00000Bme*a z00000DA)i1A^-~jj3x&F00000OaOlX000000ER#T03rYj0H_)V0000002}}S00000 z08bAA03rYj0N_{$000000H6T?000000GAO003rYj00<}u0000009*h70000001Q;M2F z01E(UI0pa#0000O000000001i)c^n@01E)@A_o8f0000O0000000000aRLA$01E&N zVh8{L0000)000000001uHv<4701E(MuLuAD0001B004gg00000Y8V0lA^-~jv@Hk# z00000XaE2J00000ZiD~;A^-~j=(7d@000000096100000&^H1AA^-~jXbuPf00000 zC;$Ke00000kt6{CA^-~jC}al!00000hyVZp00000=Fb2CA^-~jTpb4h00000TmS$7 z00000)=_@~03rYj0H{|80000002BZK00000039;|03rYj09XtN0000005AXm00000 z0HZJf03rYj08m^80000003ZMW000000A5uB03rYj0Nf!60000002BZK000000Mh9K z03rYj0QiXr000000C)fZ000000Dgx703rYj0K|XF2LJ#7006uM000000055}001HY z3joMw1^@s6006`Q00000006)-0stZa3jh!~2mk;8001xn00000006b10stZa3jpMH z2mk;8003|R00000008$E001HY3jh#D1^@s6001BW00000001Uo001HY3jn~01^@s6 z004hr000000000;G6Dc101E&p3k=03rYj0O*ef z0000008{_~000000JBm603rYj0QiIk00000089V?0000007Q8J03rYj0K|0$00000 z02BZK000000Cj}`03rYj07SJ0004gg007(o00000008jX001HY3jn0H1^@s6001Ze z00000008>Q0stZa3jjcf2mk;8003M700000002`Z001HY3jpX=1^@s6001BW00000 z005H~001HY3jjoA1^@s6002+`00000003k50stZa3jlzc2mk;8000~S004gg0002( zod5tL01E*8j0OMz0001x000000001&mH+@E01E&l!v+8V0000O000000002ckpKW9 z01E&lhXw!u0000`0000000027Kmh*nn z000006aWAK00000l4bw^A^-~j+-U{?00000Bme*a00000Nl5|#A^-~j)JX^c00000 zH~;_u00000s&WAUApjHrpuG$L000005C8xG00000Vv7I(A^-~jh`N6U0000002BZK z000000Ip>L03rYj0EmDG000000CWHV0000002U(v03rYj0Dx8o0000003-ka00000 z0Cv3s03rYj0Q7|j0000004M+e000000K~8X03rYj0Nm^c0000003-ka0000007j_* z03rYj0I22$0000003?3^00000005rL0stZa3jl^A^-~jNM;5A004gg02BZK0000006H1~03rYj z03=EV000000CWHV000000D~X`03rYj07&`=0000003ZMW000000N9`b03rYj0PyDr z0000002}}S000000Hmt`03rYj01*5J0000003ZMW000000I3TD03rYj0F0st00000 
z04M+e004gg001de0stZa3jico2mk;8000yK00000007%H0RSQZ3jh>t2LJ#7001xm z00000006TF0{|ib3jpk(2mk;8001BW00000003EJ0RSQZ3jpwc2LJ#7001xm00000 z008_M001HY3jmN~1^@s6006`Q00000000nM004g?01E&lWd;BM0000W000000001Y znE?PI01E)XVg~>K0000W000000000GyZ`_q01E)b1qT2C0000$0{{R30000pLIMCH z01E&tK?nc<0000)000000002|B>(^-01E)DXa)cP0001(000000001`fdT*`01E&} zYzTh<00000Bme*a00000A{YSxA^-~jTwezO00000ga7~l000005FG&kA^-~j5Ml=a z00000Pyhe`00000ByIr!A^-~js6Ph)000006aWAK00000=hXlJA^-~j1S1Ck00000 zGynhq00000v5o)$A^-~jbi4)t00000oB@9T000000AeEo03rYj0F*BX0000003ZMW z000000N-r@03rYj08nfO0000003-ka0000004Q7n03rYj04OC00000003ZMW00000 z0LmQz03rYj01#0I0000007L))000000Nt7Z03rYj0LY340000009*h7000000A+u7 z0RSQZ3jmCY2LJ#70058*00000008k00{|ib3jh$K2mk;8001xm00000006rt001HY z3jhdL1^@s6004vl000000052v001HY3jky|1^@s60049V00000001;~001HY3jnmE z1^@s6000yK000000050=0stZa3jlu<0h<0000W00000000235CH%p01E) z0RR910001h=l}pB01E)p#lIR01E*0TnGRF004ggEC2ui z00000x&{CMA^-~joI?fx00000Bme*a00000Cous4A^-~jR1pUN00000Bme*a00000 zF7p8ZA^-~j03Qed00000Q~&?~000004P*iUA^-~jxLXJS00000TmS$700000(slp< zA^-~jl%)m$00000TmS$7004gg0IZ1u03rYj0F-VB000000KfnM000000Jf9>03rYj z04RtC0000003ZMW0000003Tlh03rYj0MJ?p0000002}}S0000008R%103rYj07O&= z0000002lxO000000HG%W03rYj09Y~z000000Js1E000000KX>z03v??3jjC;2LJ#7 z002k;00000005kd0stZa3jkDa2mk;8001-q00000004c*001HY3jo9u2LJ#7002+| z00000004Ye0stZa3ji=#2mk;8002+`00000003m70stZa3jh>e2mk;8003|R00000 z004|p001HY3jhR!1^|Bm0000S000000002vdI10;01E)%^!00000KmY&$00000tEK_~A^-~jIC=;G00000 zC;$Ke00000tpI-j03rYj0N6MN0000002lxO000000QhzQ03rYj0Pv*-0000004M+e z00000022QI03rYj08GdR00000089V?000000E6%W03rYj0Nfo20000004M+e00000 z0O)iA03rYj09a=T0000008{_~000000P;uy03rYj0LXuK2LJ#7001li00000001R+ z0RSQZ3jp+o2LJ#7003+N00000006#n0stZa3jnlb2mk;8000;O000000072G0RSQZ z3jkCe2LJ#7001Ze00000003+r001HY3jlyp1^@s6002Y)00000003f|001HY3jplH z1^@s6004g+000000001a6aoMu01E(IDhL1o0001d000000002HH2?r201E&(a0UPX z0000a000000001bK>+|F01E)A^-~j07C}=00000C;$Ke00000dk+EtA^-~j zbSDS^00000C;$Ke00000SC{|*A^-~jn281e000006aWAK000000rLO=A^-~jh{6T{ 
z00000H~;_u00000L>>bGA^-~jB&P@f000007ytkO00000=k);qA^-~j=pcUx00000 z03ZMW000000G5CO03rYj0K}*V0000007L))000000B8;X03rYj0LVlJ0000003ZMW z0000006_!*03rYj02n_80000004M+e000000Gw<903rYj0Kk|A000000Nelo00000 z0Cold03rYj07OFu00000062dD00000005!30stZa3jjoe2mk;8001Ze00000003!m z0RSNY6aY}Y3;+NC001}u00000002J-0{|ib3jipg2mk;8001BW0000000573001HY z3jpw;1^@s6002Y)00000008)N0stZa3jjD~2mk;8004{t00000004ig$^rl)01E(A zcL)Fg0000S0RR910000DhXMd101E(UJqQ2*0000S000000000I`vCwV01E)<=m!7* z0000K000000001VCISE=01E(M0tf&A0000u000000000A-2wn201E(gkO%+(0000` z000000000}&j0`-01JNrfE)(^00000+yDRo00000SM~t_A^-~jDCGwL00000AOHXW z000009>M?sA^-~jOb-VD00000EC2ui00000K0p8fA^-~j6nX{#00000gaQBn00000 zn|}ZRA^-~j^soj100000zyJUM00000^MnEbA^-~jL~RHF004gg03-ka0000009<1M z03rYj0HA*d0000008jt`0000003dP#03rYj0LWqp0000009*h70000002-1303rYj z0CeF80000005AXm000000OITc03rYj000~a000000Gt2-0000002*Zg03rYj09a`T z0000006YKy004gg003cB001HY3jmab1^@s6001Ze000000005t0stZa3jpMi2mk;8 z007_s000000075z001HY3jm~~1^@s6003M700000001c^0{|ib3jn~W2mk;8001Ze z000000053I0stZa3joYF2mk;8002Y)000000005u004g?01E&NF9!es0000S00000 z0001hD*ynY02l!92n_%L00008000000000-mjVDH01E&hbqD|e0000S000000002d zTmb+g01E&peFp#l000220000000005b^!n)01E)rhX()v0000W000000001<6aoMu z01E)vDhPi700000Pyhe`00000)B6DcA^-~j{OAV&00000OaK4?00000Gsgk|A^-~j zoQDVi00000C;$Ke0000055NKdA^-~jtcC~x000007ytkO00000IPm}gA^-~jbUp_F z00000SO5S300000T|xr@A^-~j(6k5u000006aaq!00000055|903rYj0BEWQ00000 z0Hgo_000000KWME03rYj0L;Y(0000007L))000000J|{(03rYj0HkXN0000008{_~ z000000Iqrh03rYj05oa{0000005AXm000000It3Q03rYj0BnW`0000006+i$00000 z0ONlE0RSQZ3jkP12LJ#7000yK00000006NP0stZa3jjbV2mk;8001Na000000087p z0RSQZ3jpkR2LJ#7001xm00000002Gj0RSQZ3jlx}2mk;8002k;00000002}@0stZa z3ji!s2mk;8002+|000000059`001HY3jlw>Y6buR0000K000000000c>j3~F01E(+ z83+IX0001-000000000|Z2$lw01E&pYz6=T0000e000000001XaE2r01E(^j|Kn$0000`0000000022%>e)+01E&Z{RaR5004gg zFaQ7m00000uNVOUA^-~j;9my-00000JOBUy00000IGq3hA^-~jbc_Z700000hyVZp 
z00000I1&H=A^-~j2xbNV00000AOHXW00000QZ)bo5&#PTNZADd000003qX+;10000e000000001qHUR)401E(6YzF`U00017004gg000009$f+e zA^-~jbR`G?00000Pyhe`00000ZMFdbA^-~jknjfp000007ytkO00000##sOWA^-~j z@MH!600000Gynhq00000c5MIvA^-~j0Bi;T00000C;$Ke00000WGnyxA^-~jkZT42 z00000Bme*a000006LEh403rYj061<20000005kvq0000008;n>03rYj0K7s600000 z07L))000000F4;}03rYj0Mz#f0000004x9i000000OYj*03rYj03ZPe0000003ZMW z000000I+rf03rYj090uR0000004M+e000000IL5103rYj0HA;A2LJ#7001-q00000 z006RI001HY3jnN%1^@s6000yK00000008bC0stZa3jnzJ2LJ#7002k;00000000KD z0stZa3jkz+2mk;8004jh00000008Wz0stZa3jnxz2mk;8000yK00000003v_0stZa z3jhd+2mk;8004g+000000001XNC5yM01E)Db_W0e0000S00000000261pxpe01E)H zu?PSF0000q000000000td;tI=01E)To(BK`0000e000000001!Yykiw01E&Vg9iWr z000130RR910002abpik)01E(2HV6O!0000e00000004gg0b&9GA^-~jWLpRT00000 z8~^|S00000WM}{YA^-~jAZi8x00000H~;_u00000peXb(0000002BZK000000Q{=}03rYj0D$}k0000008{}0000000NASn z03rYj04#tA0000003-ka000000G?z503rYj02EvZ000000N?-s0000005lu}03rYj z0IZ=10000004M+e000000RMae03rYj00f^0004gg008s=00000007i80RSQZ3jnNa z2LJ#7000~S00000000bD0stZa3jh!!2mk;8001Na00000001?{001HY3jlZ#2LJ#7 z005K$00000008<1001HY3jn}F1^@s6001BW00000002>9001HY3jj!H1^@s6000yK z004gg00016TmS$f01E(=Wd;BM0000W000000001uw*dem01E)@@CN_@0000m00000 z0001pE&%`{01E(sY6k!S0000K000000002c-~j+401E(M69@nR0001Z000000002& zbpZe(01E&(L0000W0000000029SO9+jA^-~joMZ+700000AOHXW00000@5ulF zA^-~jSg{BI00000Gynhq00000s>%QWA^-~j2(1PH00000FaQ7m00000whjRRApjHr zAifL$000007ytkO00000Kb!ynA^-~jti%QY00000OaK4?00000ipK!}A^-~jJcfS< z000000Av9G000000BuPC03rYj0K|X>0000003ZMW000000A|tw03rYj0Q3b200000 z0H^=}000000Oe@`03rYj09b+t0000009*h7000000Cpz=03rYj02DF^0000007L)) z0000002?X-03rYj0DuPw0000007QQP00000004_i0stZa3jpL!2mk;8001}u00000 z001q_001HY3jin@2LJ#7001xm00000008B`001HY3jhEO2LJ#7001}u00000007Wh z0stZa3jiou2mk;8000~S00000006u(001HY3jp9;1^@s6000yK00000004izr2qgT z01E&}$_4-c0001J5&!@I0000XKm!0F01W`Rh6w-w0000W000000000ye*pj@01E)@ 
zr3U~20001N000000000xQ33!W01E&BR|o(A0001}000000002^+5rF}01E&p3#g03rYj0Q`Ig000000C)fZ0000004q%a03rYj0E~DC0000002}}S000000JXvb z03rYj004&w0000002}}S000000Pf8K03rYj0Q8Co0000003ZMW00000051su03rYj z02D+90000003ZMW004gg007G<001HY3ji=!1^@s6000~S00000000pz001HY3jmN@ z1^@s6001BW00000002(`001HY3jlCA1^@s6001Ze00000003Oq0stZa3jhp)2mk;8 z000yK00000008MC0stZa3jk~{2mk;8001Ze00000005AK0RVp@01E)XO$Pt~0000K z0000000005)dBz_01E&ZjtBq%0000S0000000011H~;`501E(was~hZ0000W00000 z0002TX#fBs01E(&jRpV!0002w000000001vwc00000Bme*a00000a})yrA^-~j7@Y_J00000 zZ~y=R00000kc|QWA^-~juy600017000000000M`2heT01E(EBM1Nh0000e00000 z000064gdfm01E)LWd;BM0000W000000002OwE+Mk01E(g@CN_@0000S004gg00000 zT>$|AA^-~jY)A(H00000EC2ui00000U$p=LA^-~jXaNTR00000FaQ7m00000;r9Xn zA^-~jSdRz*000009033T00000PUZmsA^-~jn6U={00000Bme*a00000hPnU%A^-~j z)S3nW000006aWAK00000tqXqu03rYj0CYqK0000003ZMW000000O|+;03rYj0K8-d z0000003ZMW000000NmjL03rYj0Av&h0000007L))0000004F^G03rYj061?400000 z03-ka0000000K$?03rYj00@Bw0000002}}S0000007U!%03rYj0JMKY2LJ#7000;O z00000004e*0stZa3jlax2mk;8000;O00000002El000sI3jmn&1poj50086y00000 z001~A0RSQZ3jibq2LJ#7000yK00000006!K001HY3jhc?1^@s6001-q000000030D z0stZa3joY)2mk;8004hn00000000179{>QM01W^zhY0`x00001000000000A2LJ#f z01E)%Lk0i<0000q000000000Ey#fFt01E)jg$Muu0000e000000002tB?15<01E(| zF$e$v0000u000000000+F#rG}01E(cTm}FD0000W00000004ggGa3Q_A^-~jO!x-? 
z00000EC2ui00000ML+`pApi{k*oFxJ000009RL6T00000nOOh;A^-~j+++p-00000 z6aWAK00000nvVhiA^-~j{BZ~X00000Q~&?~00000)`$QAA^-~j;J5|=00000Bme*a z00000t`-9TA^?920Ia150000002lxO000000PuJL03rYj0BD;B0000003ZMW00000 z06TI503rYj03>4w0000006+o&00000010pa03rYj06?4u0000002BZK000000GD_H z03rYj0N6$c0000003ZMW0000001IXS03rYj0F-G4004gg002k;00000000v8001HY z3jn-82LJ#7001xm00000006gb0RSQZ3jnBu2LJ#7001xm00000008XM0RSQZ3jq8E z2mk;8000;O00000002$n0stZa3jnl~2mk;8000;O00000004j<0RSQZ3jmm62LJ#7 z000yK004gg0002JRssMb01E(sSqK0C0000K0000000023Q2_uV01E){c?SRh0000y z000000002vZUF!y01E&Ng$Dot0000e000000001*9{>QM01W^*hY0`x0000100000 z00004y8!?q01E*0@&^C_0000e000000002u69In!A^-~jyj=$X00000EC2ui00000 zUCICeA^-~j%&Z0g00000JOBUy00000-U9;wA^-~jB%lZY00000EC2ui00000?VSMt zA^-~joMs0A000006aWAK00000m)ZaTA^-~jkhTT@000006aWAK000000}0000003ZMW000000347403rYj08nxW00000 z0Du4h000000Mq^f03rYj01PDv00000033e+00000004Rb001HY3jjPg1^@s6001Ze z000000030000O000000000JK>+|F01E){ z8V3LX0000$000000000u1^@se01JNr976^G00000AOHXW00000ZfpVoA^-~jlr9JW z000006aWAK00000E@uG%A^-~jaDfK^000007ytkO00000=bi!pA^-~jh;|4700000 z8~^|S00000ZQTI?A^-~jd<_Ty00000WB>pF00000M+yS~A^-~j)S(Cf004gg03-ka z000000PG0?03rYj0Nhmv0000003-ka000000IT8x03rYj063Hg000000DJ%d00000 z01zJl03rYj0Ps@=0000007L))0000001*=a03rYj0DMIT0000003ZMW0000004q2F z03rYj0Bmgs0000005AXm004gg000(d0RSQZ3jq9p2LJ#7003A300000003mk0stZa z3jpMH2mk;8003|R00000005G%0RSQZ3jkc~2LJ#7000~S00000004#R001HY3jiQD z2LJ#7000;O000000049a001HY3jj1i1^@s6001xm00000005g|0RVp@01E(YGY0?w z0000K000000002+i2wj101E*4xds3L000170000000003(E$J=01E(^0tf&A0000y z000000000FUH||h01E(si3R`w0000O000000000OkpTcA01E&N;RgT!0000u00000 z0001?O922P01E(o9|wN`000006aWAK00000xHkg;A^-~jV6O-O00000U;qFB00000 zbL;^CA^-~jWE%(o00000oB#j-00000(J=!6A^-~jtgHwC000007ytkO00000<~slY zA^-~jtaSze000007ytkO000004fg;5A^-~jI6(&h00000oB)3S0000007*Oo03rYj z08Bmz0000004x9i0000006iQ403rYj0PIW#0000008{_~0000002bQ-03rYj0Jtaz z000000B`^R000000K3-#03rYj0H6p60000009*h7000000HVwQ03rYj09YCa00000 
z04M+e000000CIoK0stZa3jp|u2mk;8004LZ00000005s10RSQZ3jmPf1^@s6002w? z00000005DB001HY3jo-41^@s6001xm00000005LF0RSQZ3joYy2LJ#7000;O00000 z006H*0stZa3jpjD2mk;8000yK00000008KA0RSQZ3jlwti3b1x0002s000000001T z(*Xb?01E)j1_%HE0000m000000002ve*gd?01E)5eFgvk0000K000000000>%>n=- z01E(!iU31C000007y$qP00000F)jiCA^-~jbTtb%f01E)v{{{d60000)000000000PEdu}|01E)Hs|Wx90000O000000001f=>Y&D z01E(&&<6kj0000e000000002xy#W9s01E(I^alU{0001#00000004gg2yXxYA^-~j z{F(*;00000Bmn>b00000OmF}IA^-~jA^-~jWSR&7000008~^|S00000fn5OrA^-~jv@Zt$00000 zNB{r;000008|(rAA^?9204SFT0000002}}S0000005By203rYj00jL90000002BZK z0000002x;R03rYj004&u0000003-ka000000L35x03rYj0EAQq0000003-ka00000 z0IV7T03rYj0El1*0000008{_~0000001iU|03rYj09bGb004gg002M(00000005%D z0stZa3jiE+2mk;8002w?00000005cG0RSQZ3jkR82LJ#7001BW00000005tm0stZa z3jok^2mk;8002|~00000008qG001HY3jm-`1^@s6002Y)00000000Hb0RSQZ3jm1v z2LJ#7008I!004gg000215CQ-q01E(Q@dp3^0000W000000002SIsgD701E){as~hZ z0000m000000002{Spone01E&NS_l9D0000S000000001PECT={01E(^s|Wx90000O z000000002{?EnBG01E&FItKs%000170000000013qXK^bA^-~jAYTXo00000Pyhe` z00000VAKHsA^-~j6bA?Z000006aWAK00000)&>CpA^-~j;8OpF00000 zpgaKpA^-~j9B&5z000008~^|S00000@zDYRA^-~jn1BcX00000OaK4?00000R4V`g zp#T^FzzBa00000005AXm000000LKIa03rYj0C1oP0000008{_~000000OPR$03rYj z07w7_0000003ZMW0000009+>n03rYj0QjT`0000009*h70000002q$~03rYj0H|>Y z0000008{_~000000D4FO01^NT0DSfZ000000DOM}00000004(R000sI3jj3k1poj5 z003kK00000000y}0{|fa4FIr(2><{9000;O000000095M001HY3jiPv2LJ#7007Vc z00000003@G001HY3jlD01^@s6001BW00000006eG001HY3jlQg1^@s6001BW00000 z004hL1pojd01E&NK?VQ-0001#000000002ZM*#pL01E(6b_W0e00017000000000K zhyeg101E(s&j$bi0001l000000000`i~s;401E*8x&{CM0000O000000000EG6Mi2 z01E)vtOx)A0000K000000000M+5!L~01JNrAdd(D000008~^|S00000yF~y1A^-~j zRA2@G00000AOHXW00000tTX@s5&#PTOxFbf00000XaN8K00000`40mCA^-~jsG|q~ z00000U;qFB00000viAW1A^-~j2q6dn000008~^|S00000%qs!_A^-~j=rsrc004gg z02BZK000000B8#V03rYj0BBbS0000002lxO000000IDDX03rYj00d(P0000004M+e z000000HAsR03rYj0Bo!V0000003-ka0000007VJ`03rYj0AyAN000000Av6F00000 
z0CcGV03rYj0Gw?H0000009*h7004gg000#{0RSQZ3jmOA2LJ#7001xm00000008DT z0{|ib3jjc{2mk;8001}u00000001)40RSQZ3jm0l2LJ#7001BW00000008~n001HY z3jkCt2LJ#7001-q000000077=001HY3jok-1^@s6001xm00000000|=0RVp@01E)T zu?GME0000W0RR910001a$N~T&01E)bhX?=w0000K000000002Z8Up|#01E)XrU(E4 z0000S000000001$asmJ%01E(QWe5NO0000`000000000-ya50r01E(w^9KL`0000W z000000002IVgLXl01E(IXa;`(00000JOBUy00000Esg;IA^-~jIN%2W00000H~;_u z00000rIi2xA^-~j_=pAo000006aWAK00000(0%{_A^-~jP_PC700000AOHXW00000 zTAKj?A^-~jKw}30000006aWAK00000H6Q>0A^-~jG*kuv00000C;)!|000000GT}i z01^NT0NmvT000000OSAw0000000Aig03rYj08Cg00000003ZMW000000KS(103rYj z093;U0000005kvq000000KD1&03rYj0Awf!0000006+i$000000P*Jn03rYj06>Tc z0000009*h7000000Iq*)0RSQZ3jhFv2LJ#7000yK00000002`z001HY3ji#51^@s6 z001Na00000005>I0stZa3jk0o2mk;8003kF00000004j_0RSQZ3jpk62LJ#7001Ze z00000004@Nra00000Z~y=R00000A65YXA^-~jv?&Jw z00000NB{r;00000Onm_WA^-~j_@4&=00000EC2ui004gg0JA6o03rYj0LWzr00000 z09*h7000000F@d503rYj0GLV!0000006+i$0000003u2LJ#7001Ze00000007^D0stZa z3jmmF2mk;8003M7000000084C0{|ib3jlPg2mpTo0000~000000002oJpceA01E(g zUj_gG0000~000000000E%mDx*01E(+`v(930000K000000000t76Jew01E&#EC>Jq z0000S000000000zmjM7G01E)50000S000000000dCISE=01E&}00;m80000O000000001q zlK}uC01E)5;RgT!0000K000000002fF9QG~01E)XsR#f70000e00000004ggn_&O| zA^-~jkY)w|00000H~;_u00000bmjp7A^-~j02T-U00000Bmn>b00000MFIf;A^-~j zxJU;800000C;$Ke00000_mctuA^-~j9CQc(00000OaK4?00000FiQadA^-~jG#>{5 z00000NB{r;00000M&<$lA^?920Gx*i0000004M+e000000D)2g03rYj0E{LF00000 z089V?0000004^^A03rYj01!9`0000005|{u00000011Qu03rYj0Fbi=0000009XJ3 z000000CR}|03rYj035jn000000Nelo000000QHOk03rYj0O(x@004gg001BW00000 z001dc0RSQZ3jlZ~2LJ#7000yK00000008dj0stZa3jjEk2mk;8003YB00000006yQ z001HY3jhp>1^@s6000yK00000007FE0stZa3jjn~2mk;8000yK00000003vH0stZa z3jm~j2mk;8001-q004gg0002mv;Y7i01E&>0S5p80000e000000001GAOHX&01E(I zR0aS50000e000000001OZvg-z01E&#g$Dot0000m000000002Sr~&{Y01E(=dJA^-~j;7A7m000002m$~A00000|40A;A^-~jNPq?a000008~^|S z00000u08+&A^-~j2zCYl0000000IC200000lLG?)A^-~j5TFPE000006aWAK00000 z`BVV_A^-~jh 
z03rYj0Mu^>0000002BZK000000D=Yr03rYj0N^DE0000002}}S0000004&Y|03rYj z06dEb0000004M+e000000HcBc03rYj03d$`0000003d$=000000028E0RSQZ3jlxx z2LJ#7002Y)00000006|^001HY3jm}o2LJ#7000;O00000006Jp0stZa3jnl^2mk;8 z002+`00000000UV0{|ib3jp|}2mk;8003M700000003%s001HY3jiRc1^@s6000yK z00000004iv76AYv01E)PUIzdG0000W000000002`vH}1i01E&hf(QTr0000?00000 z0002a7Xtty01E&lrU(E40000O000000002>KLG$D01E(s8V3LX0000e0000000028 zi~s;401E(|x&{CM0001B000000000K76Skx01JNrRHX<200000SO5S300000czpr@ zA^-~jIBN(100000FaQ7m00000+;IT_A^-~j6hQ|700000Bme*a00000YP$gdA^-~j z9PW^A^-~jgro=n00000 z8~^|S00000RWbtrA^-~jG_42#00000JOBUy000005f}gfA^-~j@J9v!000006aWAK z00000x2FOCA^-~jFnkCA000006aWAK00000-0T7XA^-~j{Fev-00000NC1BT00000 z0DKz&03rYj06 zA^-~jl(Gl_00000Bme*a00000GaUl}A^-~jtf2@100000C;$Ke00000`G)`iA^-~j zgtrC&00000JOKaz000004|)OsA^-~j{5S{z000006aWAK004gg0J2j603rYj07Qib z0000003ZMW000000LdBx03rYj05tgr0000003-ka0000008icm03rYj0N{`a00000 z02}}S00000089D+03rYj04PHT0000002BZK000000P#Zr03rYj004gm0000003ZMW z000000Q67+03v??3jmOL2LJ#7001xm00000008~N0stZa3ji>O2mk;8001Ze00000 z001Uc0RSQZ3jn-&2LJ#7001Na00000000W50RSQZ3jk#42LJ#7000~S000000046Y z001HY3jjbr1^@s6001xm00000003~N0stZa3jpMK2mpTo0000~000000001nI|2YA z01E)v69@nR0000K000000002uaRLA$01E(+Vh8{L0000u000000002w3IhNl01E*4 zp$Gr~0001#000000000&1pxpe01E(QQwIP50000O000000002B{r~_X01E&t$OZra z0000W004gg00000<2M5UA^-~j*sury00000fB*mh00000+(G~VA^-~j1bzkp00000 z`~Uy|00000GE)KoA^-~j5LgHR000006aWAK000001|R?cApjHrIJ^u100000C;$Ke z00000G>ZZNA^-~jh;Il000000H~;_u00000uWNq-03rYj06;DX0000006+i$00000 z0EQ9)03rYj00>h01E*8>jwY;0000K00000 z0001K0096Z01E&ZM+X1^0002c0000000000>i_^E01E&(HwOR!0000e00000004gg zuFC)bA^-~j1R4hb00000Bme*a00000B>@8fA^-~jaFz%F00000Bme*a00000*f;_J zA^-~jkPZj{00000KmY&$00000(cS?7A^-~j&<+Ry00000AOQdX00000q@(}==y00000Bme*a00000B{Km4A^?9203d7!0000005AXm0000004q@d03rYj0N{fL z0000003-ka000000LDZD03rYj0Q?sS0000007w7;000000LU!@03rYj0CYA800000 z000310000004PWT03rYj08B>+0000005|{u000000707o03rYj03?eB004gg001BW 
z00000007wo001HY3joAD1^@s6006K600000000w5001HY3jnlW1^@s6001li00000 z007qu0RSQZ3jlyu2LJ#7003YC00000008MG0stZa3jowH2mk;8001BW00000006O9 z001HY3jidC1^@s6002+`004gg00013vH$=g01E(U00#g70000W000000001RbOHb( z01E)bW(WWP0000S000000000vw*UYl01E&F0tWy90000;000000000a%K!i(01E&( ztp)%90000e000000000ot^xoe01E(QY6t)T00017000000001+F#vx6A^-~juv`WJ z00000FaQ7m00000nQj6AA^-~jAYljq00000C;$Ke000008_fU!A^-~jfEot?00000 zC;$Ke00000b+!NiA^-~jzySvU00000FaQ7m00000O_BfrA^-~jh=&FM000006aWAK z00000lR5wZA^-~jV03>5000000Gt2-000000E;6703rYj005~70000002lxO00000 z03_T103rYj0F)~S0000009*h7000000OC0T03rYj06cC70000003-ka0000004Fs8 z03rYj0Pt)F0000003-ka0000000vV503rYj04#b300000033e+00000008&T0RSQZ z3jjC*2mk;8001Ze00000007q1001HY3jhSO1^@s6000yK00000001j*0stZa3jkCv z2mk;8000;O00000002QG0stZa3jn112LJ#7001}u000000016D001HY3jhFM1^@s6 z001-q00000004j4SOEYc01E&JEe8Mq0000?000000000*7ytkw01E&FNCp4^0000$ z000000000`0096Z01E*4Mh5@@0000W000000001rz5xIt01E&Z^#=d|0000~00000 z00010!vO#y01E&Z_Xhv~0000O000000002EIRF4601JNrAae!)00000SO5S300000 zrDy^GA^-~j3||NU000006aWAK00000Yl8p)A^-~j7_$Zd00000U;qFB00000NM-;4 zA^-~jIE)4W00000C;$Ke00000(;xx>A^-~jX!-{L00000AOHXW00000l^y^9ApjHr zh`bB{004gg04M+e0000005gUF03rYj091hn0000003ZMW0000003bR803rYj08q0C z0000003-ka0000009!2p03rYj0F-S80000005AXm000000F%)G03rYj0303%00000 z05|{u000000I;C}03rYj0F3Ad0000009*h7004gg007Oh001HY3jnA92LJ#7001Ze z00000006sF0RSQZ3jjbU2LJ#7001Na00000001G80stZa3jlmp2mk;8000yK00000 z000yH0stZa3jk1@2mk;80077U00000000nI0stZa3jlB=2mk;8000yK00000006p2 z004g?01E)He+B>m0000?000000000kaRC4#01E)@KnDN-0000e000000000g7XSbv z01E&xM+N`@0000S000000000%t^fcc01E&p{{{d60000S000000000Sy#N3r01E(Q z3I_lH0000q000000001@xBvhm01E(=0|$Qq00000TmS$700000|A^-~jjH3ns z00000C;$Ke00000_iq6JA^-~jWQ7L+000006aWAK00000<6!{+A^-~jI5YpF00000?tuURA^-~j@UjK~000006aWAK004gg0EajO03rYj z0C=zn000000AK(B000000QZOi03rYj0Nl_A0000004M?g00000061v^03rYj0K6** z0000004x9i000000Ij z00000004F~0stZa3jpLh2mk;8001xn00000006Mf0stZa3jjEJ2mk;8001Ze00000 z004?T0{|ib3jkoU2mk;8001-q00000005(I001HY3jk!A1^|Bm0000q000000002& 
z@d5xM01E*0m0001Z000000002+3IYHk01E)LCI|ok0000S000000002@GXnr3 z01E&htq1@B0000K000000002^SO5Sb01E(|hXw!u0000K000000002y0s#Ob01E)< zN(TS{0002M0swyi00000F@OL7A^-~jw6O*N00000JOKaz00000{Sg8HA^-~jWGDy# z00000lmGw#00000{WJgo5&#PTz~%)200000bO!(c00000&O-tKA^-~jv=;~f00000 zNB{r;00000z3Bk}A^-~jL>LGF00000oB#j-00000$W(s<03rYj0PrCQ0000006+i$ z0000004)Ik03rYj031370000006YKy0000007Fy(03rYj0Hi1f0000002BZK00000 z06b3s03rYj090WH0000003ZMW0000002B!V03rYj04yd50000008jt`000000Pzh0 z03rYj0FZy<1^@s6002Ay00000006)E0stZa3jiFO2mk;8002|~00000004kQ001HY z3jj1=1^@s6001BW00000008|I0stZa3jio92mk;8005K#00000001}0001HY3jlAOHX&01E)9R0aS5 z0001#000000002Qg8={{01E&Bu?GME0000;000000002GBmw{;01E)1FbDtu0000S z0000000019E&~7}01E)fs|Wx90000K000000000Kb^rh(01E&Br3L^10000W00000 z004ggh3)|WA^-~jSRDue000006aWAK00000L1h5|A^-~jSbzrr00000FaQ7m00000 z00000OaK4?00000M*{!=A^-~j+&l&V00000Gynhq000002af>& zA^-~jC|(Bu00000Pyhe`00000P$>ccA^?920GKrh0000004M+e0000002Sx~03rYj z0IatM0000002BZK000000KEtS03rYj0Ju~K0000002lxO0000001|ls03rYj0H~=3 z000000Gt2-0000007Mr603rYj08B>)0000006+i$000000Fa*o03rYj0Hk&Z004gg z001BW00000001jn001HY3jj!n1^@s6001BW00000006P`0RSQZ3jkCf2mk;800001 z00000002`_0RSQZ3ji=B2LJ#7001Na00000004I=001HY3jq9A1^@s6001-q00000 z003P-0{|fa4FLFs2><{9001ih004gg0000<)B*q^01E*8jR*h$0000S000000000L zp8)_O01E)L<_7=(0000e000000001m$^ZZ&01E(+tOfu80000y000000000mKLY?E z01W`hg$V!v0000f000000001ER004Z01E&(SO@?B0000O0000000008%L0D@A^-~j z0Eq|y00000Q~&?~00000+ROm}A^-~j-1`Rr000003;_TD00000k<@03rYj0LU{40000004M+e0000001&hR03rYj02J^C0000002lxO00000 z0J!u403rYj0GJ>M0000008jt`000000QTPi03rYj0E{jN0000009=0n00000008%! 
z0RSQZ3jh>k2LJ#7000yK00000008%E001HY3jjcC1^@s6000yK00000008GG0stZa z3jnwV2mk;8000yK000000094(001HY3jm121^@s6000;O00000006=K001HY3jpNC z1^@s6001li00000004i7odN(N01E&(b_f6f0000m000000001>X8-^q01E&BY6buR z0000W000000000ZcLD$+01E(oX$SxS00013000000000c836zy01E&dU<0h<0000S000000000IK?49H01JNr5VHsX00000KmY&$ z00000ELj2oA^-~j@L31|000008~^|S00000e@_4aA^-~jbYTVn00000AOHXW00000 zwG;sWA^-~j2wn#O00000AOHXW00000QzQTYA^-~jq*ew100000AOHXW00000>C6HE zA^-~jaEb^3004gg02lxO000000PXGp03rYj0C3<30000003ZMW000000F%N103rYj z0Gtm80000002}}S000000L{q(03rYj03iAY0000004M+e0000004YiV03rYj05Eq4 z0000003-ka0000009ytD03rYj0I*XB0000005AXm004gg007?(0stZa3jj3k2LJ#7 z002k;00000004k(0stZa3jokA2mk;8002M$00000006ZB001HY3jpvq1^@s6000;O z00000000oS0stZa3joA|2mk;8004jh000000068^001HY3jp9?1^@s6001-q00000 z000(g0RVp@01E)9ItKs%0000K000000002V4FLcm01E(A;|2f#0000y000000000J zlK}uC01E&_Us|Wx900013000000002&asU7#01E(Up9TN`0000u z000000002kBLV;-01E)LF9-kt004ggH~;_u00000Ql0_;A^-~jXm$ty00000AOHXW z00000xatA`A^-~jSe6I?000007ytkO00000m@EJQA^-~jv}*00000L;wH)004gg0A)e} z03rYj0GM+J0000009*h70000006?Y!03rYj0L*y^0000002lxO000000L+U503rYj z0Kjhu0000005|{u000000D7nZ03rYj06gXf0000002lxO000000L*j(03rYj07Pd9 z0000002BZK0000000eFT03v??3jp|;1^@s6001Na0000000470001HY3jq9a1^@s6 z000;O000000030>001HY3jmnJ1^@s6000yK00000001K)001EX6aWys3;+NC001Ze z00000008I;0RSQZ3jj=42LJ#7000~S00000008$R0{|ib3jk=N2mpTo0000$00000 z00019+W`O~01E(E30000e000000001^8Ug?! 
z01E)b_y+(00000a004gg00000;zj@fA^-~jlwbw`00000AOHXW00000Cl><%A^-~j z#H9!T000007ytkO00000L@NORA^-~j$Y=)u00000L;wH)00000HSPfbA^-~jI2{N8 z00000AOHXW00000=9vNjA^-~jgmnl200000FaQ7m00000Na=q903rYj0GO2s00000 z05kvq000000FZeB03rYj0H~V>0000006+i$000000O$b#03rYj0JJ&=0000002}}S z000000O7>}03rYj0G#*-0000004M+e00000084QK03rYj08nBG0000004M+e00000 z0CsKw03rYj07QS91^@s6001BW000000042}0stZa3jjcq2mk;8004Xd00000007lL z0{|fa4FGV42><{90018V00000000;<001HY3jk1D1^@s6001BW00000001980{|fa z6aZ+x3;+NC000;O00000001210RSQZ3jiP$2mk;8004i8000000001;(f|M=01E&V zAqM~e00017000000000FOaTBQ01E |r&c0000;000000002`)d2t^01E(s2M7QF z0000)000000000PCj$T?01E)zqzC{20000S000000002jGXel201E(!300000H~;_u00000Cj|fiA^-~j+&~5Z00000FaQ7m z00000HWmN?A^-~j{6z);00000bN~PV00000*)#$GA^-~j^b80900000Bme*a00000 zo0h1^@s6 z001BW00000007$g0stZa3jlPG2mk;8002+`00000001pC0stZa3jnMP2mk;8001Na z00000004TR0RSQZ3jomP2LJ#7001BW004gg0001kGy(u301E)n3jwY;0000S000000000RMFIdK01E*4K?nc<0000)000000002+ ziva*401E)<+Xnyu0000e0ssI20001eIspJ801E(MZU+DW0000m000000000$CIf!} zA^-~j=&1+*000008~^|S00000j4c2FA^-~j#BBxu00000AOHXW00000j-vnoA^-~j z=#K^f00000Q~&?~00000->w1xA^-~je)+01JNrNc{%@00000 zm;e9(00000m`VTuA^-~jA^-~jbYKPm00000AOHXW00000mmC2AA^-~j=wSx{00000C;$Ke00000 zXTJdeA^-~jl=TMy004gg05AXm000000HQDf03rYj02C1i0000003-ka00000014Ov z03rYj03d=000000089V?000000GAg503rYj00dtL0000009XJ300000055X@03rYj z0HB`+0000008jz|000000Ii$=03rYj0NjfP0000009*h7004gg003I1001HY3jna( z1^@s6001-q000000020`001HY3jlZz2LJ#7001Na00000006F30stZa3jly22mk;8 z001Ze000000050%001HY3jiRA1^@s6003YB00000000j#0RSQZ3jiQ%2LJ#7000~S z00000001k#0RVp@01E){^alU{0000e000000002>rvd;X01E&_d00000TmS$700000upl+ z000008~}d+000000C^e!03rYj03=EV000000CWHV0000005<*t03rYj0F03c00000 z02lxO000000Km5b03rYj0DyxC00000089V?000000BBnQ03rYj0O&3U0000003-ka z0000009H8w01^NT08ju1000000Qdj^000000K9)^0RSQZ3jm0L2LJ#7001Na00000 z000e@0RSQZ3jpll2LJ#7001xm00000006HS001HY3jnZ31^@s6001li00000005ag z001HY3jjcM1^@s6002Y)00000000!q001EX6ad)03;+NC000;O00000006b>0RSQZ 
z3jlxg7zh9W0001-000000002&FaZD}01E(AYX<-T0000K000000002n5CZ@r01E&J zqzC{20000q000000001wmI44G01E&BbqD|e0000W000000000W3jqKk01E&NR|fz9 z0000e000000002vbO8V&01E)PhX()v004gg8~^|S00000b$J2+A^-~j%s2=D00000 zFaQ7m00000wU7Y-A^-~j=->wc00000Bme*a00000V=DpxA^-~j3Xy z03rYj0MHKy0000002lxO0000003F5x03rYj0BnZ{0000002}}S0000002s9a03rYj z09^0~0000002lxO0000008ADD03v??3jic#1^@s6001BW00000004y>0{|ib3jo}v z2mk;8001xm00000001k|0stZa3jlz01E(A5eEPO0000a00000 z0000V1ONac01E)bJ_Y~)0000~000000002Ap#T6P01E*4#|8iZ0000q000000000J z%mM%+01E&hc?bXi0001h004gg000007y|A^-~jaMcF@00000C;|Wg00000K%svD03rYj01W5{ z000000Du4h0000002yZh03rYj0NiN?0000003-ka0000004g#A03rYj0PL&?00000 z05kvq000000ENE*03rYj0K5zb0000007L))000000J5C|03rYj00>(M0000009*h7 z000000M|JH03rYj0C<0M1^@s6003M700000007U5001HY3jlPw1^@s6000yK00000 z006Es0stZa3jl;V2mk;8001xn00000005++0stZa3jp+72mk;8001li00000007oP z0stZa3jk~v2mk;8002k;00000004-T0RSQZ3jo;R2LJ#7004g!000000002e1^@se z01E(|Lk0i<0000a000000001KiU0s201E(Ax&{CM0000W000000002$%K`u*01E(| zi3k7y000170000000015R{;Pb01E(&dj|jj00017000000001U4FLcm01E&Z;|2f# z0000y00000004gg2NDASA^-~jpqmH)00000TmS$7000003_Ak=A^-~jxU&cV00000 zi~s-t00000`^o?SA^-~j2(1PH00000FaQ7m00000vf}~(A^-~j#D)j}00000C;$Ke z00000c&z~dA^-~j5bOs400000OaK4?00000Zgc?vA^?920MtPT0000003ZMW00000 z0R00103rYj0CYYE0000008jt`000000HQYn03rYj0B8;f0000004M+e0000003$>K z03rYj0JK2}0000007L))000000PLRu03rYj04V1N0000008jt`000000LhO503rYj z0My_I004gg000yK00000004bg000sI3ji>A^-~jJf{c%00000NC5x<00000Aus{}A^-~jTsQ~-00000Pyhe`00000Pfq~= zA^-~jI3ouD00000NB{r;00000NGbpTA^-~j^jHP}00000AOHXW000006pjJ_A^-~j z0C5Na00000Q~&?~00000Sfl{}A^-~jNa=qE0000002}}S000000Db}j03rYj0QBhx z0000003ZMW0000005>fG03rYj03d1y0000002}}S000000LjV%03rYj0Gx;j00000 z0Av6F000000A?8j03rYj0GOr-0000004M+e0000002So|03rYj05FFL0000004RR| z000000090y0{|fa6aY}Z3;+NC000;O00000006ok001HY3joAY1^@s6001lj00000 z005Rs0RSQZ3jkDi2LJ#70068200000008KT0RSQZ3jj#d2LJ#7001Zg000000064C z001HY3ji?u1^@s6002+`00000004hL-v9t201E)TEe8Mq0002c000000002-X955s 
z01E)XUI+jH0000u000000002>@BsiK01E&F9tZ#c0000q000000001==m7vC01E&( z!3O{U0000K000000000xcmMz*01E(2bOrzb0000K000000001C01E&lW(NQO0000O00000 z0000G0ssIa01E)nItBm$00008000000001JI067701E(UJqUjQ000008~^|S00000 zELi~nA^-~jG<*jD00000L;wH)00000C(r=^A^-~jU;+pL00000H~;_u000001K0000009*h70000000+_l03rYj0N5S} z0000009*h7000000M~v203rYj0BofP000000Du4h00000027e_03rYj04%`<00000 z03ZMW0000007q~F03rYj0B~Ul0000002}}S000000M37#0RSQZ3jloO2LJ#7000;O z000000020;001HY3jn~H1^@s6000yK000000002$001HY3jmBY2LJ#7005K$00000 z0075e001HY3jiRB1^@s6000;O00000006qo0RSQZ3joxI2LJ#7001BW00000000%I z0RSQZ3jlv$>IVP-0000e000000001W5&!@q01E&}W(EKN0000K000000002~E&~7} z01E&BsR#f70000O000000000pAOZj)01E&}`Ue020000W000000000{z5oCs01E)L z3I_lH0000O000000000g`2qkU01E(cn+N~^004ggFaQ7m00000V7~$YA^-~j0CWfd z000008~^|S00000T@(QTA^-~j5c~!J00000AOHXW00000OC|yUA^-~jU;qdJ00000 zH~;_u00000lBog!A^-~j*nJ2900000L;wH)00000C5Zq4A^-~j1i1zP000007ytkO z004gg01xg003rYj07RGw0000009*h7000000DXr703rYj0GMqE0000005kvq00000 z0NC3C03rYj0BnN@0000002BZK0000001qGm03rYj0N`Q=0000003-ka000000M-Qo z03rYj0DMyi0000005kvq0000004Q1j03v??3jh#^1^@s6000;O00000008v_001HY z3jmZr1^@s6001BW00000006Q80{|ib3ji>d2mk;8001Ze000000065^0RSQZ3jlN? 
z2LJ#7000yK00000004Dg001HY3jhda1^@s6001BW00000004Xu0{|ib3jjQ%2mpTo z0000e000000002f$^ZZ&01E)jtOfu80000y000000001{A^-p)01E)@RR#b60001# z000000001sh5`U001E&VJqQ2*0000e0000000020Q~&@X01E)Tg$4is0000a00000 z0002z)&c+`01E&-fd~Kq0000e004gg00000cOw7*A^-~j6lewj00000Gynhq00000 zGd2PMA^-~jP(26$000007ytkO00000sI&n9A^-~jEbs>a00000FaQ7m00000KraCR zA^-~j@M;GD000007ytkO00000PXGb{A^-~jC?yC000000AOHXW000004h4Sz03rYj z0JJ~`0000004M+e000000C#5s03rYj02C<*0000004x9i000000M>;703rYj03fyo z0000009*h7000000O$+=03rYj0F*=q0000005kvq000000O-2`03rYj0C4jM00000 z02BZK000000DkfS03rYj0C;~t2LJ#7001xm00000005}%001HY3jp9b2LJ#7001Na z00000002V_0{|ib3jhe32mk;8003M700000005xm0RSQZ3jhGI2LJ#7002k;00000 z000{s001HY3jq8|1^@s6008g-00000002u1001HY3jpwB1^@s6004h9000000002; zbN~P%01E(&qXqx~0000e000000002dFaQ7|01E&(ZUz7V0000q000000002=IRF46 z01E)ra|Qqa0000q000000001m@B#oL01E)vm0000S000000002CtN;Ka01E&t z{ssU50001700000004ggyBPuiA^-~j)b|Gf00000EC2ui00000>1+T1A^-~joS6my z00000TmS$700000zW@RNA^-~jpy>wy00000Gynhq00000#xwu`A^-~j1Y-sO00000 zi~s-t000001&06tA^-~j@0001F000000000TdI10;01E)-PZwA^-~jBq4tY0000009*h70000003lWa03rYj z01zVx0000003-ka000000G`MJ03rYj0C@Qa0000006+i$0000001s3F03rYj0Awf! 
z0000006+i$0000005re>03rYj0Oa)t0000004M+e000000EeOg03rYj08GdR00000 z08D=X00000006}V0RSQZ3jj1x2LJ#7001xo00000003(#001HY3jmN<1^@s6001Ze z00000005Ky001HY3jjDo2LJ#7001BW00000001R7001HY3jmOD1^@s6002Y)00000 z004{i001HY3jplH1^@s6000~S00000004i2JOcnC01E(&u?PSF0000a000000002} zCjkH=01E&(1qT2C0000;000000002_F#!M~01E&V76$+T0000m00000000000000L;wH)00000%^!00000 zKmY&$00000^FIIpA^-~jw0H&p004gg0AK+C0000009j}P03rYj0Qg=A0000002BZK z000000F+b#03rYj0AP9t0000004M+e000000I3N803rYj05n7f0000003ZMW00000 z0NODC03rYj0N`8(0000002BZK000000J$#!03rYj01#XT0000003ZMW004gg004m# z001HY3jio)1^@s6001BW000000010000W z000000000XUjP6i01E(^i3R`w0000K000000001ub^rh(01E&#r3QZh00000U;qFB z000005#0a)A^-~j@GA!Z000001OWg500000n7{x4A^-~j2o47T000007ytkO00000 zGED*iA^-~jq)`X}00000Pyzq|00000cMSjlA^-~j$V3JJ00000AOHXW00000SpWk7 zA^-~jV4es7000007yy3&0000003T=p03rYj0IY!r000000IUE2000000F@X603rYj z08pj~0000002BZK000000Br9703rYj0GvDr0000000aR5000000L^m(03rYj0Q6-D z0000007L))0000008ml@03rYj08oSm0000003ZMW000000FHm70stZa3jm~F2mk;8 z001Ze00000000`60RSQZ3jiGA2LJ#7001}u00000001V#001HY3jnwe2LJ#7000;O z00000003_!0{|ib3ji3Y2mk;8005`}00000002Vu0RSQZ3jiqP2LJ#7001BW00000 z000|K0RSQZ3jlwJA_o8f0000?000000000{ECB!_01E(6X$JrR0000W000000000O zzXAXv01E)*a|i$c0000S000000001Y@c{rL01E&(9tZ#c0000e000000002=0ssIa z01E&FJO%&&0000q000000001dX#oHt01E(MItKs%004ggNB{r;00000luG~rA^-~j z2!jRy00000oB#j-000003gQ9)A^-~jypsq300000d;kCd00000O8)==A^-~j*vJL| z000000096100000p{f7?A^-~ji1`Kp00000Z~_1T00000edhoGA^-~jAT$R600000 z7ytkO004gg066#o03rYj05Bp5000000N?-s000000HHGj03rYj006EC0000009XJ3 z0000007V1=03rYj04z@j0000000;sA000000Pjiy03rYj0F+D!0000008jz|00000 z0ECGF03rYj02tE;0000005AXm000000Cy+>03v??3jlxx2LJ#7002Y)00000008*T z0RSQZ3jmk^2mk;8003+O00000005s>0stZa3jpv~2mk;8001Ze00000005<;0stZa z3jlC<2mk;8003M700000000vZ0stZa3jmxa2mk;8001xm00000007%x0stZa3jpj| z2mpTo0000S000000000sT>t01E&lpa%c|0001t000000000Qq5uFQ01E(^j|Kn$0000` z000000000~SpWbc01E)TWCj2L0000W004gg00000E))O&A^-~joJ9rz00000AOHXW z00000>nQ>NA^-~jz%>W}00000C;$Ke00000!p;ByA^-~jXdDLs000007ytkO00000 
zQ`P_gA^-~jkh2B=00000SO5S300000-V^`;A^-~jyhR2800000AOHXW00000C$E11 z03rYj0F?g*0000002BZK000000KWqR03rYj0Hl`)0000003-ka000000RB$_03rYj z05~HD0000007w7;0000004=Kk03rYj0Qma`0000002lxO000000H-|y03rYj0PqwD z0000005AXm000000B&Uh03rYj03d%|2mk;8001Na00000004>L001HY3jpXY2LJ#7 z001Na00000008QL0RSQZ3jnO92LJ#7005`~00000000Sw0stZa3joY)2mk;8003M7 z00000000m(0{|ib3jlDf2mk;80058x00000001-?0{|ib3jjQ(2mk;8004g!00000 z0002?2LJ#f01E)%Lk0i<0000q000000001Z5&{4s01E*4CkBA^-~jXb1=Z00000H~;_u00000@;n0oA^-~ju(5v#0000005kvq000000Pbf1 z03rYj03d1x0000005|{u000000OTeB03rYj0E|}#0000007w7;000000F*!g03rYj z0GNCR000000Av6F0000007NbT03rYj03>b(0000002BZK000000P~^(03rYj0L*v@ z00000091bf00000005m^001HY3joZB1^@s6001li00000002ZW0{|ib3jl1a2mk;8 z002M$00000000aa0stZa3jky-2mk;8002+`00000002RF0stZa3jiQ$2mk;8000yK z00000000{P001HY3jkO|2LJ#7000~U00000004jJ9smF$01E){Qw9J40000)00000 z0002$1^@se01E)XLk0i<0000W000000001n3;+Nk01E(=Wd;BM0000W000000002= z00RIb01E(EmIwd<0000O000000001<-vIz301E(ksRsZ60000`000000000lrvd;X z01JNrxO)fy00000KmY&$00000b0000W000000000`;{gC701E){6bJwS z0000m0000000022%>V!*01E&_8wUUY0000a0RR910000u1p@#g01E)bpa_2e00000 zC;$Ke00000cQ63}A^-~jJZlF4000007ytkO00000K?nf=A^-~jj8q2z00000EC2ui z00000o7?~ZA^-~j5Gn@%00005fck#`0E7Vm0000004qZR03rYj0B}JF0000007L)) z0000004&D=03rYj04RnB0000002Ba!00000007(s001HY3jhQ_1^@s60058x00000 z002dF001HY3jh$K1^@s6003M700000001w}0RSQZ3jpB#2LJ#7002Y)00000006J& z0stZa3jidD2mk;8000~S00000004H}0stZa3jlzF2mk;8002w?00000000bsr2+sV z01E)1c?bXi0000O000000002T4*>uo01E(sS_c3C0000a000000002ZumAue01E(6 z#s&ZY0001N000000001R2mk;g01E&VLA20000008jt`000000CL&@ z03rYj0DQIv0000002BZK000000JE0?03rYj0LbJA000000AK(B000000Ez$s03rYj z0NBX}0000008jt`0001g004M!001HY3jkD{1^@s6000yK00000005;1001HY3jlmW z1^@s6001BW00000001t8001HY3jq9o1^@s6000yK000000052z001HY3jjbr1^@s6 z001xm0000000026001HY3jjp_1^@s6001xm00000004=R0sta^01E&la|i$c0000m z000000002edjJ3;01E&JvIqbG0000q000000001j9{~U&01E)DVg~>K0000K00000 z0002&iU9y301E(^*9QOq0000e0ssI200027#Q*>z01E(crUn220000K000000001* 
z!vO#y01E&x_Xhxf00000AOHXW00000RYd^+A^-~jEOiF}00000Bme*a000006(0fs zA^-~jKraXY000006aWAK00000k>UUVA^-~jKraUX000007ytkO00000vNr$#A^-~j z6mkXt00000Bme*a00000Q9%FzA^-~jxP1ly00000OaK6X000000Q#H)03rYj01$Qv z0000004M+e0000005=x_03rYj0N7p!0000004x9i000000QH0b03rYj0O+&^00000 z09*h7000000P1T303rYj0DvwC0000002BZK000000KlgJ03rYj0L01E)XqzC{2 z0001g8~^|S000000Sf>CA^-~jbVLRK00000AOHXW00000Ih6tcA^-~j&~yj@00000 zFaQ7m00000_7DI7A^-~j)MW+$00000Gynhq00000knI2fA^-~jtT_h&00000Gynhq z00000Q}hA=A^-~jw2lY>00000Pyhe`0001g07tn103rYj02BlV0000007w7;00000 z035gg03rYj0K@_Z000000MGyc000000N;NC03rYj0BCCn0000005AXm000000G4e4 z03rYj08nfO0000003-ka000000D;v303rYj0Bi>c0000002BZK000000G$E@03rZ? z3jo-k2mk;8000~S00000006Kn0RSQZ3jhph2LJ#7001xm00000007e^001HY3jhdL z1^@s6004vl00000004dn0stZa3jh%82LJ#7002w?00000000(10stZa3jl~f2mk;8 z001-q00000008M30stZa3jj>`2LJ$n0000i000000002kegXg@01E&>ItTy&0000m z000000000yJ^%nB01E)vbp`+c0000m000000000%Y5@Qu01E)Lf(HNq0000m00000 z0001%C<6c@01E(Ast5o80000W000000002cd;tI=01E(^NCyA_0000W0001g00000 z$p-)cA^-~j+++p-000006aWAK00000nT7xWA^-~jlz|2S000007ytkO000008iN1; zA^-~jc(Voo000007ytkO000007b^n*A^-~j45tVH000007ytkO00000pUwgRA^-~j zyo(3`00000FaQ7m00000qP+os03rYj06g>u0000003ZMW0000006aAV03rYj0GO@_ z0000005|{u00000068@P01^NT0JPWz000000F(d#000000926x03rYj08GIK00000 z0C)fZ0000006nMy03rYj0Bmgs0000005AXm000000K0_&03rYj0DQcE2LJ#7006uM z0000000013001HY3jq9d1^@s6000yK00000006H8001HY3jk<61^@s6003M700000 z0012R0RSQZ3jpvW2mk;8000~S00000004-A001HY3jlPG1^@s6006uM000000014x z0RSQZ3jj3v2LJ#7001O^000000000JJOcnB02BbQy$k>V0000O000000001ou>b%f z01E&d00#g70000i000000000DC<6c@01E&Zst5o80000y0000000027hynm201E&l zZU_JX0000;000000000|?EwHH01E(|90&ja0001-000000001gsv!daA^-~jjG_nt z00000C;$Ke00000TEzhXA^-~jc=!hZ00000Bme*a00000bcO%`A^-~jkhTT@00000 z6aWAK00000RYn2;A^-~jbV3LK00000Pyzq|00000mD>OSA^-~jSSbep00000TmS$7 z00000u+jnmA^;120FaFc0000008jt`000000Nxe=03rYj0Pscz0000003ZMW00000 z02C4d03rYj0Nh*$0000004x9i000000FQ(L03rYj0PwO0000000K5bM000000Obz= 
z03rYj00>0}0000003ZMW000000ANr601^NT0QmR?0001g001ll00000003xP001HY z3jnx?1^@s6000yK00000004hU0RSQZ3jp+X2LJ#7000yK00000006x60stZa3jmy% z2mk;8003+N00000004Zt0stZa3jid92mk;8001Ze00000003$)0{|ib3jo}!2mk;8 z004Xd0001g0002hqyhjU01E(EU000006aWAK00000Y5@QMA^-~jm^lUj00000ga7~l00000301E(^ z0tf&A0000y000000000GPyzrV01E(rY#j&y000006aWAK00000T$}*_A^-~jwB-i? z000006aWAK00000MS=nVA^-~jY-|Vs00000FaQ7m00000KFa_AA^-~j@EQjI00000 zKmY&$00000uXh3fA^-~j)M*F+00000FaQ7m00000ryT+|F01E&x8wUUY0000K0000000023*Z=?`01E*8CIA^-~j=z<0S00000AOHY=0000000BY+03rYj z0I0MG0000004M+e000000I~@I03rYj00317000000B`^R000000K0Gi03rYj00eFZ z0000003ZMW000000BExT03rYj0Fdqn000000E7Vm0000003y8s03rYj0Q~a@00000 z06+i$0000001<9~0stZa3jhpZ2mk;8000yK00000003WO0stZa3jmy32mk;8000~S z00000001fJ0RSQZ3jq8V2mk;8002k;00000000X%001HY3jkbj1^@s6000~S00000 z005kG0stZa3jp9{2mk;8000yK00000003ej0{|ib3jm;hs0aW60000S000000002c z8UO$y01E(^N(KM`0000$000000002bN&o;N01E(|f(8Hp0000?000000000xpaB3P z01E(|=LY})0000q000000002AJp%wD01E)@vIqbG0000a000000002zIsyP901E)f zJqQ2*0001gga7~l00000K|TQhA^-~jz-|Ws00000TmS$700000r9S`wA^-~jPpF00000+qMA!A^-~jsPG2>00000L;wH)000009ykC1A^-~jRB{FY00000 zFaQ7m00000BFO*%A^-~jj28z0000007y$qP0001g0Ge000000Du4h z000000Ni;103rYj07R+=0000006+i$0000006k{{03rYj0EAu$0000002BZK00000 z08xek03rYj08GIL000000K5bM000000L;+=03rYj0FVR-0000005|{u000000OHXA z03rZ?3jll`2LJ#7003M700000003tL001HY3jo|a1^@s6001-q000000028|001HY z3jkDW1^@s6002Ay000000038R0RSQZ3jp|p2LJ#7000yK00000001=v0{|ib3jk1{ z2mk;8001BW000000021D001HY3jl~B2LJ$n0000?000000000z`2YYS01E&BLk9o= z0000e000000000$&jJ7<01E(6j0gY#000000RR910001X9RL6#01E)*PX+)00000i z0RR910002yOaTBQ01E(Icn1Ig0000m000000001rIRF4601E)5as~hZ0000?0001g z00000bAti^A^-~jv^xj@00000FaQ7m00000C|UskA^-~jsC)+i000006aWAK00000 zreXmAA^-~j*nS5900000$N&HU00000E(rkuA^-~j&{PKi00000FaQ7m00000(6;~p zA^-~jtO5rB000007ytkO00000l1%`A03rYj0FZ3#1^@s6 z001-q00000007?;0{|ib3jm;`2mk;8001BW00000006Ux001HY3jm0M1^@s6000yK z00000005yj001HY3jnBa1^@s6001BW00000002q*001HY3jjFA1^@s6000;O00000 
z006Ny0stZa3jjPl2mk;8000z!000000000^umS)g01E&dfd~Kq0000W000000000) zfdK#_01E)fsRsZ60001R000000001?4FLcm01E(A;|2f#0000y000000000%IRgM8 z02Ba-y$k>V0000O000000000ieE|R>01E)PNCyA_0000e000000001g_xAt*A^-~j zSV9K?00000WB>pF000007%l(+A^-~j03rYj08sY_0000002BZK z0000007Y^D03rYj0N|Vk0000005|{u0000000%b!03rYj04#9^000000Av6F00000 z0IfFx03rYj07PvE0000004M+e000000Nob=03rYj0Nh6g0001g000yK00000000E+ z0RSQZ3joL(2mk;8005i-00000005OB001HY3jkbH1^@s6001Ze00000006&T0stZa z3jkPJ2mk;8003M700000007~_0RSQZ3joCS2LJ#7002|~00000000lp0stZa3jh?0 z2mk;8001Ze0001g00015zW@Lt01E)T3kLuI000000RR910002o0|Edd01E)*>IVP- z0000K000000002a8v+0#01E(E`3C?10000W000000002(tN{Qb01E(+ZU+DW0000m z000000000h&;kG=01E&tfCvBp0000a000000000G0{{SjA^-~jI6MXb00000bN~PV z00000m|Ow?A^-~jU?m6u000006aWAK00000v339eA^-~j@NxzK00000FaQ7m00000 zqzeK7A^-~j&?X1~000006aWAK00000wIu@pA^-~jJfsK!00000C;$Ke00000Dgyuj zA^-~jtULyP0000005AXm000000G+7-03rYj0L9001HY3jl0b1^@s6001Na00000002D^0stZa3joX~2mk;8002Ay00000 z000AjW&!{r01E&_T?haG0000e000000000W9RL6#01E)1PX+)00000)000000001v z!2$px01E)nbO-b00000mJI^{A^-~j z)S?Ig000008~^|S00000ZxaCkA^-~jlwAh^00000C;$Ke00000beRAEA^-~jpu+|L z000007ytkO00000#svZZA^-~j#3cv-000008~^|S0000096kU5A^-~j$X^Bk0001g z05|{u000000OiyH03rYj09XeI0000002BZK000000C7bC03rYj090TG0000003ZMW z000000DwLN03iSk06>Ka000000BQgL0000005OsP03rYj0Ca~20000002BZK00000 z04E9o03rYj00348000000Av6F0001g006*H001HY3jm~p1^@s6001BW0000000559 z0RSQZ3jhQH2mk;8001-q00000002_a0RSQZ3jovt2mk;8000yK00000000zZ001HY z3jkz`1^@s6004LZ00000002+n0RSQZ3jpjB2mk;8004LZ00000003}#0RSR@01E(! zn+E^@0000W000000000K(g6S>01E)f1PA~C0002+000000002N?EwHH01E&(;0FKz z0000u0000000011)&c+`01E&#jtBq%0001h000000001{L;(OI01E*4bO!(c0000q z000000002uTL1te01E*4hz0A^-~jG*kuv00000C;$Ke00000>M8;NA^-~j;1~!1000006aWB!000000MAVV z03rYj0Q6G`0000005|{u000000E|Ba03iSk04RnD0000003ZMW0000001<%#03rYj z01#{l0000005|{u0000005XCA03rYj004gm0000003ZMW000000Kyvp03rYj0MKU! 
z0000007L))000000GNM&0RSQZ3jkQA2LJ#7002|~00000002ia001HY3jmN^1^@s6 z001}u00000005ov001HY3jiEG2LJ#7001xm00000008Og0stZa3jlDI2mk;8001Na z00000003y>0RSQZ3jnAT2mk;8001Na00000001460stZa3jh#*SO@?B0000K00000 z0000yhX4Q~01E)5wgvzI0000e000000002qy#fFt01E&dh6n%v0000e000000002G z&jJ7<01E(6jR*h$0000K000000002H;sF3601E)j6bJwS0000a000000001|C z00000TmS$700000Df0pVA^-~jgqa8c000007ytkO0001g07WVR03rYj08D5H00000 z05AXm0000007+5;03rYj0K8WS0000006+i$000000K7c_03rYj0CZml0000008{_~ z000000Oy7Q03rYj00_nh000000K5bM00000071e503rYj0DSfb0000003ZMW00000 z00ZU$03rZ?3jmn02LJ#7001Na00000006`m001HY3jokY1^@s6001BW00000001=E z0RSQZ3jo9m2mk;8000;O00000008zF0{|ib3jk!M2mk;8001-q00000000oB001HY z3jo;21^@s60000100000006Z@0{|ib3jpM_2mkh)D01E*8!Ug~U0000e z0001g00000yA=ZfA^-~jV4es7000007ytkO00000;9~#)A^-~jB#Q000008~^|S00000OLGE$03rYj0Ms!E0000003ZMW000000Kn$~ z03rYj03;U(0000007w7;0000005+=u03rYj0DS8Q0000003ZMW000000Lj(?03rYj z01OBS0000007w7;000000E6oQ03rYj0AM!<0000000;p90000005qfm03rYj0Hj}k z2mk;8001Ze00000008OG001HY3jo+12LJ#7002Y)00000003Ek0stZa3jlCB2mk;8 z000yK00000000})001HY3jmNG2LJ#7004vm00000001j~0stZa3jkzl2mk;8003M7 z00000007qv0stZa3jho!2mk;8003xz000000002T`v3qU01E&-Lk9o=0001Z00000 z0000CI{*M801E(|Zw3GW0001R000000001(U;zLk01E*0G6w(v0000K000000001z zx&Z(p01E(w@dp3^0001R0RR910001YY61Wv01E)jUkCsI0001F000000001gJ9Gj7 zA^-~j;ARK_000006aWAK00000n*Ra-A^-~jFrEki00000FaQ7m00000ov#1@A^-~j zsQ(5400000FaQ7m00000uKfZ4A^-~jNRbEt00000C;$Ke000008*>2wA^-~jpoa$l z000007ytkO00000`ltW^A^;120957%0000008{_~000000LV)K03rYj0F;0R00000 z05AXm000000BFVm03rYj0O001Ze00000 z005l1001HY3jkaM2LJ#7000yK00000008<^001HY3jkz=1^@s6001xm00000003NT z0RSQZ3jnk|2LJ#7002z0nd^4}lep{<5-~DgOClQ(;L{LX%ClQ(;L{LX%NmD|TVe=FPH(yL*F_*AS0V@V{Gi_mTNtdx00TmllbTn*bb8|^kb462ONmDRi zNmDUjQ#D^jMMalE5djyMpgao>26QuRVQ@*8u^0gr7*ljKY-MwENmFx0Q(;L{FkeYi zF<(NmD|X@gV^v26QuRVQ@*8 zu^0gr7*ljKY-MwENmFx0Q(;L{FkeYiF<(3iQ(;L{HD5_nHeXXYUzb4<0T-8`JPQs6bTe&Xa7mZ37y%U+Q*<ClQ(;L{LSIEiL@`Bn0AE^8Q*<+J 
zVQ@)Pb5mbQm$4WD6_+zb0U8!EUqwYlQ#4;wVM$XpUrAFoUsE|>MVB#!0u~ExQ*<73vE+$G;m>Qa!E^VmytaI6qi)m0T2>3Uqw@NG;C#ab4hJQQ(;L{ zHIrfU6a_h7Oky#Y@dFJO3vE+$G;m>Qa!E^VmytaI6qi)m0T2>3Uqw@NG;C#ab4hJQ zQ(;L{HIrfU6a_h7Oky#YuuTCd0d1E7$N?3XLlFTL3vE+$G;m>Qa!E^VmytaI6qi)m z0T2>3Uqw@NG;C#ab4hJQQ(;L{HIrfU6ahJxErkLa3vE+$G;m>Qa!E^VmytaI6qi)m z0T2u|Uqw@NG;C#ab4hKN4LA!O3vE+$G;m>Qa!E^VmytaI6qi)m0T3lMUqw@NG;C#a zb4hJQQ(;L{HD5(VLor2m0AE^8Q*<+JVQ@)pm$B~*6fAT!Y-MwENo_?_VM$YTG;m>Q za!E^VQ!-ygMMXn0MRovRT251RGi_mTNo|+0?+g?ybTn*bb8|^;MN?r(Q*<Qa!E^V zQ!-zdK@kBL3vE+$G;m>Qa!E^VmytaI6qi)m0T2u|Uqw@NG;C#ab4hKN9G(mq0d1GD z?+g?ebTn*bb8|^;MN?r(Q*<>au;7(F*HPM zZ)Zhva&K};Zf<3AX;4dJdY6A^4H%cw>k1Q>Fckqo7GGL1Gem4}XGv~mZ%J-$WpZgy zOJjPM(0&Y6m(_j@L6;O30h*WBKn)a^;1&T4m)>#$3zvX80Uej>CIW)DlNSLV0hdrN z4Hr0Ya7A_iUs_HwXJs)kM`d(FZ*FvDcyvW}0AE^8GG}EuM@2(#ZggdMbTKklZ*pr> zbaG{3ZAoO8(d!Bne=;~nWpqVya&K}?b7wMmtb7wyF(eHcmp}CjDwoJx z3lWzN69EyI+JX%c7BMk(ZfS9KWnXM>V{1uMbTw^tbY*y#fbR?#5H&|-bVYJmk<;I6qoH70SA{w6ag?4Lor2DbTKnuNpEvsPjF>! zZEs{{mlpL4QI}R10X>&P83CD>C&d9DmybOG771{0MRovRT24!sF%tnW1T%AVa+lHT z3KSDDH%Dc3S7CB)X>MmtWpi_3XJwbs>k1T?SFj2c7yxs0WMy)40A^`yWo~Z(Us_Xi zG-GddbeDl~3=4UiVQFk-WKDBtMN*f{7Xc~-Qd2NrNtdx00TmNdGG9|+NmDalNmDdmQ$t@xMMam8 z7XdB^HDYCFX>LV!mw~_mDwkk*0S1?a)(a1pI~V~eCuC(sb^u>mF*HPMZ)ZehVs&Rp zZf<3AX;4dJdH`QqPE&L! 
zMVBzf0u>Web45~1Q!rluUs_I6bTe&Xa7mZ37y%WRGerR!95P=;MMXDdZE$R1V`WK7 zQd2fxQ!-ygMMYCJUzc&+3m6tRUqxa?L~u`3UjScPPE%n?Q*%W`F_(b-0V$WIIsp`y zgVYQXmk<;I36~BN0TByJQ*%W{MRr7RPgIvc+zTp~V0Zxr0&sAb;Y|S`m!&!Z6PHxl z0T2^4Uqw@NG;C#ab4gQkMN?r(Q#F%e^ArI&lMwPQm!vuY6_*u|0wR~BIsp`yRN4U$ z6E$B&Q*<0Th>1+5r$0HD5(jbTn*bb8|^kb462ONmDgnlacikm$MiF z6PGhZ0U8uCUqwYlLo!8n0AE^8Q*<+JVQ@*8u^0grmor5H8UZqskM$Lovlsyrmor5H z8Uiw3mq8H$7nh(s3l5j1Isp@xRN4U$4K-gyQ*<2pWpHnDbWL+~b#7%tZ*6dCY-L7aX>4U=O><{OQcIUG-V7xOQ!rms zb45i(myj0$E(SGaZE$pXmyQMMXt4W@&C@MN(5XUjScPPE&La zNtdx00Tq`sMFAQVGG9eSMN>3iQ(;L{HD5_nHeXXYmk<^KFb7j~Gi_mTNtdx00Tm@v zbTn*bb8|^kb462ONmDRiNmDUjQ#D^jMMXtLZe>MMOH)H%mvFueCI?e=Gi_mTNtdx0 z0TnS*bTn*bb8|^kb462ONmDRiNmDUjQ#D^jMMXtLaCAj>0AE^8Q*<+JVQ@)Pb5mbQ zm$4WD6_+zb0U9teUqwYlQ#4;wVM$XpUrAFoUsE|>MMXt7Wo>Y5VPj=UN>WocUs6*! zUqwYlQ#D_gaor0TL_uFgVnsx7PgGw3Us_HvGDUK7Z*omxZeeF-axyhXWpqt*baifJ zLvL+xX>4UiVQFk-WKDBtMN&&sb5c`uGi_mTNtdx00Tq`sMFAESQ!rmsHD5(VMF3w~ zPD@jCGi_mTNtdx00Tq`sMFAQMGG9eSMKxt@aCCW>fc*g}26QuRVQ@*8u^0grFjI6i zY-MwENmFx0Q(;L{FkeYiF<(v(RZBsH|MMXt4W@&C@MN(5XUjScPPE&Lv(RZBsIr5EcP00d1E7$N?1;WpPDPOH(#q0AE^8Q*<+J zVQ@)pm$B~*6ftx(Y-MwENo_?_VM$YTG;m>Qa!E^VQ!-ygMMXDdZE$R1V`WK7Qf*T* zUqwYlQ#D_gaor0T3O8RxVnsx7PgGx*aJ~#A0d1E7$N?1saCDc^>k1S;GBQVHZdY$| zYg2S`WnpbeWJG0VVRdYDMRovRT23-&WjRMhLvL<$Wq5QkHB)7DS8sA_Q*?4=VQood zMQxXGz6>M*ZI=Pa0TmQ(Wkpg;Q#M}!Us_I6bTe&Xa7k^KvF{8N8gw*lWpi^$ZADXI zNmFz*aA9e3NlR^0GG9fPtQ-v|0XLUX@Cy_oGC4ClQ(;L{LSL89A^{`@bTe&Xa7mZ3 z7y%U+Q*<ClQ(;L{LYDzV4HTDf zH~|Wmpgao>26QuRVQ@*8u^0gr7*ljKY-MwENmFx0Q(;L{FkeYiF<(G10Ve@%mjTED6$3>?F_%Dg3nu|>m+=D)77J}tbTn{bX>v(RZI_Wf0Th>1+5r#~ zHD5(jbTn*bb8|^;MN?r(Q#D_czw;IgZBukKaA9e3NlR^)kv#zvmsHvT5E3JJM0E`im#)(R z3YS0*0W-JmGXZA?mwM_73b&m)0a*c;&o%)Imq2m>d6%#~0n!~YF?DWfadl;1Y;R+0 zNmFz+ZFO{Ictt}pMN(5Rmk<&I6n|56HEnftWq3tUR9{k4F<$^*T251RF)?*+X>oOB zUuoOBUuV{1uMbTw^tbY*x&Hg#@nZ)0mkb^u>mPM1ao0w0%; zJpmS%xU35dm+MRn3QaX%UvF@8F*jddX>?_BUuAK1VQh0>c42HuOH*@2MNU&NUokdc zNpxj$UuAK1VQh0>c42HuR9{6!Qd2Zv0AE^DbU9yNW@&C@UukBSAW8u^ElyJ~Uo~G} 
zZ*X%lHeX3}WpZC-adlyAb6<90Y)MN~b45i(R9{m1+5r#~HD5(jbTn*bb8|^kb462ONmDhKL3Irim#)(R7nh_u0Th>1+5r#^HD5(j zbTn*bb8|^kbC*Go3={!jmyx^yDG)<4MRovRT2518NmFx0L^79g$^j#nZ*>6(mv_bj z76Maqm$AkIBLY)%mu$uYAp}!%MKqUiuM8BHJOK=sFvbEE0#kFBp>+Wy2SYMNQcF`X zm%&>L7$Pw_Urk?fVQyz-UukY>bZKF1X?kTzP)k#DMMZX0UsE+-m%RT08w*o(H)d~g zcVTj5Ntcm50Th=;!wegjuqF!)m+)!~8JDCw0Th>1+5r#~HD5(jbTn*bb8|^kb462O zNmDhKpGyH61TkMzG?!8N3LgqJUqxa?L~u`3Uza~r3?Y}KIsp}zG}QqRmz5&}8kSB0 zDF`)XZE$pXMRu2wBLg0mP5~*G1;GIqm!Lcg4hD2HZDDXpm$4WD6&O==G;C#ab4gQk zMN?r(Q!rmiQ!!stHD8zUgaQ-`LSIEwH(yg>NmD|XL3Irim#)(R7Y9>xGi_mTNtdx0 z0TmcibTn*bb8|^kb462ONmDRiNmDUjQ#D_g@q_{t3qoH-Q#W5zVM$X$m!LZVDhE?^ zGi_mTNtdx00TmcibTn*bb8|^kb462ONmDRiNmDUjQ#D_gFVz7Rm(Ml1+5r$0HD5(jbTn*b zb8|^kb462ONmDgnm(L;rB$uT+0TY*0+5r#~HD5(jbTn*bb8|^kb462ONmDhK0YnWH zmvA@%3YVZf3l5j1Isp@xRN4U$6E$B&Q*<VMN>Cl zQ(;L{LSIEiHDz*Pb7hwy*#Q*`ZBukKaA9e3NlR^)kv#zvmsHvT5E3j0SXIkQ*<Qa!E^VmytaI6qi)m0T2>3Uqw@N zG;C#ab4hJQQ(;L{HJ6`D0T}~rQ#6-R`3fHjHD5(yMMQ8Qa!E^VmytaI6qi)m0T2>3Uqw@NG;C#ab4hJQQ(;L{HJ7U# z4JZROUqzQmBnuw5CQt#*0+-4Q0uYzrjtdEwuE7ismnl;LbC>r>0T-7$?+X%_;EoFr zm!new8kdPs0XGLzbTe&Xa7mZ37y%UNmD|X5jG1Z0YaCtI}AGkZI=Pa0Tq{`Zvj35 zZI=Pa0Tq|TY6}$$ZBukKaA9e3NlR^)kv#zvmsHvT5E3pD3`!d0SpUmQ*<v(RZI_Wf0Th>1+5r#}HD5(jbTn*bb8|^;MN?r(Q#F?n zHVY;KHD8x;Ede*T?^6Lo1ecO&41N06a&%v5bY*g3bZ>G=R9{6>Q#qG>y$pjWbTTkvVPk7wX>N06a&%v5 zbY*g3bZ>G=Q$b%vMN}|fR9{m@UsFY2mLLLc1~p=3XK8MiP@N2Rmjf3IO_xi60wk9q z6#`b53vU7ymsjizW&&_`wSAhafmv`bx zKym>rmtMsI3zwB60~(hQ$pI_3>~;at0hh420t}Zxasd>Vk39hvmz#D0443On3<@MQ zUte!c42HuOH*@2Lor2CQ!-zd?AQW3mybOG7MIj14KWco zUjScPQ*=3BUuJ1;WM64!mmo?3I4(|8FkdxaUvF@8F*aXGbY*g1WpQ<3Y;#|BVQfiD zQ*%W{MO0r?LSIutUsNz(m!Vk#9+z8`3?G-!Z~+kyQ*%W{MRrhBUqo4G@1+5r#~HD5(jbTn*bb8|^k zb462ONmDhKvET|I0X3I!`vD-Ar8)rA+qB;Q=mq6GGCI?e=Gi_mTNtdx00TmcibTn*bb8|^kb462ONmDRi zNmDUjQ#D_g@q_{t3qoH-Q#W5zVM$X$mk@ISDF;(@Gi_mTNtdx00TmcibTn*bb8|^k zb462ONmDRiNmDUjQ#D_g@q_{t3qoH-Q#W5zVM$X$mrvyjC;>v3pmPB_2UB!2ZDDXp zm$4WD6&O==G;C#ab4gQkMN?r(Q!rmiQ!!stHD8zUgaQ-`LSIEwH(yg>NmD|X+?NX_ 
zm!N6^Aqi7+Q*<+JVQ@*8u^0gr8&h;NY-MwENmFx0Q(;L{FkeYiF<(NmD|X zfdLB^2U9U$Qd2=+myy>27Xd<-L6HF@2UB!2ZDDXpm$4WD6&O==G;C#ab4gQkMN?r( zQ!rmiQ!!stHD8zUgaQ-`LSIEwH(yg>NmD|XvET|I0YaB>`vD*ZbTe&Xa7mZ37y%U+ zQ*<ClQ(;L{LYEClQ(;L{LYHuE z0W1PYUzY(r0u%v4m*J8DDhE?^Gi_mTNtdx00Tm@vbTn*bb8|^kb462ONmE}*Q!!st zHD5(VMMXtoMME)3Q$t^3mjU?!Dwm`>0Th>1+5r$NHD5(jbTn*bb8|^kb462ONmDgn zMMXDXONmD{!MMX4oX?kTvc9%ii3n>Xxb5nFPZDDXpm$4WD z6(3V{G;C#ab4gQkMN?r(Q!rmiQ!!stHD5(VMMXtJF_&@N3mO4JmvH+5AO>_ZZDDXp zm$4WD6&O==G;C#ab4gQkMN?r(Q!rmiQ!!stHD8zUgaQ-`LSIEwH(yg>NmD|Xxh4ZI zAwpk8Q%GL`Us_I6bU0s9VqbJ}Wo1ciQ*<3vE+$G;m>Qa!E^VmytaI6qi)m0T2>3Uqw@NG;C#ab4hJQ zQ(;L{HJ4B23n&3Km!NY2EemZ^bTn{bX>v(RZI_Wf0Th>1+5r#}HD5(jbTn*bb8|^; zMN?r(Q#F^|mkTAAJ#+yJ2yI1UY(-K_ZI|Gq3?>0>mjTED6_>Gn4IY=kQ2`7KZBukK zaA9e3NlR^)kv#zvmsHvT5E3v(RZI_Wf0Th>1+5r#}HD5(jbTn*bb8|^;MN?r( zQ#F@xZUHO;L|>NyJpvQ~HJ9O%0Vn}&m+>Y776?T{F-cQ4Ut*Ub{tO(J&kGv?ZI=Pa z0Tl-}Urk?dbaIyw!T}`#ZI=Pa0Tlsomw{^mECFqo0muOr0z)yEaoh_U9yMP@0AE^8 zQ*=0AQet0pa%E*nZBukKaA9e3NlR^)kv#zvmsHvT5E3NmD|X@e>Ov0YaB> z`vD*ZbTe&Xa7mZ37y%U+Q*<Cl zQ(;L{LYMG70wo?oUqt|4T251RIA2m?UvzS1Wl3#QbTn{bX>v(RZI_Wf0Th>1+5r#} zHD5(jbTn*bb8|^;MN?r(Q#F_I6ALLGHD5&lUs_I6bU0s9VqbJ}Wo1ciQ*<ZtM#PmpY{j3zsnR0Y;bIRss%}MZ^IgmnHH6Q42U@VQ^t%X>@r- zc9(!s3y+uPfC5mLQML>hmquU<6b&?AMMXtZIbUs5UsFPtF%JV|mrnEn7?(NC3k#P( zasd>Vkn#*wmxy@{4iYjjVqs%zUukZ0WpZ?1X>?_Bm(jBeEtgP;0VxPYLor2CQ#W6i zF^>T>myq%dFqg2E3>KF#cLETXEm{MYCUi0|Vqs%zUukZ0WpZ?1X>?_BVRUbDNmD^z zMMYFGUsNz(Q$}Bv4+D&s8F&o~m%}6iGnZF^0$!Ik%mNaZ%;W-Hmuu4u8JFdN0#Xz; zUrk?Sa$$32Utx4#Wo~3eP*XIO5d(?^HDhdLVVAxN3wyT?w*iL*mz}-~FPBe13>%j} z?+Xr>;EoFkmre`~5hG7iFkb*)T2518NmFx0Lo!8DOH*F}Us_XiG;?=ha7j>8bC;l3 z0a^hymk(C~8X7fUMMXt5c42IFWkq&HG<11zWkpg`HD6Okml4AOM7Kb_0R;z_>sA68 zmr!W|1ebus0jHOC`vDo3V3GkrmrfoH50|jX0Wz1+7Y!ztD#iihm#mip7?w@}DYtCM z0TlvLHD6zEaC0#>UrBUja$jX}bzy9CUv^<^NlR06MME-0Qd2Tt0AE^8IbUCAZgpQ{ zcz88mUvF@8F*aXGbY*g1WpQ<3Y;#|BVQfiDQ*%XQY)O|f7XcTSKuQ4^14Tt*m+-p* zA(zl_0T7oSc>xBusLBD61((2Q0|b|_f&mM+&V>OK1h?$Z0l)*7Icot8m+y27ikFIO 
z0V9`SHVjF(vef~11(&~30UMXFgaT`qK5PS&mmW|I4VNHu0UDQZKMQb|&NBi+mp^I& z441&y0fv|CmkSk_ESCatmwz+@4VSil3lW!h^Z|R96g&Yp3{zoAQ*%W_F-1~KQ)X8ZewLhP)k#DmyNmtECn-PQ!`(e zp;-bRmjQMG8V68IQ*%&LbC;|30V$WHIsp`yRN4U$6E$B&Q*<0VS8DIsq04MME-4Q#M~>mjU?! zDwm`>0Th>1+5r#~HD5(jbTn*bb8|^kb462ONmDhK@e>Ov0X3I!`vD-Ar8)rA+qB;Q=mz|daCkIn>Gi_mT zNtdx00TmcibTn*bb8|^kb462ONmDRiNmDUjQ#D_g@q_{t3qoH-Q#W5zVM$X$m%zmV zLClQ(;L{LYJ|%3l^8E zxdIvnbTe&Xa7mZ37y%U+Q*<Cl zQ(;L{LYMFe4H^b9Us6*+UzhJS0vHHFUqwW4PgGx*QNIBwmw=iA90yZ$Gi_mTNtdx0 z0TmZhbTn*bb8|^kb462ONmE}*Q#D^xIbWB*5e*rauFL`rmmu8>9tLzXZDDXpm$4WD z6&h1?G;C#ab4gQkMN?r(Q!!sjQ!-yuHeW?Wmx1~LAD1s@3=Eg7`T+@-UB3YzmpUX2 z7YS2yQ*<+JVQ@*8u^0gr7gKaJY-MwENmFx0Q(;L{UrAFnUsE|>mk$;VGY3<2Gi_mT zNtdx00TmcibTn*bb8|^kb462ONmDRiNmDUjQ#D_g@q_{t3qoH-Q#W5zVM$X$mudL{ zFb7j~Gi_mTNtdx00TmcibTn*bb8|^kb462ONmDRiNmDUjQ#D_gd^rLp2UB!2ZDDXp zm$4WD6&O==G;C#ab4gQkMN?r(Q!rmiQ!!stHD8zUgaQ-`LSIEwH(yg>NmD|X4=4g6 z0YaBI`T;QqQ*<+JVQ@*8u^0gr8dG#MY-MwENmFx0Q(;L{F<(hjGG9|RUqwZif%*X- zmq7Ll4VSI@0VD@gbTe&Xa7mZ37y%VNQ*<Vr@Us_Xi zG;MEoWl2(Pmx0*}BbU%90RBL`D-Gi_mTNtdx00TmcibTn*b zb8|^kb462ONmDRiNmDUjQ#D_g@q_{t7D8V|Q#W5zVM$X$UqwYVV{Bz%az%ERfxrPO z2~%@ZbTe&Xa7mZ37y%U|Q*<ClQ(;L{LYKKF11}yzUqt|4T251RIA2m?UvzS1Wl3#QbTn{bX>v(RZI_Wf0Th>1 z+5r#}HD5(jbTn*bb8|^;MN?r(Q#F^s#Q{PAZI=Pa0Tl;hY(-K_Q#F@R{{bWcZI=Pa z0Tq{oJpv$?KvDq;3vE+$G;m>Qa!E^VmytaI6qi)m0T2>3Uqw@NG;C#ab4hJQQ(;L{ zHJ7oq3l^8ExdIFeZBukKaA9e3NlR^)kv#zvmsHvT5E3;j4HTEI%mNLU zAl(Za0d1E7$N?3Xg!%y-m+))?43}RW4GEWBzX2bYIwTAi0d1E7$N?3X6c!C33vE+$ zG;m>Qa!E^VmytaI6qi)m0T2>3Uqw@NG;C#ab4hJQQ(;L{HJ54m0WJY;mjTED6_=ov3mXA# zmjTED6$&?BOky#E2B z0hdgT3l|4dbTe&Xa7mZ37y%U+Q*<ClQ(;L{LYHtu4I!0K0yGCxbTe&Xa7mZ37y%U;Q*<)8x84^54hEMfnG6w@^Z`znAW;kzx7_OjA_12lQ49{35NHA!mh=Hl0YjHz z>jGt$?_2=~m*x2iQkT$w3=5YPJOL7yKyeFdm*BJlaF<#t3|ay)MVE0!17??vO$-s2 zqXz>Um%vj3c9#V%3>lV114oxmYXT1gHeXPeG0h7Tmq2m>6qk_l3{{sXMhpxTQ*<&g zVqs%zUukZ0WpZ?1X>?_Bm(jBeEtgP;0VxMXLo!8DQ#Y3o5(5;M0tF3H22fOAQd2mW z5g!c}moL%`MVH-r13#Ba^#KkPQ*<&gVqs%zUukZ0WpZ?1X>?_Bm(jBeR|Q2>UsFMs 
zpO^z1moR$+Ml^IXFk)e2YhP(@b7gXLUukq@a$$6Da!FG&UrAFnUsGX8Q#D^jMMXta zF<(?LUsFS0Q$m-mJ^>k*?BoJGmvEB{8@KWw13LkiK(+!^mq^bH6_-hk3x=0Xh5{v* zw>u0Rmp~y5X_upK0VJ0|Aq;7k@Z<{$w+J5sRRWj%whSAWUxN$*3mKOX$pI~w zn%oQvm%YLP7;0s4Nn=G$IbUCAZgpQ{cz7`}UteWzVPb4$UukAZSaWhybU9yNX>(s= zXkl_mR9{6mUte}%Y;|QtVnsGzUvznJWkpg;Q({R|Ghb75F)?FkVRBz|a$#w7b4gP( zUqw@4NmDalMMYCXm+`v+A(ucQ3~IN&C<2KEmu~F=3zwil4RyD`=mD+-ml{X{VwW(M z0u+~^unPyb#4ZBg1D73?3l*2FxdIoLur>m1mvATpIhT-`0tT1QgbWFnxhMl7mxjOr zC6{0}3`v$I3>UYVHUb<0w_Z2`*8!J6QUMs3u!I6@w6IALsTZ)0I}Wkpg`IA3j-0a^k%m!vuY6_+wC0}=*PGG9|QUzhNJ4HuVP)Cv}t zP$UB#m!vuY6%<89H)LgVbaHQbNmDjoMMZW}Q#hAEB?3E_qB;Q=mwjjf9G5jw3>*nl zb5nFPZDDXpm$4WD6&O==G;C#ab4gQkMN?r(Q!rmiQ!!stHD8x6)d3fmds_lD26QuR zVQ@*8u^0grE>mUvqb1 zaF=oW3?i4MIsp_2MMZW}R9{m;m(b%1AeW`W3?&CsbTe&Xa7mZ37y%VFQ*<$^uE3J#GSkmk+=J2A59E0!g=T zZvqhmmp9`ILzf*$3=6mRK>?uxm(WWKN0#3U47c8L0&fJjGk5}Y0hcGm0U(#4gAG=f zmqr1mmsw^FG`A~y0*C~ctKI?&mO=qIw<~`FSOk|UnG6q>^Z`znAW;krmkdq=SC<;p z0t}b%T?1B^kf93*mx;LxNS7aV3<{T*5epEPkZ=tSmpP0JQI{2Y100udEdxfEKNABE zmxg%*6qkSy15%e*odOn?MFU5dOIHFEmllfyD3^c`12C7MEdv&pO0xnImsKqTVVBzK z3Kf?Fo&k23fOZTEm*o`;5|?0i3j~*}xeRhCbTTkvVPk7wX>N06a&%v5bY*g3bZ>G= zQ#M~oQ#fB!VM$XsUqwYlMVBzo3LBR|wgOd`|5gJgmniZ9WtR+t3<{SiatxH0Wflx5 zmyZ4mVwa{K42PG{jSCmI9)tp_0hf2m0UeiL6%7rShIj!cmk`7O1()5>3JjO9xB@np zFgF1Px1@&xZ2^~FOadR5)Itmxw<(DNl?j)31`8gROafq+1sx1%mvCwg7MCBN3@DdN zM-7gbfc^>=m(QsJ3YUJR0%4ahPy!a0%|ZhQmu0O27Pn_k0vH09b!H4(m%z{g9k(`< z0@MSyFqi^^0hd?50V216ngXu@mn#qre3!4c3mTV5B?2>-fS3YHm)%$m8kbch4MVr? 
zwgNu`mkMzL4VUoL3LTfEIsp|2HeXF&aCCB)XD0$Smk><>EtjP_0Th>`hXP2KqB;Q= zmjuoN5d%{*UzhH&0xp-3O#vR4r8)r=motqE7nh&Y0t=TAO#v;Jr8)r=mnB{e5Cb@0 zZI^*~0WJn~Gi_mTNtdx00TmllbTn*bb8|^kb462ONmDUjNmDXkQ#M~kMMaldhXOvGi_mTNtdx00TmllbTn*bb8|^kb462ONmDXkNmDalQ#W5lMMamPhXP6oQ*%>v zGi_mTNtdx00Tnb;bTn*bb8|^kb462ONmDXkNmDalQ#W5lMMXtLMLA<{ZgX^Ubz^i% zQ$$}%Qd2WuQd2{hfkFZkm){Zt3zzWJ3L61!mjTED6_;Fx0zv_8mjTED6_=xj0!RUE zmjTED6_*6g0ucjkQ#O}S-wYI&^4J0um$2FjHKeQa!E^5b5nFPZDDXp zm$4WD6*W_IG;C#ab4gQkMN?r(Q(s9_Fke$NUqwYlMMN@1b^u>mPE&L=aA9e3NlR06 zQ*<+JVQ@*8u^0grEmL$fY-MwENmFx0Q(;L{UrAFiUsE(+MMXtLL@`Bn0AE^8Q*<3Gn%mQ`=m&a!dQJ0Ng4K0_YKLJCRkU0x6mml5>6_?P00aUk7 zf&r5Pmo;($AeWBz3|5wR36_=oc4Of@i ziUC!Z4U7SxmsS@6AeWHx3{{tYjR9Df@KXymx0l-j^bVJB(+fwJu=xxJm)&&(PnW81 z0UMSu14ox1bqofV6cPhsmroM|SeH4u0UDQZEdxfEu(bjdmj{6j8JGTX0~42Ei2@Us zfDi*Pm+;*R7ME8M0~iouVPk7wX>N06a&%v5bY*gv(X$IJmr#fSDIG;*Wkq%XUs_I6 zbTTkvVPk7wX>N06a&%v5bY*gv(X$IJmr#fSDIi5*Q$2C314VFfmmwAdBA1}M3uKp%gaQbcyG;Qfm%w`iFqc4n z0T-8F>jDXviemyNmk`7O6qn|R0vngGgA5?IeC+}T2$z?{3wM`_@B$B)f29IomoQKQ z7MGy23kR1NHwzZGd#wW00=IPa0tN<`<%j|tmQevYxBL?WYyp?v5(5mE@YD(&m!vuY z6_+yY0z{XhIsq4#({=+Km!&!Z6CXn{NmDalOky!bMMY9mFkeqpUjScPPE&L=aA9e3 zNtdBI0T&QNGDUU(Us_I6bTn{bX>v)Ip*jHvGi_mTNtdx00TmcibTn*bb8|^k zb462ONmDXkNmDalQ#W6ip*aH}2UB!2ZDDXpm$4WD6)#hCG;C#ab4gQkMN?r(Q(s9_ zFke$NUqwYlMME)3Qd2o!Oky!bMMY9mFkeqpUzdS+0V@G*mjTED6_+yY0z?6AmjTED z6_?X?0~rBrmytaI6bC~wNm6Z7GnV-Q6t`*>0~`vM&X@xXmq2m>6qi0I4Puw8&H_@G z7fcHhm+w{r6_?<10!WujR{{*Ta3TX(0+)5Q3Qd>3fdM(UuiF8f43|K10Tq{y_6%0H zKP3Y?3YT!x3rUwy9RmcnV@?B=0hcEh100v2y9;Z#PA~%y2Dk7F14#pyN$~;?mk>Jx zQJ1g?3@n#yN&^pQa!Hq=Isq3RLo!KI zGha+%F-1j1Qd2NrPgGw3Us_I6bTe&Xa7mZ37y%WRGerR!3^HFuMMXn0MN&&sIhWDx z3m=!IIsp@xRN4U$DK%e3Q*<Qa!E^5b5nFPZDDXpm$4WD z6+KgQG;C#ab4gQkMN?r(Q(s9_Fke$NUqwYlMME-4Qd2o!Oky!bMMY9mFkeqpUjScP zPE&L=aA9e3NlR^)kv#zv6hkseQf*T+Urb^#MMXtVR9{b2FqiQR3?i3Mkqls$4-g9u zx4%&X-~^WyJOL7yaK8*}x1d)8$pM#VF9H#l@ZSNImz#zS61Q1C0*MWmGRg`Smyc8p zjJMZX0}TV0B7F^Nw@h9GM+KMg3j-v#?@9x-0+-2v1Amv#*9#AqN$~;?mk>JxQJ1g? 
z3?G*++6)f2P-FvJ0+;tW0TGvgHw+7x>P!p_mpV-g6PIZ`0Y{gmQvw^8z6S##m;c8B z8kg)X3>uf^hyom!AWZ=wm$e2950?uR0}Ge#b^{)lcKZPum!&!Z6PHxl0T2{5Uqw@N zG;C#ab4gQkMN?r(Q#D_g3!?%jm!vuY6qi)m0T2^4Uqw@NG;C#ab4gQkMN?r(Q#F%e z^Ary`Urb^#MMXm~MN&&sL|>PYBLg0nq&fiFA7ni*n43xKCp91&{m!+5k6}K>Z1Em9(L|+3Gmw*rh zMg=rtVRL1d;b;tJm%wKOD3`!70~MFh9}N?i$bbW1m%wKOD3`!70~MFh9}NYU?}Gyi zmzQD#7nkse15lUn3j-vVFQ^Lz z0VKm#O~q&fi1+5r$MHD5(jbTn*bb8|^kb462ONmDgn zMMW_&UqNhaZ)0C>Z)9adGDT8TLSL7WBLg0nq&fim!&!Z6PHxl0T3cJUqw@NG;C#ab4gQkMN?r(Q#D^j zMK@nfUt@1@c}Y%FLYENf3MB|NUqw($Q$}Bx@iPJ#mm_ThAD5&$0Th>1+5r#~HD5(j zbTn*bb8|^kb462ONmDhKt8D{Bm!vuY6qi)m0T2^4Uqw@NG;C#ab4gQkMN?r(Q#F@; z1Pv>fJjwwKmq1Yi50?q`3lW#ciwqXG*^UEc0hdVe0uZ+lI|Kg&myLu2gqPnk3mLcF zKm(5nmnfP8P?wI71ND~#O#wNVr8)r=2t`9NMN&&sIhWDx3m=!IIsp@xRN4U$DK%e3 zQ*<^m!vuY6qi)m0T2^4Uqw@NG;C#ab4gQkMN?r(Q#F^_g#$vDq&fiu#c0hflt3u2eRX9Gf)&>sy3ms+(9 z3b$Nm1CRrkn+OdUm#~BaY?t3M3k|ofYXg-Cw~v$qJ^`1NBLf%lFIsp@xRN4U$6E$B&Q*<n4myy>27YH?9MMQ8T zmmik{E0>RV3lz75kpo2mmwWLH8JAEb109#7Isp}zGA#oVm+i3v2$%GL4HK9AoD2$= zFf{^^mui*+C6{^X3LBS@O#vR4r8)r=mjuoNCYM{(3K*AABm-ub?^^>Bm%7vo83r_P zVQF$nm!UcV7ZgJ>MN&&sG+zK;T251RG;m>Qa!Hq=Isq32Lor2COPBDY10k1?O#vO3 zzaRUvvD4GLp3V(2PXmVv?WM6G{bY*y7b#82LV`~6#bZByA zVPs!zb#!HTUu0C03@a%Ev;Uuth@ZUAs}XmVv?WM6G{ zbY*y7X>MtB0C03@a%Ev;Uu|`CWq4n7a(QfTV`~6#bZByAVPs!zb#!HTUu0!-bboSh zc>r*9XmVv?WM5-%Ze(F}baG#5ZfSG?aCB&LWnpArV{dL`VRUqIUvp)2V{C6@YXER` zXmVv?WM5-%Ze(F}baG#0Wpi|LZ+QT4bZByAVPs!pZ*F8?X>MtB0C03@a%Ev;UvhVB zZ)0m;X>MtB0C03@a%Ev;UvhVBZ+~NJUvgw@Z)0l!aCB&LWnpAra(8TRV{2b@a(QxO zY;R+00C03@a%Ev;UvhVBZ)0m;cXDiRV`~6#bZByAVPs!&cWiHCYhQG7d3SPbZ)0l! zaCB&LWnpAra(8TRV{2b^ZftL3YXER`XmVv?WM6W3Y;R+0Uu0!-baHQb0Do|FXmVv? 
zWM5-%Ze(9~VQF*#ZDnn9WprP20Ap-#V{2b$Wps3DZDjy(bZByAVPs!pZ*F8?bZKp6 zWOrd{bO3O4XmVv?WM5-%Ze(9`X=iR>Yyfa{XmVv?WM5-%Ze(9#a&KW|V_|c20C03@ za%Ev;Ut@1>WM5=ub98cVd4B+BWps3DZDns}WMO##Y;R*>Y;MtB0CQz+Uw2_?bO3W@ZC`M2b94Z6Wo=(%Wpi|LZ+QT4Z*XO9 z0B~b$Z*ye;cXDZTWdL$zVPpVvZ)0m^bO2&$Ze###X>)XCZUA9pV}E6EbO2*-Zf<2` zbO3W@Ze##+Wo~42Zvb*-V|D;?Wn*?`a&K(_WNdG6Wo`guY++<%asXs(b9rq5WNc$> zZ*ye;WNc+}a&K|~bZKp6a$jR|Wnpw>0CZ_>WpZD0Wps3DZDjy-X>Db4Uu0!$Wprf# zZEIv{asYL6Y-MF|0Dos?baiQD0C03@a%Ev;Utx4~a$jj~X>@aAXk}yoaCB&LWnpArVRUqIUu0!-baHQb0C03@a%Ev;Utx4~a$j?0 zbaP{9Wn^$~Y-wY80C03@a%Ev;Utx4~a$j?0baP{9Wn^$+a(`iM0B2=%aB^vHa%psV z0CQz@aB^vHa%psV0C03@a%Ev;UuR`>b7N>_WN=|}VQm0#bZByAVPs!(Wps06Xk}z@ zVRB(@0AXWeWpi@?Z*XO9WNC5$ZDDL|Z({&va%E)zb8c{QX>N38UvmIsY;SXAWNC5$ zb#82FZfgK?Wq)C0WNC5$a&2U3asX*>V{~i)b7gH`baHujVQF*#cXDZTWp)5*Ze?^| zVRUb90C03@a%Ev;Utx4~a$j?0bYx|8VPj}>bYXO50Bvh#X=ZN#aBpmE0CRM5WpZ+F za$j-)a%Ev`aA9<40CQtuZe(e40Bv(;XJvE%ZF6UHZhvF|ZF6UGV|D;-b7y08YyfX? zbY*gK0B>+~X>McyZ*X*PaC87~aCBjEX8>nqbZ>BU0B2=%Z*X*9Y;SI70Bmz*WorOt zbaHiWV_|e<0AzJ=G5}@RDWB_MnbYpj90CRM5bZ={4asX##bY*UK0BvP$V{mz2a{zN?bY*UK0Ay)$ZeeX@ z0AF8taA#j)VRL0}VQpmqb97;Jc4l(`aCB&LWq)C0Uvp)2ZeeX@Uv6*!aCB&LWnpAr zXJvG5VQpn!Zg2o_a$|IC0B2=%Xm4|LZeeX@0CjF*ZDjy+baG>Gd0%q?UteQ%d2nT4 zVqa`;V*qn>a&BX7Z~$|3a${k0Wn*n{0A_D+Wo`gwXJvGA0A^!sZ*ye;VRUb40BvP$ zV}EdY0A_D;YXD_J0049;k_Z3*0|1&z z0{{R3000310RYNL0{{>J00000005VfmIeR-0suNn0{{R3004ly3;+NC0009700000 z003~&1poj5004-)3;+NC0009700000001C31^@s6005A?3;+NC0009700000001a| z(FFhi0001(y9@vT000031ONa40001x6$ty9@vT000031ONa40002E6$t<{9003wJ4FCWD00097000000020M2><{9 z003|R4FCWD0009700000002mc2><{9006+d3;+NC000320015U0000000000007Yb z3;+NC000350015U00000000000079l3;+NC00032007_v000000001g0001B{|o>C z000011ONcw0{{R3000000002cy9@vT000010RRB_0{{R3000000002E{tN&B00001 z1ONc|0{{R3000000002sy9@vT000010RRC02mk;8000000002!y9@vT000010RRA< z0RR91000000002+y9@w-000000RaF2lLY_(0000000000_`3`M000000RaF28UX+R z00000000000K5zU000000RaF2;06Ey00000000002)qmc000000RaF2@&Nz<00000 z000007`zMs000000RaF2dV000010RR940RR91000000001hy$k>V 
z000010RR940RR91000000001JybJ&U000010RRB51^@s6000000002+{tN&B00001 z1ONc81^@s6000000001RybJ&U000010RRAe0{{R3000000002UfBp;r000000R#X5 zd;O<000000RaF2@d5w< z0000000000sJsjS000000RaF2@d5w<0000000000(7X%)000000RaF2@d5w<00000 z00000_`D1N00000e*pmi0Pz9<000000000003f{#00000009920Pz9<0000000000 z07$(I00000009920Pz9<00000000000Fb;400000009920MY^g00000000000D%7t z00000009I50MY^g00000000000GPZC00000009920Ad3Ge*gdg00000002P$3;+NC z00035003eG0000000000005x83;+NC00032003C000011ONa%2mk;8000000002UybJ&U000010RRB50{{R3 z000000002sybJ&U000010RRAg0{{R3000000002k{tN&B000011ONbj0{{R300000 z0002!ybJ&U000010RR9)0RR91000000002s{tN&Be*gdg0R#X5LjeE)0000000000 z@VpEF000000RaF2IRyX!00000000002)zsd000000RaF29RUCU000000000082=0a z000000R#X59RUCU00000000005WNfl000000RaF2!2|#R0000000000nEng^00000 z0R#X5f58L*000000000002sXt00000009920MrNo000000000005H7_0000000992 z0RIL600000000000O-9800000009920RIL60000000000006!W00000009920RIL6 z000000000002sau00000009920RIL600000e*gdg001z)3;+NC000320093600000 z00000002n73;+NC00032009360000000000000O84FCWD00032009360000000000 z0020>3;+NC00032007Yl0000000000002O}3;+NC00032002V-0000000000003CM ze+&Qs000010RRA51^@s6000000001(y$k>V000010RRA51^@s6000000001Zy$k>V z000010RRB?0ssI2000000001py$k>V000010RRB00000G0000000026y$k>V00001 z0RRB00000G000000002ky$k>V00001e*pjhsQ>@~5C8xG00000@VyKG000000RaF2 zsQ>@~5C8xG000002)+ye000000RaF2sQ>@~5C8xG00000AifL$000000RaF2sQ>@~ z5C8xG00000IKB)3000000RaF2sQ>@~5C8xG00000P`(TR000000RaF2sQ>@~e-How z000000BF7p00000009920I2`~01yBG000000Fb>5000000099205S;x0000000000 z0HD1L000000099209yb601yBG000000LZ-z000000099209yb601yBG000000D!&> z000000099209yb601yBG00000e*mbx3;+NC00032004yq0000000000006kX3;+NC z00032008s_0000000000007Xv3;+NC00032008g>0000000000007{<3;+NC00032 z002e_0000000000008*C3;+NC00032008*~0000000000000oa3;+NCe*gdh0RRA3 z1^@s6000000000ez6<~W000010RR921ONa4000000000$z6<~W000010RRAu0{{R3 z0000000013z6<~W000010RRBz0{{R3000000001Bz6<~W000010RRC00000000000 z0001Rz6<~W000010RR93e+d8p0000000000c)kn(000000RaF2YX$%S0000000000 
zh`tN}000000RaF23I_lH0000000000sQwH9000000R#X5UK4H z000000R#X5kp%z%0000000000!2S#X000000R#X5kp}<(00000e*gdg0OC000011ONbM00000000000001R{|o>C000011ONbW00000000000001Z{|o>C z000011ONbZ00000000000001p{|o>C000011ONb`00000000000001x{|o>C00001 z1ONc200000e*gdg00000nEwm_000000R#X5fdT*k0000000000p#Ka2000000R#X5 zxBvhE0000000000sQ(NA000000R#X5B?bTh0000000000u>TAI000000R#X5(F6bh z0000000000xc>|Q000000R#X5e*pjh0000000000f586?00000009I50GtB=00000 z000000LcFg00000009I50O3;+NC00066002D+0000000000005A}3;+NC00066 z005~50000000000e*gfO!VCZa000021ONai1poj5000000001>!VCZa000021ONbU z1^@s6000000001}!VCZa000021ONaE000000000000026!VCZa000021ONaF00000 z000000002E!VCZa000021ONcc2mk;8000000002M!VCZae*gdg0t5g6;Q;^u00000 z00000$ifT&000000t5g6cL@Lh0000000000(83G=000000t5g61_=NF0000000000 z*uo3|000000t5g6jsgGx0000000000;KB?5000000t5g6(g6Sf0000000000=)w#D z000000t5g6f1m~c00000000000Pw;L0000000IO60Ko(R00000000000QkZT00000 z00IO60EYqq0000000000006@b0000000IO605Svs000000000000_ej0000000IO6 z05k>w000000000001(3r0000000IO60A2|I00000e*gdg000=n3;+NC00066007Je z0000000000001Dv3;+NC000660018X0000000000001b%3;+NC00066006WG00000 z00000001z<3;+NC00066005l@00000000000020{3;+NC00066001=!0000000000 z002P4e+&Qs000021ONc42LJ#7000000000;!wdib000021ONc91ONa4000000000` z!wdib000021ONat0{{R30000000013!wdib000021ONb21^@s6000000001B!wdib z000021ONbA1^@s6000000001J!wdib00002e*^#kcn1Ig0000000000aKj7$00000 z0t5g6g988n0000000000c*6_;000000t5g6tN{Q30000000000fWr&`000000t5g6 zqyzu}0000000000h{Fs3000000t5g67zF?T0000000000ki!fB000000t5g6a0vha ze*gdg000000GPuJ0000000IO601pHJ00000000000HDJR0000000IO60CoWY00000 z000000I0(Z0000000IO602~DX00000000000I0000000000IK&J9000000t5g6cnJUi00000e*gdg06@eH z0000000IO600amC000000000007%3P0000000IO60AmLL000000000008qpX00000 z00IO60FMX&000000000009eEf0000000IO607?k}00000000000AR!n0000000IO6 z05%2y00000000000BFPve*gdg00066004Cd0000000000003~r3;+NC00066001Wk z0000000000004Nz3;+NC00066000080000000000004l*3;+NC00066001Eb00000 z00000004-@3;+NC00066000gE0000000000005B03;+NC00066e*ggY1poj500000 
z0001(#0&rc000021ONb~0RR91000000001>#0&rc000021ONa$2LJ#7000000001} z#0&rc000021ONa_2><{90000000026#0&rc000021ONaR0{{R3000000002E#0&rc z000021ONcG2mk;8e*gdg00000z{Csy000000t5g6#RdQX0000000000$ixf)00000 z0t5g6ItTy&0000000000(8LS?000000t5g6Dgpoi0000000000*u)F~000000t5g6 z1O)&90000000000;KU37000000t5g6ng{>@0000000000f9S*v0000000IO60L%ga z00000000000Pw^N0000000IO60M7yd00000000000QkfV0000000IO60Hy)}00000 z00000006}d0000000IO607D1>000000000000_kl0000000IO60O|t(0000000000 z01(9t00000e*gjm003SH0000000000000=p3;+NC00066003eL0000000000001Dx z3;+NC00066002k=0000000000001b(3;+NC00066000jF0000000000001z>3;+NC z00066000jO00000000000020}3;+NC00066007nle*gdg000000000$#S8!d00002 z1ONaj1poj5000000000;#S8!d000021ONby2mk;8000000000`#S8!d000021ONbq z1^@s60000000013#S8!d000021ONa82LJ#7000000001B#S8!d000021ONaG1^@s6 z00000e*gdgXvGWw000000t5g65C8xG0000000000aK#J&000000t5g68wmga00000 z00000c*P6=000000t5g6u?GME0000000000fW-^|000000t5g6`2+v}0000000000 zh{X&5000000t5g6CIbKf0000000000ki`rDe*gdg00IO60ILQ700000000000GP!L z0000000IO601*HH00000000000HDPT0000000IO60L%pd00000000000I0| z000000t5g63u`000000t5g6KL-E+0000000000Fvbi3000000t5g6 z)dm0n0000000000IK~VB00000e*y#m0Q?02000000000006@kJ0000000IO60OSGy z000000000007%9R0000000IO60F?*;000000000008qvZ0000000IO600{#C00000 z0000009eKh0000000IO60M7^j00000000000AR)p0000000IO60AvXOe*gdg00000 z003yl3;+NC00066007hk0000000000003~t3;+NC00066007Vi0000000000004N# z3;+NC00066007#tZ-e000021ONbt0ssI2000000001}#tZ-e000021ONa) z2><{90000000026#tZ-e000021ONaO00000000000002E#tZ-ee*gdg0t5g6-U0vs z0000000000z{U&!000000t5g62MGWG0000000000$i@r+000000t5g6a|Zwb00000 z00000(8de^000000t5g66#xJL0000000000*v1S1000000t5g6v;qJC0000000000 z;KmF9000000t5g6e;fh;00000000000O-aH0000000IO60Qm+000000000000Pw~P z0000000IO60C5Qb00000000000QklX0000000IO60B8vS00000000000074f00000 z00IO603-00000 z000000I3;+NC00066002`70000000000007{}3;+NC ze*gdi1ONb21ONa4000000002!#|!`f000021ONb71ONa4000000002+#|!`f00002 z1ONaw0{{R3000000002^#|!`f000021ONbh0{{R30000000000$P54g000021ONbC 
z1^@s60000000008$P54g000021ONbpe**vj00000000005XcMw000000t5g6c?AFf z00000000007|09&000000t5g6w*vqG0000000000Ajk{=000000t5g682|tP00000 z00000D98)|000000t5g64+j7M0000000000Fvtu5000000t5g6jRgPz00000e*gdg z0654D0000000IO608a$~000000000006@qL0000000IO602%-Q000000000007%FT z0000000IO609FA2000000000008q#b0000000IO602=@R000000000009eQj00000 z00IO607?b`00000000000AR=re*gdg00066002q?0000000000003yn3;+NC00066 z003qK0000000000003~v3;+NC00066007(p0000000000004N%3;+NC00066006ZG z0000000000004l<3;+NC00066005H)0000000000004-{3;+NC00066e*gfk2LJ#7 z000000001x$P54g000021ONc!1poj5000000001($P54g000021ONay1^@s600000 z0001>$P54g000021ONaW00000000000001}$P54g000021ONaf1ONa40000000026 z$P54g000021ONaX00000e*gdg00000xX26u000000t5g6?EwG)0000000000z{m^$ z000000t5g69smFU0000000000$jA%;000000t5g6N(2A^0000000000(8vq`00000 z0t5g69|r&c0000000000*vJe3000000t5g6rUd{10000000000f8fXr0000000IO6 z09^+F00000000000O-gJ0000000IO608Ik`00000000000Px5R0000000IO60Nnuq z00000000000QkrZ0000000IO60Br>T0000000000007Ah0000000IO60E__u00000 z0000000_wp00000e*gjm000LE0000000000000ol3;+NC00066006HD0000000000 z000=t3;+NC00066002J-0000000000001D#3;+NC00066000#M0000000000001b- z3;+NC00066007AZ0000000000001z_3;+NC00066004mre*gdg000000000u$qWDh z000021ONc61ONa4000000000$$qWDh000021ONbb2mk;8000000000;$qWDh00002 z1ONaZ00000000000000`$qWDh000021ONa+1poj50000000013$qWDh000021ONbG z1ONa400000e*gdgV95*s000000t5g6DhB`n0000000000Xvqu!000000t5g6I0FCx z0000000000aLEh+000000t5g6AOHXW0000000000c*zU^000000t5g6O9lV{00000 z00000fXNI1000000t5g65e5JN0000000000h{+59e*gdg00IO60HOu}0000000000 z0FcQH0000000IO602KoO00000000000GP=P0000000IO60GI&)00000000000HDbX z0000000IO60Fwj&00000000000I10f0000000IO602l)R00000000000I z3;+NC00066001)q0000000000e*gf8$_xMi000021ONb;1ONa4000000001x$_xMi z000021ONcv2LJ#7000000001($_xMi000021ONar2LJ#7000000001>$_xMi00002 z1ONae00000000000001}$_xMi000021ONaI1^@s60000000026$_xMie*gdg0t5g6 zY6AcO0000000000xXKIw000000t5g6e+2*l0000000000z{(5&000000t5g6vjhMD 
z0000000000$jS@=000000t5g6g$Dot0000000000(8>$|000000t5g6B>(^b00000 z00000*vbq5000000t5g6ejVG*00000000005X=k!000000t5g62Lb>900000000007|aX+000000t5g6^9TR{ z0000000000Aj}K^000000t5g6LjeE)0000000000D9j81000000t5g6vIYPE00000 ze*gdg05Hr90000000IO60D1uc0000000000065GH0000000IO603-we0000000000 z06@$P0000000IO60PhC?000000000007%RX0000000IO604@Lk000000000008q>f z0000000IO60CE8U000000000009ecne*gdg00066003hH0000000000003aj3;+NC z00066002G%0000000000003yr3;+NC00066000pK0000000000003~z3;+NC00066 z001}#0000000000004N*3;+NC000660024z0000000000004l@3;+NC00066e*gfp z2LJ#7000000001p%nSek000021ONaq00000000000001x%nSek000021ONa*0ssI2 z000000001(%nSek000021ONaa2LJ#7000000001>%nSek000021ONar0000000000 z0001}%nSek000021ONaN2mk;8e*gdg00000u*?hq000000t5g6!2tjO0000000000 zxXcUy000000t5g6ng##>0000000000z|0H)000000t5g6ZwCMX0000000000$jl4? z000000t5g6JOKaz0000000000(98?~000000t5g6UIqXF0000000000f7r|n00000 z00IO60IvZ600000000000N~6F0000000IO605Jmq00000000000O-sN0000000IO6 z0Qv_200000000000PxHV0000000IO605Jvt00000000000Qk%d0000000IO60Q?33 z0000000000007Ml00000e*gjm004If0000000000000Qh3;+NC00066001%o00000 z00000000op3;+NC00066000UC0000000000000=x3;+NC00066005Q&0000000000 z001D(3;+NC00066006cI0000000000001b>3;+NC00066001)pe*gdg000000000m z%?tnl000021ONcG2LJ#7000000000u%?tnl000021ONbe0{{R3000000000$%?tnl z000021ONb%1^@s6000000000;%?tnl000021ONcT1^@s6000000000`%?tnl00002 z1ONb|2mk;800000e*gdgSj`Lo000000t5g6It2g#0000000000V9g8w000000t5g6 z*8~6n0000000000Xw3`&000000t5g6-U9#t0000000000aLo(=000000t5g6B?kZi z0000000000c+Cs|000000t5g6bp!wa0000000000fXxg5e*gdg00IO60OA4w00000 z000000Eo>D0000000IO606GBx00000000000FccL0000000IO60AUFL0000000000 z0GQ1T0000000IO605kvq00000000000HDnb0000000IO60CNKX00000000000I1Cj z0000000IO6e*n+}0000000000006Mf3;+NC00066006iG0000000000006kn3;+NC z00066002b-0000000000006+v3;+NC000660021$00000000000079%3;+NC00066 z008F#0000000000007X<3;+NC00066002)000000e*gdg0002k%?tnl000021ONc3 z0RR91000000002s%?tnl000021ONbJ0RR91000000002!%?tnl000021ONbL1^@s6 
z000000002+%?tnl000021ONa_0{{R3000000002^%?tnl000021ONc*2mk;800000 z00000f6fd5000000t5g6H2?qr00000000002+j-u000000t5g6`UC&~0000000000 z5Y7w$000000t5g6qXz&000000000007|sj;000000t5g6u>$}A0000000000AkGW` z000000t5g6vjzYF0000000000D9#K300000e*y#m05$*s000000000005HxB00000 z00IO6067Q%0000000000065MJ0000000IO60F49y000000000006@+R0000000IO6 z05<>t000000000007%XZ0000000IO60AT?D000000000008q{h0000000IO605k*u ze*gdg00000003Cd3;+NC00066001)w0000000000003al3;+NC00066005N-00000 z00000003yt3;+NC000660027)0000000000003~#3;+NC00066001}u0000000000 z004N-3;+NC00066004vs0000000000e*gf0&I|wm000021ONaS2LJ#7000000001p z&I|wm000021ONcj2mk;8000000001x&I|wm000021ONa!00000000000001(&I|wm z000021ONbz1poj5000000001>&I|wm000021ONa{2mk;8000000001}&I|wme*gdg z0t5g6_5lC@0000000000u+9ts000000t5g6CIkQg0000000000xXug!000000t5g6 zrv(520000000000z|IT+000000t5g6(gy$l0000000000$j%G^000000t5g6=m7u# z0000000000(9R41000000t5g6e>(sG00000000000NBn90000000IO606YKy00000 z000000N~CH0000000IO605}2w00000000000O-yP0000000IO60Kf(S0000000000 z0PxNX0000000IO60G0><00000000000Qk-f0000000IO60Nnxr00000e*gdg0002b z3;+NC00066006@S0000000000000Qj3;+NC00066004jn0000000000000or3;+NC z00066006560000000000000=z3;+NC00066005o>0000000000001D*3;+NC00066 z002Dz0000000000001b@e+&Qs000021ONcE2mk;8000000000m&kO(n000021ONcv z2mk;8000000000u&kO(n000021ONaH1poj5000000000$&kO(n000021ONa&00000 z000000000;&kO(n000021ONc+0{{R3000000000`&kO(n00002e*^#kUIhRE00000 z00000SkDXq000000t5g6p9cT{0000000000V9yKy000000t5g63pb0000000000c+U&~ z000000t5g6?E?S+e*gdg000000D#X70000000IO60ILT800000000000Eo{F00000 z00IO60162J00000000000FciN0000000IO60Cxib00000000000GQ7V0000000IO6 z0OA1v00000000000HDtd0000000IO606zc#0000000000e*mb@3;+NC000660027$ z0000000000006Mh3;+NC000660009C0000000000006kp3;+NC00066006!L00000 z00000006+x3;+NC000660055%00000000000079(3;+NC00066000~V0000000000 z007X>3;+NCe*gdi1ONcy0{{R3000000002k&kO(n000021ONa70{{R3000000002s z&kO(n000021ONb*1ONa4000000002!&kO(n000021ONaM2mk;8000000002+&kO(n 
z000021ONb}2mk;8000000002^&kO(n000021ONade+2*l00000000000MHBo00000 z0t5g6HUa&<{900000e*gdgP|*wk000000t5g6@&y0@0000000000SkVjs00000 z0t5g6^#K3?0000000000V9^W!000000t5g6BL@Hg0000000000XweJ+000000t5g6 zx&r_J0000000000aM26^000000t5g6M*si-0000000000c+m_1e*gdg00IO60BHvR z00000000000D#d90000000IO60009300000000000Ep2H0000000IO6089x000000 z000000FcoP0000000IO60P+F=00000000000GQDX0000000IO60Hgu{0000000000 z0HDzf0000000IO6e*hW?0000000000005}b3;+NC00066004;t0000000000006Mj z3;+NC00066005r_0000000000006kr3;+NC000660046V0000000000006+z3;+NC z00066002D&00000000000079*3;+NC00066000#U00000e*gdg0002c(F_0p00002 z1ONb60RR91000000002k(F_0p000021ONbc0ssI2000000002s(F_0p000021ONa? z00000000000002!(F_0p000021ONaa1poj5000000002+(F_0p000021ONb21poj5 z000000002^f6)v8000000t5g6dkFvl00000000000MZNq000000t5g65CH%H00000 z000002+|Ay000000t5g6jR61v00000000005Yh|)000000t5g6*a!ds0000000000 z7}5*?000000t5g6NdN!<0000000000Akqu~00000e*y#m07?J=000000000004UN7 z0000000IO6051ao000000000005H-F0000000IO605b&u0000000000065YN00000 z00IO608$13000000000006@|V0000000IO6015*D000000000007%jd0000000IO6 z0O1D!e*gdg00000002(hL9q ze*gdg0t5g6E(ibs0000000000sL~7o000000t5g6x&i0000000000$kGe|000000t5g6e@+7c00000000000MOD50000000IO60AL6J z00000000000NBzD0000000IO60EPkp00000000000N~OL0000000IO60Hg%~00000 z000000O-;T0000000IO601F8K00000000000PxZb0000000IO6022iO00000e*gdg z008*X3;+NC00066002w?00000000000002f3;+NC00066008U=0000000000000Qn z3;+NC00066001He0000000000000ov3;+NC00066002}30000000000000=%3;+NC z00066002z@0000000000001D<{9000000000;(+mIr00002e*^#kg$V!v z0000000000P}2+m000000t5g6ss#W50000000000Sknvu000000t5g6+y(#u00000 z00000VABi$000000t5g6PXGV_0000000000XwwV;000000t5g6nE?O*0000000000 zaMKI`000000t5g6FaiJoe*gdg000000C>|30000000IO60Kf$R00000000000D#jB z0000000IO60Ne-w00000000000Ep8J0000000IO60Ez?v00000000000FcuR00000 z00IO60L=#g00000000000GQJZ0000000IO603-zf0000000000e*mD<3;+NC00066 z005Z?0000000000005}d3;+NC00066008#^0000000000006Ml3;+NC00066006iK z0000000000006kt3;+NC00066003nL0000000000006+#3;+NC00066002}100000 
z000000079-3;+NCe*gdi1ONca2mk;8000000002c(+mIr000021ONcz0{{R300000 z0002k(+mIr000021ONa*2LJ#7000000002s(+mIr000021ONc(2mk;8000000002! z(+mIr000021ONb12mk;8000000002+(+mIr000021ONd2e*pjh0000000000_|psk z000000t5g68wUUY00000000000MrZs000000t5g6mInX;00000000002-FM!00000 z0t5g6dIbOg00000000005Y!9+000000t5g6FbDtu00000000007}N{^000000t5g6 zGzkCz00000e*gdg03g&10000000IO600;;G000000000004UT90000000IO6096J6 z000000000005H@H0000000IO608jt`0000000000065eP0000000IO60Cold00000 z0000006^3X0000000IO6080n}000000000007%pfe*gdg00066005-|0000000000 z002Rs000021ONa#0ssI2000000001h)C>Rs00002 z1ONaK2mk;8000000001p)C>Rs000021ONco0ssI2000000001x)C>Rs000021ONc= z0RR91000000001()C>Rs000021ONaT0{{R3e*gdg00000pwtWi000000t5g6p925@ z0000000000sMHJq000000t5g6Q2+n{0000000000u+$6y000000t5g6B?15d00000 z00000xYP^)000000t5g6x&{CM0000000000z|;%?000000t5g6R0aS50000000000 zf5_Af0000000IO6038DW00000000000MOJ70000000IO606zi%00000000000NB(F z0000000IO60EPwt00000000000N~UN0000000IO608#(|00000000000O-^V00000 z00IO603Hbd00000000000Pxfd00000e*gjm002_}0000000000008*Z3;+NC00066 z000dM00000000000002h3;+NC00066002e-0000000000000Qp3;+NC00066002|~ z0000000000000ox3;+NC00066000mL0000000000000=(3;+NC00066002(|e*gdg z000000000W)eHat000021ONd00RR91000000000e)eHat000021ONcu1ONa400000 z0000m)eHat000021ONa#0RR91000000000u)eHat000021ONb400000000000000$ z)eHat000021ONb00ssI200000e*gdgNYxAg000000t5g6RsaA10000000000P}K|o z000000t5g6VgmpG0000000000Sk(*w000000t5g6!vX*R0000000000VATu&00000 z0t5g6R{#J20000000000Xw?h=000000t5g6SO5S30000000000aMcU|e*gdg00IO6 z0Q~|000000000000C?350000000IO60JQ=D00000000000D#pD0000000IO60JH=E z00000000000EpEL0000000IO60A&aO00000000000Fc!T0000000IO604oIm00000 z000000GQPb0000000IO6e*g^#0000000000005xX3;+NC00066002e?0000000000 z005}f3;+NC00066006%P0000000000006Mn3;+NC00066002!10000000000006kv z3;+NC00066001Zg0000000000006+%3;+NC00066001Hg00000e*gdg0002U)eHat z000021ONcq1ONa4000000002c)eHat000021ONb>2mk;8000000002k)eHat00002 z1ONd10RR91000000002s)eHat000021ONc-2LJ#7000000002!)eHat000021ONcN 
z0{{R3000000002+f7J{C000000t5g6SpWb40000000000_|*&m000000t5g6L;wP+00000000002-XY$000000t5g65C;GN00000 z000005Y`L;000000t5g6oCg2^00000000007}g8`00000e*y#m0E`C!0000000000 z03g;30000000IO604@mt000000000004UZB0000000IO600{&D000000000005H}J z0000000IO60O|n%0000000000065kR0000000IO60PzO^000000000006^9Z00000 z00IO60H*^0e*gdg00000002nV3;+NC00066009370000000000002<{9 z000000001Z)(iju000021ONc@1poj5000000001h)(iju000021ONc)1poj500000 z0001p)(iju000021ONas1^@s6000000001x)(iju000021ONbC1poj5000000001( z)(ijue*gdg0t5g6TmS$70000000000pwt<80000000000xYi5+00000 z0t5g6%?1Df0000000000z}5@^000000t5g6e^LVg00000000000La!10000000IO6 z03QPY00000000000MOP90000000IO60E7Vm00000000000NB<{9000000002!*9-sv000021ONbVe*^#k0000000000 z@Yf6g000000t5g6iv$1w0000000000_}2^o000000t5g6VgLXD00000000000N4xw z000000t5g6oCN>?00000000002-pk&000000t5g6_Xq$000000000005ZDX=00000 z0t5g66$StR00000e*gdg02tT|0000000IO607nM^000000000003g^50000000IO6 z0PO_;000000000004UfD0000000IO60Gb2<000000000005I4L0000000IO6051pt z0000000000065qT0000000IO60F?s(000000000006^Fbe*gdg00066001!p00000 z00000002nX3;+NC00066003hE0000000000002|100000000000MObD0000000IO6 z0Idc900000000000NC0L0000000IO601g8H00000000000N~mT0000000IO604N9m z00000e*gdg008LP3;+NC00066002w_0000000000008jX3;+NC00066003kI00000 z00000008*f3;+NC00066006oN00000000000002n3;+NC00066003A90000000000 z000Qv3;+NC00066001os0000000000000o%e+&Qs000021ONbo1ONa4000000000O z+YA5z000021ONbH1poj5000000000W+YA5z000021ONbU00000000000000e+YA5z z000021ONa&2><{9000000000m+YA5z000021ONcn1poj5000000000u+YA5z00002 ze*^#kKM4Q;0000000000K-&xe000000t5g6a|r+d0000000000NZSkm000000t5g6 zMF9W+0000000000P}>Xu000000t5g6nF9a-0000000000SlbK$000000t5g6r3L^1 z0000000000VA~7;000000t5g6cLV?ce*gdg000000BG9`0000000IO60678x00000 z000000C3w30000000IO60J8`H00000000000C?LB0000000IO60B`^R0000000000 z0D#*J0000000IO60P+L?00000000000EpWR0000000IO605b>x0000000000e*lo% z3;+NC00066003wM0000000000005ZV3;+NC00066009350000000000005xd3;+NC z00066005x|0000000000005}l3;+NC00066002k^0000000000006Mt3;+NC00066 
z008|20000000000006k#3;+NCe*gdi1ONaz2><{9000000002M+YA5z000021ONc1 z2LJ#7000000002U+YA5z000021ONcB2mk;8000000002c+YA5z000021ONa`1ONa4 z000000002k+YA5z000021ONbX00000000000002s+YA5z000021ONaVe+B>m00000 z00000=-Uhc000000t5g6#0CHW0000000000@Y@Uk000000t5g6`vw320000000000 z_}dHs000000t5g6AOipZ00000000000Ne}!000000t5g6a{vGU00000000002;2++ z000000t5g6N&x@>00000e*gdg01(^^0000000IO60JsGJ000000000002tg100000 z00IO60LlRX000000000003h590000000IO60FDR%000000000004UrH0000000IO6 z0HOf^000000000005IGP0000000IO605%5z0000000000065$Xe*gdg00066001cm z0000000000002PT3;+NC00066005r?0000000000002nb3;+NC00066006oO00000 z00000002<{9e*gdg00000klYLa00000 z0t5g6b^rhX0000000000nA{8i000000t5g6-vj^v0000000000pxg`q000000t5g6 ziwOV#0000000000sN4(y000000t5g6ZU_JX0000000000u-ps)000000t5g6;s*c# z0000000000f4JNX0000000IO605b*v00000000000KnV~0000000IO60K^9X00000 z000000La`70000000IO60Llgc00000000000MOhF0000000IO604xRo0000000000 z0NC6N0000000IO60Hp^200000000000N~sV00000e*gjm008v`0000000000008LR z3;+NC00066003D70000000000008jZ3;+NC00066008d@0000000000008*h3;+NC z00066003bI00000000000002p3;+NC00066004IY0000000000000Qx3;+NC00066 z000&Pe*gdg000000000G-3$N#000021ONc@0ssI2000000000O-3$N#000021ONbu z0RR91000000000W-3$N#000021ONbd00000000000000e-3$N#000021ONb62LJ#7 z000000000m-3$N#000021ONcn0{{R300000e*gdgINb~Y000000t5g67YG0V00000 z00000K-~-g000000t5g6`vU*~0000000000NZkwo000000t5g6y9WRO0000000000 zP~8jw000000t5g6(+2I47)0000000000VBHJ= ze*gdg00IO60E`F#00000000000BGF|0000000IO60C58V00000000000C3$500000 z00IO60EPzu00000000000C?RD0000000IO60C@la00000000000D#>L0000000IO6 z038Sb00000000000EpcT0000000IO6e*n$|0000000000005BP3;+NC00066001uu z0000000000005ZX3;+NC00066002w~0000000000005xf3;+NC00066002J$00000 z00000005}n3;+NC00066001=u0000000000006Mv3;+NC00066006lH00000e*gdg z0002E-3$N#000021ONc%1^@s6000000002M-3$N#000021ONaF0{{R3000000002U z-3$N#000021ONbw1^@s6000000002c-3$N#000021ONbf00000000000002k-3$N# z000021ONc_1^@s6000000002sf87iK000000t5g63<&@L0000000000=-mte00000 
z0t5g6>jwY;0000000000@ZAgm000000t5g6T?qgH0000000000_}vTu000000t5g6 z9|iyb00000000000NxA$000000t5g6djJ3c00000000002;K|;00000e*y#m0DJ%d z000000000001(~`0000000IO60DS-e000000000002tm30000000IO60AvCH00000 z0000003hBB0000000IO60Db@f000000000004UxJ0000000IO60Dk}g0000000000 z05IMR0000000IO606hi(e*gdg000000021N3;+NC00066002({0000000000002PV z3;+NC00066008?40000000000002nd3;+NC00066004Ue0000000000002<{9 z000000001p-V6W$e*gdg0t5g6o&x{?0000000000klqXc000000t5g6fdBvi00000 z00000nBEKk000000t5g6lK}t#0000000000pxz7s000000t5g6qy_*00000000000 zsNM_!000000t5g6cL)Fg0000000000u-*&+000000t5g6f7Jv400000000000Jz=^ z0000000IO60R9C400000000000Knc10000000IO60ObP!00000000000Lb1900000 z00IO60PF<-00000000000MOnH0000000IO60IUK400000000000NCCP0000000IO6 z0D=Gj00000e*gdg007|L3;+NC00066007wm0000000000008LT3;+NC00066004sk z0000000000008jb3;+NC00066000*P0000000000008*j3;+NC00066004&u00000 z000000002r3;+NC00066001cj0000000000000Qze+&Qs000021ONbD2LJ#700000 z0000G-wXf%000021ONbp00000000000000O-wXf%000021ONc51ONa4000000000W z-wXf%000021ONbM2><{9000000000e-wXf%000021ONaQ0RR91000000000m-wXf% z00002e*^#kS_uFE0000000000INuBa000000t5g6R09A20000000000K;H}i00000 z0t5g600#g70000000000NZ$+q000000t5g6-T?pr0000000000P~Qvy000000t5g6 zg#Z8m0000000000Slm z0000000000;NJ`Y000000t5g6=m!7*0000000000=-&(g000000t5g6(Fgzl00000 z00000@ZSso000000t5g669NDL0000000000_}>fw000000t5g6L<{9000000001B;0yo&000021ONbZ0{{R300000 z0001J;0yo&000021ONch1^@s6000000001R;0yo&000021ONcV2LJ#7000000001Z z;0yo&000021ONb<0ssI2000000001h;0yo&000021ONaQ0ssI2e*gdg00000h~NwW z000000t5g6E(QPq0000000000kl+je000000t5g6r~?210000000000nBWWm00000 z0t5g6^8o+=0000000000px_Ju000000t5g6$pQcX0000000000sNf6$000000t5g6 zI|u*(0000000000f3V;T0000000IO60Cfof00000000000Jz``0000000IO609ge9 z00000000000Kni30000000IO60Ez$r00000000000Lb7B0000000IO60OSY&00000 z000000MOtJ0000000IO60E++s00000000000NCIR00000e*gjm005>00000000000 z007|N3;+NC00066002%10000000000008LV3;+NC00066004{t0000000000008jd z3;+NC00066004~u0000000000008*l3;+NC00066005Q(00000000000002t3;+NC 
z00066002G&e*gdg0000000008;S2x(000021ONbd2mk;8000000000G;S2x(00002 z1ONbE1poj5000000000O;S2x(000021ONcw1ONa4000000000W;S2x(000021ONb% z0{{R3000000000e;S2x(000021ONb+1ONa400000e*gdgFyRaU000000t5g6djS9d z0000000000IN=Nc000000t5g6Tmb+80000000000K;aAk000000t5g6y8-|J00000 z00000NZ||s000000t5g6w+H|L0000000000P~i*!000000t5g6*$4mt0000000000 zSm6u+e*gdg00IO600jjA00000000000AS$^0000000IO60LBFX00000000000BGS1 z0000000IO60ImT500000000000C3?90000000IO60Nw=v00000000000C?dH00000 z00IO60FD3v00000000000D$2P0000000IO6e*l{S0000000000004;L3;+NC00066 z006ZF0000000000005BT3;+NC00066007wr0000000000005Zb3;+NC00066000sJ z0000000000005xj3;+NC00066002Y;0000000000005}r3;+NC00066002e=00000 ze*gdg00026;S2x(000021ONbq0ssI2000000002E;S2x(000021ONbP1ONa400000 z0002M;S2x(000021ONbM1poj5000000002U;S2x(000021ONcJ1ONa4000000002c z;S2x(000021ONbO1ONa4000000002kf8h)O000000t5g6(gXki0000000000;Nc7a z000000t5g6+5rFn0000000000=-~_i000000t5g6j{pDw0000000000@Zk&q00000 z0t5g6n+5;?0000000000_~8ry000000t5g6Xa)cP00000000000OAY)00000e*y#m z0HOl`000000000000`m?0000000IO60D%Jl000000000001)B~0000000IO602BxS z000000000002ty70000000IO60KEqQ000000000003hNF0000000IO60Fng&00000 z0000004U-N0000000IO60K@_Se*gdg00000001!J3;+NC000660058x0000000000 z0021R3;+NC00066006)Q0000000000002PZ3;+NC000660024$0000000000002nh z3;+NC00066000~Y0000000000002<{9000000001B;tT))000021ONaX2LJ#7000000001J;tT))00002 z1ONcm0ssI2000000001R;tT))000021ONcQ2mk;8000000001Z;tT))000021ONb$ z00000000000001h;tT))e*gdg0t5g6MFju=0000000000h~f+Y000000t5g6Py_$~ z0000000000km3vg000000t5g6k^lez0000000000nBoio000000t5g6fCK;l00000 z00000pyCVw000000t5g6BLe^c0000000000sNxI&000000t5g6f4B$$0000000000 z0I=c=0000000IO60Cxre00000000000J!1|0000000IO60Gt5;00000000000Kno5 z0000000IO60Lcdc00000000000LbDD0000000IO60G$B<00000000000MOzL00000 z00IO60Ja1G00000e*gdg007wH3;+NC00066003|W0000000000007|P3;+NC00066 z006iL0000000000008LX3;+NC00066000^W0000000000008jf3;+NC00066005E# z0000000000008*n3;+NC000660049W00000000000002ve+&Qs000021ONb&00000 
z0000000008;|u@*000021ONbt2LJ#7000000000G;|u@*000021ONad1ONa400000 z0000O;|u@*000021ONcE2LJ#7000000000W;|u@*000021ONc01^@s6000000000e z;|u@*00002e*^#k%LV`d0000000000FyjmW000000t5g6Q3L=00000000000IO7Ze z000000t5g6R0RM40000000000K;sMm000000t5g6sR9510000000000NaG9u00000 z0t5g6t^@!80000000000P~!{$000000t5g6y9NLNe*gdg0000009fM;0000000IO6 z0Gk5<00000000000AS+`0000000IO60N@1x00000000000BGY30000000IO60C)ia z00000000000C3|B0000000IO60LBOa00000000000C?jJ0000000IO60F(d#00000 z00000e*l2v3;+NC00066004ag0000000000004;N3;+NC000660024%0000000000 z005BV3;+NC00066004vt0000000000005Zd3;+NC00066006fK0000000000005xl z3;+NC000660012c0000000000005}t3;+NCe*gdi1ONc&1^@s60000000026;|u@* z000021ONaR2LJ#7000000002E;|u@*000021ONc;1ONa4000000002M;|u@*00002 z1ONbs0RR91000000002U;|u@*000021ONb~1poj5000000002c;|u@*000021ONaE ze*^#k0000000000*y9WU000000t5g6)&T$j0000000000;NuJc000000t5g64+sDN z0000000000=;I6k000000t5g6!2h($00000 z00000_~Q%!000000t5g6Gywnr00000e*gdg0086+0000000IO60JZ`E0000000000 z00`s^0000000IO60M7;h000000000001)I10000000IO60MZ5k000000000002t&9 z0000000IO60QCd_000000000003hTH0000000IO60G0p%000000000004U@Pe*gdg z00066005T&0000000000001!L3;+NC00066001%r00000000000021T3;+NC00066 z003A80000000000002Pb3;+NC00066007Sc0000000000002nj3;+NC00066005u` z0000000000002*00000000000MO(N00000e*gjm002e<00000 z00000007wJ3;+NC00066005f+0000000000007|R3;+NC00066007Si0000000000 z008LZ3;+NC00066006`Y0000000000008jh3;+NC00066005l_0000000000008*p z3;+NC00066005i-e*gdg0000000000000000000000`y`0000000IO6 z0GbB?000000000001)O30000000IO604fOp000000000002t;B0000000IO608j=1 z000000000003hZJ0000000IO60I&f7e*gdg00000001cF3;+NC00066001Ha00000 z00000001!N3;+NC00066006}Z00000000000021V3;+NC00066001ij0000000000 z002Pd3;+NC000660027!0000000000002nl3;+NC00066008&_0000000000e*geb z<_rJ;000021ONb*1^@s60000000013<_rJ;000021ONaK2><{9000000001B<_rJ; z000021ONb80RR91000000001J<_rJ;000021ONc|1^@s6000000001R<_rJ;00002 z1ONbf1ONa4000000001Z<_rJ;e*gdg0t5g6W(5EM0000000000faVMU000000t5g6 
z#{&QW0000000000h~^9c000000t5g6iUt4x0000000000kmd{k000000t5g6q5uE@ z0000000000nC1)s000000t5g6TL%CD0000000000pymt!000000t5g6e?SEQ00000 z000000I22+0000000IO605Amr00000000000I=o^0000000IO608s@10000000000 z0J!E10000000IO60NDos00000000000Kn!90000000IO60OGZ000000000003hfL ze*gdg00066005r^0000000000001cH3;+NC000660015b0000000000001!P3;+Ot z|9=1g000021ONc12mk;8000000000u=nMb=000021ONaI2LJ#7000000000$=nMb= z000021ONcV0{{R3000000000;=nMb=000021b+YkfC2yj0000000000Q0NQ*00000 z0t5g6f&u^l0000000000Sm+D@000000t5g6dI$gj0000000000VCW10000000t5g6 zCk6lj0000000000Xy^<8000000t5g6s09E30000000000aOeyG000000t5g6+W`Oo z0Dk}g000000C?yO0000000IO60IC2000000000000D$NW0000000IO604@gr00000 z000000Ep-e0000000IO607C)*00000000000FdYm0000000IO60J8-E0000000000 z0GQ|u0000000IO605J#v00000000000Dqw93;+NC00066004pr0000000000005}y z3;+NC00066006rP0000000000006M)3;+NC00066006oH0000000000006k?3;+NC z00066006510000000000006+~3;+NC00066003wO0000000000007A73;+NC0Dk}i z1ONa`1poj5000000002c=nMb=000021ONc600000000000002k=nMb=000021ONay z2><{9000000002s=nMb=000021ONbm2LJ#7000000002!=nMb=000021ONaO1poj5 z000000002+=nMb=000021ONc70Dk}g0000000000_~;A(000000t5g6e*^#k00000 z000000O000000t5g6uLA%800000000002000021ONa70ssI2000000001h=?nk>000021ONd21poj5000000001p z=?nk>000021ONbB0{{R3000000001x=?nk>000021ONaS0RR91000000001(=?nk> z000021ONcY1ONa40Dk}g00000py><%000000t5g690&ja0000000000sOby<00000 z0t5g6uK)l50000000000u;~l{000000t5g6S_A+90000000000xakZ4000000t5g6 z%?JPh0000000000!08MC000000t5g6umAu60000000000$bab!0000000IO607(M? 
z00000000000MO|S0000000IO6009U900000000000NCja0000000IO60Ad0F00000 z000000O08i0000000IO60DJ@h00000000000O;uq0000000IO60PO|<0000000000 z0PyJy000000Dl4m000vM0000000000008*u3;+NC00066001!s00000000000002$ z3;+NC00066002A%0000000000000Q;3;+NC00066004Og0000000000000o`3;+NC z000660068A0000000000000>33;+NC00066006N70Dk}g000000000W>I?t?00002 z1ONa41^@s6000000000e>I?t?000021ONc{0{{R3000000000m>I?t?000021ONbt z1ONa4000000000u>I?t?000021ONcm1ONa4000000000$>I?t?000021ONaL2><{9 z000000Dk}gNa_p#000000t5g6R0se70000000000Q0fc-000000t5g6VhI2M00000 z00000Sn3P_000000t5g6(*Xbg0000000000VCoD2000000t5g6`T+m{0000000000 zXzC0A000000t5g6rU3u|0000000000aOw;I0Dk}g00IO609FM600000000000C?&Q z0000000IO60I~-F00000000000D$TY0000000IO60LKOZ00000000000Ep@g00000 z00IO600IaA00000000000Fdeo0000000IO60I~o800000000000GR3w0000000IO6 z0Dr9o0000000000005xs3;+NC000660083z0000000000005}!3;+NC000660043X z0000000000006M+3;+NC000660049a0000000000006k^3;+NC000660006600000 z00000006-13;+NC00066002t|000000Dk}g0002U>I?t?000021ONam0{{R300000 z0002c>I?t?000021ONcD00000000000002k>I?t?000021ONcE00000000000002s z>I?t?000021ONbQ0ssI2000000002!>I?t?000021ONct0{{R3000000002+>VFIX z000000t5g62L%8C0000000000`05M*000000t5g6?gjt=00000000000P73@00000 z0t5g6j0OMz00000000002W0000000000003a?3;+NC00066008F&0000000000003y~3;+NC z00066007GY00000000000Dk~*>kI$@000021ONaD2mk;8000000001Z>kI$@00002 z1ONaU2><{9000000001h>kI$@000021ONcG00000000000001p>kI$@000021ONcJ z2mk;8000000001x>kI$@000021ONa*2><{9000000001(>kI$@0Dk}g0t5g62?YQE z0000000000pz90(000000t5g69s&RW0000000000sOt;>000000t5g6JOcm#00000 z00000u541WLs000021ONbJ1poj5000000000W>LI00000000000C?;S0000000IO60Gb5= z00000000000D$Za0000000IO60JH-D00000000000Ep}i0000000IO60F?v)00000 z000000Fdkq0000000IO603HPZ00000000000DqY53;+NC00066000vQ0000000000 z005xu3;+NC00066000;W0000000000005}$3;+NC00066001}$0000000000006M; z3;+NC00066005i>0000000000006k`3;+NC00066007Me0000000000006-33;+NC z0Dk}i1ONbk2mk;8000000002U>A000000t5g6tq1@B000000Dk}g 
z02u8I0000000IO60R9I6000000000003huQ0000000IO60E+_v000000000004VJY z0000000IO60KEVJ000000000005I(g0000000IO60A~aM0000000000066Uo00000 z00IO60Obe(000000000006^^w0Dk}g000660040X0000000000002ns3;+NC00066 z008|10000000000002(000000t5g6;{^Z! z0000000000Q0@!>000000t5g6z6JmQ0000000000Sndn}000000t5g6%me@c00000 z00000VD1b6000000t5g6rUw830000000000XzmOE0Dk}g00IO60KotN0000000000 z0C4UM0000000IO607wS_00000000000C?^U0000000IO60KxzO00000000000D$fc z0000000IO60Qv#|00000000000Eq4k0000000IO60K5kP00000000000Fdqs00000 z00IO60Dq1K0000000000005Zo3;+NC00066009330000000000005xw3;+NC00066 z007hh0000000000005}&3;+NC00066006@P0000000000006M=3;+NC00066003A4 z0000000000006k|3;+NC00066004;v000000Dk}g0002M?hF6`000021ONcU00000 z000000002U?hF6`000021ONar0RR91000000002c?hF6`000021ONam2mk;800000 z0002k?hF6`000021ONc70{{R3000000002s?hF6`000021ONcV00000000000002! z?tcsb000000t5g6Lj?c;0000000000@a_x%000000t5g6AprmY0000000000`0fk< z000000t5g69svLV00000000000PhR{000000t5g6L& z0000000000sP7B_000000t5g6I|Kj#0000000000u<{9000000000e@C*O|000021ONcb0000000000 z0000m@C*O|000021ONbK1^@s6000000000u@C*O|000021b+YkJO%&&0000000000 zK=2Fz000000t5g6%m4rY0000000000Nbn2*000000t5g6paK8@0000000000Q1A=@ z000000t5g6%>V!Z0000000000Snv!0000000t5g6&Hw-a0000000000VDJn800000 z0t5g6&j0`b0Dk}g000000BG000000t5g6AO-*c z00000000000Pzd}000000t5g6Q~>}000000000002=NR6000000t5g6WCQ>J00000 z0Dk}g01)vE0000000IO60Ez_w000000000002uKM0000000IO607VG^0000000000 z03h)U0000000IO609ye7000000000004VVc0000000IO60NMip000000000005I_k z0000000IO60LTRZ0000000000066gs0Dk}g00066001-x0000000000002Po3;+NC z00066006=T0000000000002nw3;+NC00066003AA0000000000002<&3;+NC00066 z005f^0000000000003C=3;+NC00066007ef0000000000003a|3;+NC000660Dk~m z0ssI2000000001J@eBX}000021ONc32mk;8000000001R@eBX}000021ONck00000 z000000001Z@eBX}000021ONcy1poj5000000001h@eBX}000021ONb&0{{R300000 z0001p@eBX}000021ONbi1^@s60Dk}g00000kns!v000000t5g6`Un630000000000 znDGn%000000t5g6*9QOq0000000000pz#a<000000t5g66$AhP0000000000sPPN{ 
z000000t5g6vjYGC0000000000u<;B4000000t5g6rv?B30000000000xPS2s00000 z00IO6044_j00000000000KoAK0000000IO60J;PK00000000000LbwS0000000IO6 z0HXl_00000000000MPLa0000000IO605<^u00000000000NC*i0000000IO60J{MI z00000000000O0Wq000000Dl4m002G)0000000000008Lm3;+NC00066001Ti00000 z00000008ju3;+NC00066004gn0000000000008*$3;+NC00066000FC0000000000 z0002;3;+NC000660027%0000000000000Q`3;+NC00066000~W0Dk}g000000000G z@(cg~000021ONbW0RR91000000000O@(cg~000021ONbU0RR91000000000W@(cg~ z000021ONd22LJ#7000000000e@(cg~000021ONcq0ssI2000000000m@(cg~00002 z1ONaz0{{R3000000Dk}gIPwet000000t5g6as>bY0000000000K=KR#000000t5g6 zIs^a!0000000000Nb(E-000000t5g6*8%_l0000000000Q1T1_000000t5g62?+oI z0000000000Sn>=2000000t5g6E&~7n0000000000VDbzA0Dk}g00IO60M!5h00000 z000000BG_I0000000IO6015~I00000000000C4gQ0000000IO60LTIW0000000000 z0C@5Y0000000IO60M-Bi00000000000D$rg0000000IO60Ji}E00000000000EqGo z0000000IO60Dr;=0000000000005Bk3;+NC00066008s^0000000000005Zs3;+NC z00066000C80000000000005x!3;+NC00066004Id0000000000005}+3;+NC00066 z007eh0000000000006M^3;+NC00066004jo000000Dk}g0002E@(cg~000021ONcg z2mk;8000000002M@(cg~000021ONcn00000000000002U@(cg~000021ONa>2><{9 z000000002c@(cg~000021ONbg1^@s6000000002k@(cg~000021ONbC0ssI200000 z0002s@_!5f000000t5g66axSN0000000000=<*Bz000000t5g6Tm=9C0000000000 z@bU}*000000t5g6mj?g<0000000000`0@+@000000t5g6uLl4C00000000000P_q0 z000000t5g6T>$_900000000002=fd8000000)GSm009R8000000000001)#G00000 z00IO603`?j000000000002uQO0000000IO60DTDn000000000003h=W0000000IO6 z0L=mb000000000004Vbe0000000IO60N4Nk000000000005J0m0000000IO6073%* z0Dk}g000000021i3;+NC00066006oL0000000000002Pq3;+NC00066002@100000 z00000002ny3;+NC00066007kk0000000000002<)3;+NC00066006iF0000000000 z003C?3;+NC00066006uO00000000000Dk~r^9%q0000021ONbx0ssI2000000001J z^9%q0000021ONaT0ssI2000000001R^9%q0000021ONcp00000000000001Z^9%q0 z000021ONcT2mk;8000000001h^9%q0000021ONap1^@s6000000001p^9%q00Dk}g z0t5g60s{a50000000000kn;=x000000t5g6)dc_m0000000000nDYz(000000t5g6 
zjRF7w0000000000pz{m>000000t5g6*#!Uq0000000000sPhZ}000000t5g6c?kdj z0000000000u=5N6000000t5g6+J68500000000000J!rE0000000IO60P6(+00000 z000000KoGM0000000IO60MrEl00000000000Lb$U0000000IO60NVfn0000000000 z0MPRc0000000IO60B{2U00000000000NC>k0000000IO60QLd^000000Dk}g007|g z3;+NC00066002V=0000000000008Lo3;+NC00066004~x0000000000008jw3;+NC z00066002P&0000000000008*&3;+NC00066008_500000000000002=3;+NC00066 z007(o0000000000000Q|41WLs000021ONcS0RR91000000000G^b7z1000021ONa} z2mk;8000000000O^b7z1000021ONaj0{{R3000000000W^b7z1000021ONct00000 z000000000e^b7z1000021ONaQ1^@s6000000000m^b7z1000021b+Ykg9ZQq00000 z00000IP?qv000000t5g6Ne2J`0000000000K=cd%000000t5g6Bm)2d0000000000 zNc0Q<000000t5g6j0FGy0000000000Q1lD{000000t5g6Kn4H+0000000000So914 z000000t5g6G6ett0Dk}g000000ATbC0000000IO60Dc7k00000000000BH0K00000 z00IO60Nwxq00000000000C4mS0000000IO60N(%r00000000000C@Ba0000000IO6 z07(e|00000000000D$xi0000000IO6000R900000000000Dp+|3;+NC00066002S= z0000000000005Bm3;+NC00066006QC0000000000005Zu3;+NC00066002(}00000 z00000005x$3;+NC00066008F!0000000000005};3;+NC00066003bK0000000000 z006M`3;+NC0Dk}i1ONaL2mk;8000000002E^b7z1000021ONa90ssI2000000002M z^b7z1000021ONbb2><{9000000002U^b7z1000021ONc$1ONa4000000002c^b7z1 z000021ONc`1poj5000000002k^b7z1000021ONb+1AhPj0000000000;Peat00000 z0t5g6umb=90000000000==2N#000000t5g6GYJ3y0000000000@bnA-000000t5g6 z@&^C_0000000000`1A|_000000t5g6-~a#s00000000000QC$2000000t5g6vIqbG z000000Dk}g00{LA0000000IO60Q(03000000000001)*I0000000IO605<~w00000 z0000002uWQ0000000IO604)Ll000000000003h`Y0000000IO604xar0000000000 z04Vhg0000000IO60Lusf000000000005J6o0Dk}g00066008a?00000000000021k z3;+NC00066006NC0000000000002Ps3;+NC00066003tK0000000000002n!3;+NC z00066008j@0000000000002<+3;+NC00066002-20000000000003C^3;+NC00066 z0Dl0E2LJ#7000000001B^$Y+2000021ONc>2mk;8000000001J^$Y+2000021ONct z1^@s6000000001R^$Y+2000021ONc20ssI2000000001Z^$Y+2000021ONaF2LJ#7 z000000001h^$Y+2000021ONao0ssI20Dk}g00000i1iEr000000t5g6BLM&a00000 
z00000ko61z000000t5g6=>Y%$0000000000nDq<*000000t5g6OaTA@0000000000 zp!Ey@000000t5g6JqG{)0000000000sPzm0000000t5g6N(BG_0000000000uz&Ro z0000000IO609FD300000000000J!xG0000000IO60Hgx|00000000000KoMO00000 z00IO60FMFy00000000000Lb+W0000000IO607wM@00000000000MPXe0000000IO6 z03`ze00000000000NC{m000000Dl4m0055$0000000000007|i3;+NC00066007Aa z0000000000008Lq3;+NC00066000pM0000000000008jy3;+NC00066003?T00000 z00000008*)3;+NC000660083#00000000000002?3;+NC00066006E90Dk}g00000 z00008_6z_3000021ONcx00000000000000G_6z_3000021ONbw0RR91000000000O z_6z_3000021ONbq0RR91000000000W_6z_3000021ONc^0{{R3000000000e_6z_3 z000021ONa*1^@s6000000Dk}gF!l@p000000t5g6;s5{u0000000000IQ9$x00000 z0t5g6Hv#|v0000000000K=up(000000t5g6(E$Je0000000000NcIc>000000t5g6 z;{X5v0000000000Q1%P}000000t5g65C#AM0000000000SoRD60Dk}g00IO609XP5 z00000000000AThE0000000IO60A2wA00000000000BH6M0000000IO60K)+Q00000 z000000C4sU0000000IO60OSAw00000000000C@Hc0000000IO6000F50000000000 z0D$%k0000000IO60Dt8G0000000000004;g3;+NC00066005E*0000000000005Bo z3;+NC00066007|x0000000000005Zw3;+NC00066003740000000000005x&3;+NC z00066000ID0000000000005}=3;+NC00066008Cy000000Dk}g00026_6z_300002 z1ONc>0{{R3000000002E_6z_3000021ONcW0{{R3000000002M_6z_3000021ONbu z2mk;8000000002U_6z_3000021ONa62LJ#7000000002c_6z_3000021ONbd0ssI2 z000000002k_J0fj000000t5g6=Kufz0000000000;Pwmv000000t5g6h6n%v00000 z00000==KZ%000000t5g6gb4ru0000000000@b(M<000000t5g6@&f<>0000000000 z`1T9{000000t5g6+6Mpt00000000000QU?4000000)GSm03ZYa000000000000{RC z0000000IO6022lP000000000001)>K0000000IO60O$Y!000000000002ucS00000 z00IO60KNwR000000000003i1a0000000IO60HXx}000000000004Vni0000000IO6 z0L}sc0Dk}g00000001!e3;+NC000660062600000000000021m3;+NC00066008R% z0000000000002Pu3;+NC00066007Gb0000000000002n$3;+NC000660009A00000 z00000002<;3;+NC00066003nI00000000000Dk~j_Y434000021ONcQ0RR9100000 z0001B_Y434000021ONaE1poj5000000001J_Y434000021ONab2><{9000000001R z_Y434000021ONbv0RR91000000001Z_Y434000021ONbP2mk;8000000001h_Y434 
z0Dk}g0t5g6w*&wH0000000000i1!Qt000000t5g6>;M1&0000000000koOD#00000 z0t5g6?EnA(0000000000nD-0-000000t5g6ZU+DW0000000000p!W;_000000t5g6 zh64Zq0000000000sP_y2000000t5g6_HA0000000IO60Fws* z00000000000J!%I0000000IO60L%dZ00000000000KoSQ0000000IO60FeOz00000 z000000Lb?Y0000000IO60PX+)00000000000MPdg0000000IO60OSV%000000Dk}g z007wc3;+NC00066006)O0000000000007|k3;+NC00066007Sf0000000000008Ls z3;+NC00066000jL0000000000008j!3;+NC00066000370000000000008*+3;+NC z00066000OH00000000000002^41WLs000021ONcs1ONa40000000008_zVC500002 z1ONa61^@s6000000000G_zVC5000021ONac0RR91000000000O_zVC5000021ONb8 z1ONa4000000000W_zVC5000021ONc=00000000000000e_zVC5000021b+Yk8vy_S z0000000000F!&4r000000t5g67zqFX0000000000IQR?z000000t5g6@c;k-00000 z00000K==#*000000t5g6J_i5*0000000000Ncao@000000t5g6W&!{J0000000000 zQ1}c0000000t5g6#R32T0Dk}g0000009g180000000IO600II400000000000ATnG z0000000IO601F5J00000000000BHCO0000000IO608jz|00000000000C4yW00000 z00IO60F?y*00000000000C@Ne0000000IO60IUW800000000000Dpk^3;+NC00066 z0027&0000000000004;i3;+NC00066008m`0000000000005Bq3;+NC00066002<| z0000000000005Zy3;+NC00066008m;0000000000005x)3;+NC00066007|v00000 z00000005}?3;+NC0Dk}i1ONb_0{{R30000000026_zVC5000021ONc02mk;800000 z0002E_zVC5000021ONaG2mk;8000000002M_zVC5000021ONaM0ssI2000000002U z_zVC5000021ONbj2mk;8000000002c_zVC5000021ONb22Y&zn0000000000*!T

000000t5g6E&%`l0000000000`1lL}000000t5g6 z*9ZUr000000Dk}g008+60000000IO60P_F<000000000000{XE0000000IO60Q3L= z000000000001){M0000000IO606_%+000000000002uiU0000000IO60EYtr00000 z0000003i7c0000000IO60DA}k000000000004Vtk0Dk}g00066007+w0000000000 z001!g3;+NC00066000~T00000000000021o3;+NC00066000vK0000000000002Pw z3;+NC00066003YK0000000000002n&3;+NC00066008v>0000000000002<=3;+NC z000660Dl1X000000000000013`3wL6000021ONbQ2LJ#7000000001B`3wL600002 z1ONc}0{{R3000000001J`3wL6000021ONcM0RR91000000001R`3wL6000021ONaT z1poj5000000001Z`3wL6000021ONai1ONa40Dk}g00000fcXpn000000t5g6?gsz> z0000000000i1`cv000000t5g6%LM=c0000000000kogP%000000t5g6<_G`)00000 z00000nE4C<000000t5g6_W%F@0000000000p!o~{000000t5g6(g*+m0000000000 zsDJqk0000000IO60J{YM00000000000I>NC0000000IO60ICB300000000000J!-K z0000000IO602BrQ00000000000KoYS0000000IO60AK7>-00000 z0t5g6`2YX_0000000000Ncs!_000000t5g6j06Ax0000000000Q2Go20Dk}g00IO6 z0DuVq000000000009g7A0000000IO60HOo{00000000000ATtI0000000IO60Eh(u z00000000000BHIQ0000000IO60M!8i00000000000C4&Y0000000IO60E7Yn00000 z000000C@Tg0000000IO60Dp=C0000000000004mc3;+NC00066008;`0000000000 z004;k3;+NC00066003?Y0000000000005Bs3;+NC00066003+U0000000000005Z! 
z3;+NC00066001Qk0000000000005x+3;+NC00066007AY000000Dk}g0001}`V0U7 z000021ONah1poj50000000026`V0U7000021ONc(1poj5000000002E`V0U700002 z1ONcE1poj5000000002M`V0U7000021ONbR0{{R3000000002U`V0U7000021ONd0 z00000000000002c`hN@n000000t5g6E(HJp0000000000*!m0r000000t5g6r3e53 z0000000000;Q9;z000000t5g6eFy*m0000000000==ux*000000t5g6`~Uy|00000 z00000@cIk@000000t5g6ssR810000000000`1%Y0000000)GSm06+%-0000000000 z008?80000000IO60B`{S000000000000{dG0000000IO607eJ^000000000001*2O z0000000IO60CENZ000000000002uoW0000000IO60E7kr000000000003iDe00000 z00IO601W~F0Dk}g00000001ca3;+NC00066004Ug0000000000001!i3;+NC00066 z0070{{R3000000001Z z`wRd80Dk}g0t5g6aRdMW0000000000fcp#p000000t5g6VFdsH0000000000i2Dox z000000t5g6eFp#l0000000000koyb(000000t5g6odN&=0000000000nEMO>00000 z0t5g6iU$Ay0000000000p!*B}000000t5g6y?+J(00000000000I2&60000000IO6 z0003100000000000I>TE0000000IO60IC8200000000000J!@M0000000IO60ObS# z00000000000KoeU0000000IO60Eq+u00000000000Lc3c0000000IO60099200000 z0Dk}g007YY3;+NC00066005~40000000000007wg3;+NC00066001ln0000000000 z007|o3;+NC00066002h?0000000000008Lw3;+NC00066000I90000000000008j& z3;+NC00066007tn0000000000008*=41WLs000021ONa70RR910000000000{0sm9 z000021ONa}0RR910000000008{0sm9000021ONaS1^@s6000000000G{0sm900002 z1ONcg1ONa4000000000O{0sm9000021ONad2><{9000000000W{0sm9000021b+Yk z&IJGf0000000000DEtfn000000t5g6KnMT;0000000000F#HSv000000t5g6&IkYi z0000000000IQ$F%000000t5g61OWg50000000000K>Q2<000000t5g6z61aO00000 z00000Nc;={000000t5g6c?SRh0Dk}g0000008so40000000IO600ssC0000000000 z09gDC0000000IO606Yi)00000000000ATzK0000000IO6038GX00000000000BHOS z0000000IO60GkH@00000000000C4;a0000000IO60DK7m00000000000DpM=3;+NC z00066005x~0000000000004me3;+NC00066008j?0000000000004;m3;+NC00066 z000I70000000000005Bu3;+NC00066004Rk0000000000005Z$3;+NC00066003DC z0000000000005x;3;+NC0Dk}i1ONc!1^@s6000000001}{0sm9000021ONc}2LJ#7 z0000000026{0sm9000021ONcr2mk;8000000002E{0sm9000021ONaB0RR9100000 z0002M{0sm9000021ONag0ssI2000000002U{0sm9000021ONba0e=7h0000000000 
z(EJPl000000t5g61p@#80000000000*!&Ct000000t5g6+Xest0000000000;QR~# z000000t5g6OacG^0000000000===--000000t5g6asmJV0000000000@caw_00000 z0t5g683+IX000000Dk}g0Qme20000000IO60C@rc0000000000008|A0000000IO6 z0Er0z000000000000{jI0000000IO600;p9000000000001*8Q0000000IO60OJS% z000000000002uuY0000000IO604f6j000000000003iJg0Dk}g00066006fF00000 z00000001cc3;+NC00066006520000000000001!k3;+NC00066007Ad0000000000 z0021s3;+NC00066000&T0000000000002P!3;+NC00066007N3j000000t5g6 zlL7z$0000000000fc*>r000000t5g6u>k-80000000000i2V!z000000t5g6cm)6e z0000000000ko^n*000000t5g65(fYP0000000000nEea@000000t5g63IPBB00000 z00000pnv@g0000000IO601E*C00000000000I2;80000000IO603`(g0000000000 z0I>ZG0000000IO608s}300000000000J!}O0000000IO603iba00000000000KokW z0000000IO60E!6!00000000000Lc9e000000Dl4m000aD0000000000007Ya3;+NC z00066000dE0000000000007wi3;+NC00066001xp0000000000007|q3;+NC00066 z006iI0000000000008Ly3;+NC00066001un0000000000008j)3;+NC00066000^S z0Dk}g000000002^{R{vA000021ONco2LJ#70000000000{tN&B000021ONaI0RR91 z0000000008{tN&B000021ONbw0ssI2000000000G{tN&B000021ONb@0RR9100000 z0000O{tN&B000021ONaZ2><{9000000Dk}gApQ&h000000t5g6#smNW0000000000 zDEiE>000000t5g6M*;u<0000000000Nd61}0Dk}g z00IO602%`T000000000008su60000000IO60Pq9=000000000009gJE0000000IO6 z0BZ;U00000000000AT(M0000000IO60FeX$00000000000BHUU0000000IO609pe8 z00000000000C4^c0000000#sB04^j30000000000004mg3;+NC000LB002$_00000 z00000008}azp4EL0FkmDmugT18-D`;@DZ9o`4GYckstye)*m1P)gK@O)gK@O)vyBq z@DZLs`4Hj+kstye*02Kr@DZXw`49pHkstye*02Kr@DZj!`4A!nkstye*02Kr@DZv& z`4B<{kstye*02Kr@DZ*+`4D0Skstye*02Kr@DZ{=`4EBykstye*02Kr@P84uK=}}& z1(6^EAJ(t~0PqpIK=}~D1(6^EAJ(t~0PqpMK=}~j1(6^EAJ(t~0PqpQK=}{?29Y2F zAJ(t~0PqpUK=}|N29Y2FAJ(t~0PqpYK=}|t29Y2FAJ(t~0PqpcK=}}229Y2FAJ(t~ z0PqpgK=}}Y29Y2FAJ(t~0Dtfi)f;s%i*0w30}0|4+5;z0Qj0tb;G0w30}0|4+5=0N!nA_tKm0w30}0|4+5>OlDr zLI;r`0w30}0|4+5?m+nvVh52R0w30}0|4+5@<90zf(MZx0w30}1AhSU5%xg&5TXZ> zAOauOumb?_5&A&+5W)wMAOauOumb?_5&l5=5aI`sAOauOumb?_5duN^5CRC1AOauO 
zumb?_5e7l|5F!YXAOauOumb?_5eh;15JCu%AOauOumb?_5e`B55Ml_CAOauOumb?_ z5fVZ95P}GiAOauOuzv#p@DUb4`4FNAkstye*02Kr@DUn8`4GYgkstye*02Kr@DUzC z`4Hj=kstye*02Kr@DU`4EB)k$)fpAJ(t~0PqpELHQ7(4Ur%MAJ(t~ z0PqpILHQ8E4Ur%MAJ(t~0PqpMLHQ8k4Ur%MAJ(t~0PqpQLHQ5@4v`=NAJ(t~0PqpU zLHQ6O4v`=NAJ(t~0PqpYLHQ6u4v`=NAJ(t~0PqpcLHQ734v`=NAJ(t~0PqpgLHQ7Z z4u6p#0w30}0|4+5)OuJsLJyH30w30} z0|4+5?m_twVh@oZ0w30}0|4+5@5aJJ!AOauOumb?_5duQ_5CRa9AOauOumb?_5e7o} z5F!wfAOauOumb?_5eh>25JC`40uzxS0w30}0|4+5Wkstye z*02Kr@DY|m`4FNMkstye*02Kr@DZ9q`4GYskstye*02Kr@DZLu`4Hk1kstye*02Kr z@DZXy`49pXkstye*02Kr@DZj$`4A!%kstye*02Kr@DZv)`4B=Ckstye*02Kr@DZ*; z`4D0ikstye*02Kr@P84qLirGa6_FqUAJ(t~0PqpELirG)6_FqUAJ(t~0PqpILirHF z6_FqUAJ(t~0PqpMLirHl6_FqUAJ(t~0PqpQLirE^7LgzVAJ(t~0PqpULirFP7LgzV zAJ(t~0PqpYLirFv7LgzVAJ(t~0PqpcLirG47LgzVAJ(t~0Dtfi(n9$Vf)O%PtLKl%B0w30}0|4+5?n3zx zVi%Dh0w30}1AhSU5%NO$5P}zxAOauOumb?_5%xm)5TX~6AOauOumb?_5&A;;5W*Lc zAOauOumb?_5&lB?5aJh+AOauOumb?_5duT`5CRyHAOauOumb?_5e7r~5F!|nAOauO zumb?_5eh^35JDJ{AOauOumb?_5e`H75MmgSAOauOuzv#p@DUP2`4EB_kstye*02Kr z@DUb6`4FNQkstye*02Kr&=DF#`4GYwkstye*02Kr&=DR(`4Hk5kstye*02Kr&=Dd- z`49pbkstye*02Kr&=Dp>`4A!*kstye*02Kr&=D#_`4B=Gkstye*02Kr&=D>}`4D0m zkstye)_<@A0MHRKL-`Pb8Id3YAJ(t~0MHROL-`P*8Id3YAJ(t~0MHRSL-`QG8Id3Y zAJ(t~0MHRWL-`Qm8Id3YAJ(t~0MHRaL-`N_8j&CZAJ(t~0MHReL-`OQ8j&CZAJ(t~ z0MHRiL-`Ow8j&CZAJ(t~0MHRmL-`P58j&CZAAi=c0|3wwQbYL=f*O$^0w30}0|3ww zRzvv^q8gDP0w30}0|3wwT0{8|!Wxkv0w30}0|3wwUPJj1;u?`40w30}0|3wwVng{5 z0vnMa0w30}0|3wwW<&W9A{&t)0w30}0|3wwYD4)DLK~4F0w30}0|3wwZbSJHVjGbl z0)HRYumb?l5pqNM5P}<#AOauOumb?l5q3lQ5TYBAAOauOumb?l5qd-U5W*XgAOauO zumb?l5q?AY5aJt=AOauOumb?l5rRYc5CR;LAOauOumb?l5r#wg5F#9rAOauOumb?l z5sE|k5JDW0AOauOumb?l5spLo5MmsWAb$cM*02Kr&=HbD`4EB}kstye*02Kr&=HnH z`4FNUkstye*02Kr&=HzL`4GY!kstye*02Kr&=HL-`Qm9g!dcAJ(t~0MHS_L-`N_9+4mdAJ(t~0MHS}L-`OQ9+4mdAJ(t~0MHT2 zL-`Ow9+4mdAJ(t~0MHT6L-`P59)FP_0w30}0|3ww(nI+Wf*z3|0w30}0|3ww)O=VuLLZSJ0w30}0|3ww?nC(yVt*fzAOauO zumb?l5%NR%5P~0(AOauOumb?l5%xp*5TYNEAOauOumb?l5&A><5W*jkAOauOumb?l 
z5&lE@5aJ(^AOauOumb?l5duW{5CR~PAOauOumb?l5e7v05F#LvAOauOumb?l5eh{4 z5JDi4AOauOumb?l5e`K85PxDIkstye*02Kr&=C?u`4EC2kstye*02Kr&=D3y`4FNY zkstye*02Kr&=DF$`4GY&kstye*02Kr&=DR)`4HkDkstye*02Kr&=Dd;`49pjkstye z*02Kr&=Dp?`4A!@kstye*02Kr&=D#``4B=Okstye*02Kr&=D>~`F{{%A(0>gAJ(t~ z0MHRKMEMYcA(0>gAJ(t~0MHROMEMY+A(0>gAJ(t~0MHRSMEMZHA(0>gAJ(t~0MHRW zMEMZnA(0>gAJ(t~0MHRaMEMW`B9R~hAJ(t~0MHReMEMXRB9R~hAJ(t~0MHRiMEMXx zB9R~hAJ(t~0MHRmM1T1ZVj__s0w30}0|3wwQbhR>f+CS10w30}0|3wwRz&#_q9TzX z0w30}0|3wwT15E}!Xl9%0w30}0|3wwUPSp2;v$hC0w30}0|3wwVnq260wa+i0w30} z0|3wwW<>cAA|sI?0w30}0|3wwYDD=ELL-qN0w30}0|3wwZhu7i5Mm>dAOauOumb?l z5pqQN5P~C-AOauOumb?l5q3oR5TYZIAOauOumb?l5qd=V5W*voAOauOumb?l5q?DZ z5aJ_|AOauOumb?l5rRbd5CSBTAOauOumb?l5r#zh5F#XzAOauOumb?l5sF0l5JDu8 zAOauOumb?l5r2+E`4D0xkstye*02Kr&=HbE`4EC6kstye*02Kr&=HnI`4FNckstye z*02Kr&=HzM`4GY+kstye*02Kr&=HuGMEMY6C6OQkAJ(t~0MHS# zMEMYcC6OQkAJ(t~0MHS(MEMY+C6OQkAJ(t~0MHS-MEMZHC6OQkAJ(t~0MHS>MEMZn zC6OQkAJ(t~0MHS_MEMW`CXpZlAJ(t~0MHS}MEMXRCXpZlAJ(t~0MHT2MEMXxCXpZl zAJ(t~0DsUC&P4eTVkVIw0w30}0|3ww(nR?Xf+mq50w30}0|3ww)O}bvLMM?R0w30}1AhR}5$;6!5Mn2hAOauOumb?l5%NU& z5P~O>AOauOumb?l5%xs+5TYlMAOauOumb?l5&A^=5W**sAOauOumb?l5&lH^5aK71 zAOauOumb?l5duZ|5CSNXAOauOumb?l5e7y15F#j%AOauOumb?l5eh~55JD)CAOauO zuzv#p&=C$r`4D0#kstye*02Kr&=C?v`4ECAkstye*02Kr&=D3z`4FNgkstye*02Kr z&=DF%`4GY=kstye*02Kr&=DR*`4HkLkstye*02Kr&=Dd<`49prkstye*02Kr&=Dp@ z`4A#0kstye*02Kr&=D#{`4B=Wkstye)_<@A0MHRGMfnh7DUl!oAJ(t~0MHRKMfnhd zDUl!oAJ(t~0MHROMfnh-DUl!oAJ(t~0MHRSMfniIDUl!oAJ(t~0MHRWMfnioDUl!o zAJ(t~0MHRaMfnf{Dv=-pAJ(t~0MHReMfngSDv=-pAJ(t~0MHRiMfngyDv=-pAAi=c z0|3wwPDS|;Vk(g!0w30}0|3wwQbqX?f+~?90w30}0|3wwRz>*`qAHOf0w30}0|3ww zT1EK~!YYv<0w30}0|3wwUPbv3;wq6K0w30}0|3wwVnz870xOXq0w30}0|3wwW<~iB zA}f&~0w30}0|3wwYDM`FLMxFV0)HRYumb?l5pG5K5MnElAOauOumb?l5pqTO5P~a_ zAOauOumb?l5q3rS5TYxQAOauOumb?l5qd@W5W*{wAOauOumb?l5q?Ga5aKJ5AOauO zumb?l5rRee5CSZbAOauOumb?l5r#$i5F#v*AOauOumb?l5sF3m5JD`GAb$cM*02Kr z&=HPB`4D0(kstye*02Kr&=HbF`4ECEkstye*02Kr&=HnJ`4FNkkstye*02Kr&=HzN z`4GY^kstye*02Kr&=HMfnioEs-DsAJ(t~ 
z0MHS_Mfnf{E|DMtAJ(t~0MHS}MfngSE|DMtAJ(t~0MHT2MfngyE`O0A0w30}0|3ww z&PDkUVlI&&0w30}0|3ww(na|Yf-aFD0w30}0|3ww)P7hwLVqujAOauOumb?l5$;9#5MnQpAOauOumb?l5%NX(5P~m}AOauO zumb?l5%xv-5TY-UAOauOumb?l5&A{>5W+8!AOauOumb?l5&lK_5aKV9AOauOumb?l z5duc}5CSlfAOauOumb?l5e7#25F#*{qB4;n0w30}0|3wwT1NR0!ZMK{ z0w30}0|3wwUPk#4;xdsS0w30}0|3wwVn+E80yB{y0w30}0|3wwW=8oCA~TU70w30} z0|3wwYJW!g5JEGNAOauOumb?l5pG8L5MnctAOauOumb?l5pqWP5P~z2AOauOumb?l z5q3uT5TY}YAOauOumb?l5qd`X5W+K&AOauOumb?l5q?Jb5aKhDAOauOumb?l5rRhf z5CSxjAOauOumb?l5r#(j5F#{@AOauOumb?l5r2wC`4B=hkstye*02Kr&=HPC`4D0> zkstye*02Kr&=HbG`4ECMkstye*02Kr&=HnK`4FNskstye*02Kr&=HzO`4GZ1kstye z*02Kr&=HuCM)?pzHIX0!AJ(t~0MHSxM)?q8HIX0!AJ(t~0MHS#M)?qeHIX0!AJ(t~0MHS( zM)?q;HIX0!AJ(t~0MHS-M)?rJHIX0!AJ(t~0MHS>M)?rpHIX0!AJ(t~0MHS_M)?o| zHjy9#AJ(t~0MHS}M)?pTHjy9#AJ(t~0DsUC%0~GRLN<{g0w30}0|3ww&PMqVVm6T= z0w30}0|3ww(nk3Zf;N#L0w30}0|3ww)<*ddqBfBr0w30}0|3ww+D7>h!Zwj00w30} z0|3ww-bVQl;x>^W0w30}0|3ww;zs!p0ymK$0w30}0|3ww=0^DtA~%sB0w30}1AhR} z5$Z7kstye*02Kr&=Cqi`4Bh0w30}0|3wwN0w30}0|3wwPC)q(VgivM0w30}0|3wwQb73- zf&!5s0w30}0|3wwRzUd>q5_d10w30}0|3wwT0r>_!UB;X0w30}0|3wwUO@Q};sTK% z0w30}0|3wwVnF#20t1mC0w30}0|3wwW`98W5F!JSAOauOumb?l5o$pB5JCfyAOauO zumb?l5pF>F5Ml$7AOauOumb?l5pqEJ5P}1dAOauOumb?l5q3cN5TXN-AOauOumb?l z5qd!R5W)kIAOauOumb?l5q?1V5aI)oAOauOumb?l5rRPZ5CQ~|AOauOumb?l5r2k2 z`4A!mkstye*02Kr&=HD2`4B<`kstye*02Kr&=HP6`4D0Rkstye*02Kr&=HbA`4EBx zkstye*02Kr&=HnE`4FN6kstye*02Kr&=HzI`4GYckstye*02Kr&=Hu8K=}|N1(6^EAJ(t~0MHStK=}|t1(6^EAJ(t~ z0MHSxK=}}21(6^EAJ(t~0MHS#K=}}Y1(6^EAJ(t~0MHS(K=}}&1(6^EAJ(t~0MHS- zK=}~D1(6^EAJ(t~0MHS>K=}~j1(6^EAJ(t~0MHS_K=}{?29Y2FAJ(t~0DsUC#z6TH zA_kEl0w30}0|3ww%0T%LLI#l_0w30}0|3ww&OrGPVg`{Q0w30}0|3ww(m?qTf(DTw z0w30}0|3ww)f;s%i*0w30} z0|3ww;z0Qj0tb;G0w30}1AhR}5#~Vo5F!VWAOauOumb?l5$Zts5JCr$AOauOumb?l z5$-_w5Ml?BAOauOumb?l5%NI!5P}DhAOauOumb?l5%xg&5TXZ>AOauOumb?l5&A&+ z5W)wMAOauOumb?l5&l5=5aI`sAOauOumb?l5duN^5CRC1AOauOuzv#p&=Cef`4A!q zkstye*02Kr&=Cqj`4B<~kstye*02Kr&=C$n`4D0Vkstye*02Kr&=C?r`4EB#kstye 
z*02Kr&=D3v`4FNAkstye*02Kr&=DFz`4GYgkstye*02Kr&=DR%`4Hj=kstye*02Kr z&=Dd*`49pLkstye)_<@A0MHR8LHQ6O36UTIAJ(t~0MHRCLHQ6u36UTIAJ(t~0MHRG zLHQ7336UTIAJ(t~0MHRKLHQ7Z36UTIAJ(t~0MHROLHQ7(36UTIAJ(t~0MHRSLHQ8E z36UTIAJ(t~0MHRWLHQ8k36UTIAJ(t~0MHRaLHQ5@3XvcJAAi=c0|3wwMnU-yA_|cp z0w30}0|3wwNLHQ8k4Ur%MAJ(t~0MHS_LHQ5@4u6p#0w30}0|3ww#zFZIA`X!t0w30} z0|3ww%0c-MLJpB20w30}0|3ww&O!MQVh)iY0w30}0|3ww(n0wUf)0@&0w30}0|3ww z)5aJJ!AOauOumb?l5duQ_5Pt#?kstye*02Kr&=Ceg`4A!ykstye*02Kr z&=Cqk`4B=7kstye*02Kr&=C$o`4D0dkstye*02Kr&=C?s`4EB-kstye*02Kr&=D3w z`4FNIkstye*02Kr&=DF!`4GYokstye*02Kr&=DR&`4Hj|kstye*02Kr&=Dd+`F{`s z5s@GQAJ(t~0MHR8LirFP5s@GQAJ(t~0MHRCLirFv5s@GQAJ(t~0MHRGLirG45s@GQ zAJ(t~0MHRKLirGa5s@GQAJ(t~0MHROLirG)5s@GQAJ(t~0MHRSLirHF5s@GQAJ(t~ z0MHRWLirHl5s@GQAJ(t~0MHRaLVx)X0uqrR0w30}0|3wwMnd@zA`+1x0w30}0|3ww zN<#S%LK2Z60w30}0|3wwPD1$*ViJ)c0w30}0|3wwQbPFkstye*02Kr&=HnG`4FNM zkstye*02Kr&=HzK`4GYskstye*02Kr&=Hu4LirE^6_FqU zAJ(t~0MHSpLirFP6_FqUAJ(t~0MHStLirFv6_FqUAJ(t~0MHSxLirG46_FqUAJ(t~ z0MHS#LirGa6_FqUAJ(t~0MHS(LirG)6_FqUAJ(t~0MHS-LirHF6_FqUAJ(t~0MHS> zLirHl6_FqUAJ(t~0DsUC!b15F0v3@V0w30}0|3ww#zOfJA{LP#0w30}0|3ww%0l@N zLKcxA0w30}0|3ww&O-SRViu7g0w30}0|3ww(n9$Vf)<5W*jkAOauOumb?F5&lE@5P#wykstye z*02Krun_`8`49pikstye*02Krun`7C`4A!?kstye*02Krun`JG`4B=Nkstye*02Kr zun`VK`4D0tkstye*02Krun`hO`4EC2kstye*02Krun`tS`4FNYkstye*02Krun`(W z`4GY&kstye*02Krun`_a`F{}NAdw&fAJ(t~0I(4vMEMW`A(0>gAJ(t~0I(4zMEMXR zA(0>gAJ(t~0I(4%MEMXxA(0>gAJ(t~0I(4*MEMY6A(0>gAJ(t~0I(4g zAJ(t~0I(4@MEMY+A(0>gAJ(t~0I(4{MEMZHA(0>gAJ(t~0I(50M1T1Z;vtbB0w30} z0|2lQLPYrx0wR$h0w30}0|2lQMnw4#A|jC>0w30}0|2lQN<{e(LL!kM0w30}0|2lQ zPDJ?-Vj__s0w30}0|2lQQbhR>f+CS10w30}0|2lQRz&#_q9TzX0w30}0|2lQT15E} z!Xl9%0w30}0|2lQUVlXS5aJ?{AOauOumb?F5n@F75CS8SAOauOumb?F5oSdB5F#Uy zAOauOumb?F5o$#F5JDr7AOauOumb?F5pG2J5Mm>dAOauOumb?F5pqQN5P~C-AOauO zumb?F5q3oR5TYZIAOauOumb?F5qd=V5W*voAOauOumb?F5r2L}`4HkGkstye*02Kr zun~ep`49pmkstye*02Krun~qt`4A!`kstye*02Krun~$x`4B=Rkstye*02Krun~?# z`4D0xkstye*02Kruo03(`4EC6kstye*02Kruo0F-`4FNckstye*02Kruo0R>`4GY+ 
zkstye*02KruzwMrMEMZnB#|HjAJ(t~0I(6FMEMW`C6OQkAJ(t~0I(6JMEMXRC6OQk zAJ(t~0I(6NMEMXxC6OQkAJ(t~0I(6RMEMY6C6OQkAJ(t~0I(6VMEMYcC6OQkAJ(t~ z0I(6ZMEMY+C6OQkAJ(t~0I(6dMEMZHC6OQkAJ(t~0DrI%zC`&D;w6zF0w30}0|2lQ z!bJHH0w$3l0w30}0|2lQ#zgrLA|{a_0w30}0|2lQ%0&4PLMD+Q0w30}0|2lQ&P4eT zVkVIw0w30}0|2lQ(nR?Xf+mq50w30}0|2lQ)AOauOumb?F z5%xs+5TYlMAOauOumb?F5&A^=5W**sAOauOuzv#puo3=5`4HkKkstye*02Krun_`9 z`49pqkstye*02Krun`7D`4A!~kstye*02Krun`JH`4B=Vkstye*02Krun`VL`4D0# zkstye*02Krun`hP`4ECAkstye*02Krun`tT`4FNgkstye*02Krun`(X`4GY=kstye z)_<@A0I(4rMfnioD3KrnAJ(t~0I(4vMfnf{DUl!oAJ(t~0I(4zMfngSDUl!oAJ(t~ z0I(4%MfngyDUl!oAJ(t~0I(4*Mfnh7DUl!oAJ(t~0I(4*`qAHOf0w30}0|2lQT1EK~!YYv<0)HRY zumb?F5ne_45aKG4AOauOumb?F5n@I85CSWaAOauOumb?F5oSgC5F#s)AOauOumb?F z5o$&G5JD@FAOauOumb?F5pG5K5MnElAOauOumb?F5pqTO5P~a_AOauOumb?F5q3rS z5TYxQAOauOumb?F5qd@W5W*{wAb$cM*02Krun~Sm`4HkOkstye*02Krun~eq`49pu zkstye*02Krun~qu`4A#3kstye*02Krun~$y`4B=Zkstye*02Krun~?$`4D0(kstye z*02Kruo03)`4ECEkstye*02Kruo0F;`4FNkkstye*02Kruo0R?`4GY^k$)fpAJ(t~ z0I(6BMfnioERi4rAJ(t~0I(6FMfnf{Es-DsAJ(t~0I(6JMfngSEs-DsAJ(t~0I(6N zMfngyEs-DsAJ(t~0I(6RMfnh7Es-DsAJ(t~0I(6VMfnhdEs-DsAJ(t~0I(6ZMfnh- zEs-DsAJ(t~0I(6dMfniIEq{?90w30}0|2lQzD4;E;w_ON0w30}0|2lQ!bSNI0xppt z0w30}0|2lQ#zpxMA}*020w30}0|2lQ%0>AQLN1XY0w30}0|2lQ&PDkUVlI&&0w30} z0|2lQ(na|Yf-aFD0w30}0|2lQ)5P!likstye*02Kruo3=6`4HkSkstye*02Krun_`A`49pykstye z*02Krun`7E`4A#7kstye*02Krun`JI`4B=dkstye*02Krun`VM`4D0-kstye*02Kr zun`hQ`4ECIkstye*02Krun`tU`4FNokstye*02Krun`(Y`F{|?Fp(evAJ(t~0I(4r zM)?rpFp(evAJ(t~0I(4vM)?o|F_9nwAJ(t~0I(4zM)?pTF_9nwAJ(t~0I(4%M)?pz zF_9nwAJ(t~0I(4*M)?q8F_9nwAJ(t~0I(4x0w30} z0|2lQMn?G%A~KO60w30}0|2lQN=Eq*LNbvc0w30}0|2lQPDc3{qB4;n0w30}0|2lQT7O3Q5W+H%AOauOumb?F5ne|5 z5aKeCAOauOumb?F5n@L95CSuiAOauOumb?F5oSjD5F#^?AOauOumb?F5o$*H5JEGN zAOauOumb?F5pG8L5MnctAOauOumb?F5pqWP5P~z2AOauOumb?F5q3uT5TY}YAOauO zumb?F5r29{`4GZ0kstye*02Krun~Sn`4HkWkstye*02Krun~er`49p$kstye*02Kr zun~qv`4A#Bkstye*02Krun~$z`4B=hkstye*02Krun~?%`4D0>kstye*02Kruo03* 
z`4ECMkstye*02Kruo0F<`4FNskstye*02KruzwMnM)?rJG?5?zAJ(t~0I(6BM)?rp zG?5?zAJ(t~0I(6FM)?o|HIX0!AJ(t~0I(6JM)?pTHIX0!AJ(t~0I(6NM)?pzHIX0! zAJ(t~0I(6RM)?q8HIX0!AJ(t~0I(6VM)?qeHIX0!AJ(t~0I(6ZM)?q;HIX0!AJ(t~ z0DrI%x<>gB!Znc~0w30}0|2lQzDD^F;x&;V0w30}0|2lQ!bbTJ0ydE#0w30}0|2lQ z#zy%NA~umA0w30}0|2lQ%0~GRLN<{g0w30}0|2lQ&PMqVVm6T=0w30}0|2lQ(nk3Z zf;N#L0w30}0|2lQ)<*ddqBfBr0w30}1AhRp5!y!i5W+T*AOauOumb?F5#C1m5aKqG zAOauOumb?F5#mPq5CS)mAOauOumb?F5#~nu5F$5`AOauOumb?F5$Zc(0w30}0|1Z_Mo0M& zB07;E0w30}0|1Z_N=Nw+LOPKk0w30}0|1Z_PDl9=Vmgr^0w30}0|1Z_Qb+j^f;y2P z0w30}0|1Z_R!8{|qB@Zv0)HRYumb>)5n4z25W+f)5nf065aK$KAOauO zumb>)5n@OA5CS`qAOauOumb>)5oSmE5F$H~AOauOumb>)5o$;I5JEeVAOauOumb>) z5pGBM5Mn!#AOauOumb>)5pqZQ5Q00AAOauOumb>)5q3xU5TZMgAb$cM*02KrkP&)E z`4GZ8kstye*02KrkP&`I`4Hkekstye*02KrkP(7M`49p;kstye*02KrkP(JQ`4A#J zkstye*02KrkP(VU`4B=pkstye*02KrkP(hY`4D0}kstye*02KrkP(tc`4ECUkstye z*02KrkP((g`49jAfd7A@JduAO0w30}0|1Z_nn(E%!aR{60w30}0|1Z_o=5o*;yjTc z0w30}0|1Z_qDT1<0zHu+0w30}0|1Z_rbqb@B0Z5H0w30}0|1Z_sz><{LOqcn0w30} z0|1Z_u1EP0Vm*-{0w30}0|1Z_vPbz4f<2KS0w30}0|1Z_wnzC8qCJ0+AOauOumb>) z5xPhD5W+o?AOauOumb>)5xz(H5aK)5yD6L5CT4tAOauOumb>)5ynUP z5F$R2AOauOumb>)5z0sT5JEnYAOauOumb>)5za^X5Mn-&AOauOumb>)5z)5!Off5TbuRkstye*02KrkP+HP`4GZBkstye*02KrkP+TT`4Hkhkstye z*02KrkP+fX`49p>kstye*02KrkP+rb`4A#Mkstye*02KrkP+%f`4B=skstye*02Kr zkP+@j`4D11kstye*02KrkP-4n`4ECXkstye*02KrkP-Gr`4E4iKan5;AJ(t~0FV*- zNBI!KKan5;AJ(t~0FV*>NBI!qKan5;AJ(t~0I(4PK=}{?0FfX9AJ(t~0I(4TK=}|N z0FfX9AJ(t~0I(4XK=}|t0FfX9AJ(t~0I(4bK=}}20FfX9AJ(t~0I(4fK=}}Y0FfX9 zAJ(t~0I(4jK>2?Vq5zQ~0w30}0|2lQ8bJ9F!T^yV0w30}0|2lQ9zgjJ;sB8#0w30} z0|2lQB0%{N0s)aA0w30}0|2lQCP4WRA_0*g0w30}0|2lQDnR)VLIIH=0w30}0|2lQ zEP0w30}0|2lQvOxI|f(4Nv0w30}0|0-p5w<}25TXT70w30}0|2lQ8bSFG!U&Nd0w30}0|2lQ9zppK;s}u-0w30}0|2lQB0>2O z0tt~I0w30}0|2lQCPDcSA_0w30}0|2lQqCxo(0u7NM z0w30}0|2lQra}1-A`Ous0w30}0|2lQszLb>LJg510w30}0|2lQu0i<_VhxcX0w30} z0|2lQvO)O}f(?I>AOauOumb?F5w=135TXr{AOauOumb?F5xPP75W)?SAOauOumb?F 
z5xznB5aJDyAOauOumb?F5yC`4E4C50M}OAJ(t~0I(7ELHQ7(50M}OAJ(t~0I(7ILHQ8E50M}OAJ(t~0I(7M zLHQ8k50M}OAJ(t~0I(4PLirE^5Ro7PAJ(t~0I(4TLirFP5Ro7PAJ(t~0I(4XLirFv z5Ro7PAJ(t~0I(4bLirG45Ro7PAJ(t~0I(4fLiv9Xf)J4)0w30}0|2lQ7DD+Dq7acF z0w30}0|2lQ8bbLH!Vr-l0w30}0|2lQ9zyvL;t-J_0w30}0|2lQB0~8P0uhlQ0w30} z0|2lQCPMiTA`y`w0w30}0|2lQDnj`XLJ^T50w30}0|2lQE<*VbViA!b0w30}0|2lQ zGD3g(5P}hrAOauOumb?F5jH~k5TX&0AOauOumb?F5jsNo5W*3WAOauOumb?F5k5ls z5aJP$AOauOumb?F5kf-w5CRgBAOauOumb?F5k^A!5F!$hAOauOumb?F5lTY&5JD1> zAOauOumb?F5l%w+5MmOMAOauOumb?F5mJ9b`4EB)5i&#h5P}(z zAOauOumb>)5jI2l5TY58AOauOumb>)5jsQp5W*ReAOauOumb>)5k5ot5aJn;AOauO zumb>)5kf=x5CR&JAOauOumb>)5k^D#5F#3pAOauOumb>)5lTb(5JDP}AOauOumb>) z5l%z-5MmmUAOe3M*02KrkP%Wt`4EB{kstye*02KrkP%ix`4FNSkstye*02KrkP%u# z`4GYykstye*02KrkP%)(`4Hk7kstye*02KrkP%`-`49pdkstye*02KrkP&7>`4A!- zkstye*02KrkP&J_`4B=Ikstye*02KrkP&V}`4D0oksyBpAJ(t~0FV)KL-`Pb8<8La zAJ(t~0FV)OL-`P*8<8LaAJ(t~0FV)SL-`QG8<8LaAJ(t~0FV)WL-`Qm8<8LaAJ(t~ z0FV)aL-`N_9FZUbAJ(t~0FV)eL-`OQ9FZUbAJ(t~0FV)iL-`Ow9FZUbAJ(t~0FV)m zL-`P59Fcz@0w30}0|1Z_l0*3rf*g?`0w30}0|1Z_mP7dvq8yPR0w30}0|1Z_nnU>z z!W@wx0w30}0|1Z_o)5wb)15P}_%AOauO zumb>)5w=755TYHCAOauOumb>)5xPV95W*diAOauOumb>)5xztD5aJz?AOauOumb>) z5yC_H5CR^NAOauOumb>)5ynIL5F#FtAOauOumb>)5z0gP5JDc2AOauOumb>)5za&T z5MqBGkstye*02KrkP*^D`4EC0kstye*02KrkP+5H`4FNWkstye*02KrkP+HL`4GY$ zkstye*02KrkP+TP`4HkBkstye*02KrkP+fT`49phkstye*02KrkP+rX`4A!>kstye z*02KrkP+%b`4B=Mkstye*02KrkP+@f`4E3%ACVveAJ(t~0FV*#L-`PbACVveAJ(t~ z0FV*(L-`P*ACVveAJ(t~0FV*-L-`QGACVveAJ(t~0FV*>L-`QmACVveAJ(t~0FV&^ zMEMW`Adw&fAJ(t~0FV&|MEMXRAdw&fAJ(t~0FV(1MEMXxAdw&fAJ(t~0FV(5MEQRZ zVjz(q0w30}0|1Z_5=8kBf*_F~0w30}0|1Z_7DV|Fq9BnV0w30}0|1Z_8btXJ!XS|# z0w30}0|1Z_9z^*N;vkVA0w30}0|1Z_B1HKR0wIwg0w30}0|1Z_CPeuVA|a6=0w30} z0|1Z_Dn$7ZLLreL0w30}0|1Z_E<}I%5Mm*bAOauOumb>)5i&&i5P~6*AOauOumb>) z5jI5m5TYTGAOauOumb>)5jsTq5W*pmAOauOumb>)5k5ru5aJ<`AOauOumb>)5kf@y z5CS5RAOauOumb>)5k^G$5F#RxAOauOumb>)5lTe)5JDo6AOauOumb>)5l(+Z`4D0v zkstye*02KrkP%Wu`4EC4kstye*02KrkP%iy`4FNakstye*02KrkP%u$`4GY)kstye 
z*02KrkP%))`4HkFkstye*02KrkP%`;`49plkstye*02KrkP&7?`4A!_kstye*02Kr zkP&J``4B=Qkstye*02KrkP&}wMEMY6Bat8iAJ(t~0FV)KMEMYcBat8iAJ(t~0FV)O zMEMY+Bat8iAJ(t~0FV)SMEMZHBat8iAJ(t~0FV)WMEMZnBat8iAJ(t~0FV)aMEMW` zB#|HjAJ(t~0FV)eMEMXRB#|HjAJ(t~0FV)iMEMXxB#|HjAJ(t~0FZwXjzswoVkD6u z0w30}0|1Z_l0^9sf+Ue30w30}0|1Z_mPGjwq9l)5wb-25P~I)5w=A6 z5TYfKAOauOumb>)5xPYA5W*#qAOauOumb>)5xzwE5aK0~AOauOumb>)5yC|I5CSHV zAOauOumb>)5ynLM5F#d#AOauOumb>)5z0jQ5JD!AAOauOumgVpkP*&A`4D0zkstye z*02KrkP*^E`4EC8kstye*02KrkP+5I`4FNekstye*02KrkP+HM`4GY;kstye*02Kr zkP+TQ`4HkJkstye*02KrkP+fU`49ppkstye*02KrkP+rY`4A!}kstye*02KrkP+%c z`4B=Ukstye*06sA0FV*xMEMY6Cy^imAJ(t~0FV*#MEMYcCy^imAJ(t~0FV*(MEMY+ zCy^imAJ(t~0FV*-MEMZHCy^imAJ(t~0FV*>MEMZnCy^imAJ(t~0FV&^Mfnf{D3Krn zAJ(t~0FV&|MfngSD3KrnAJ(t~0FV(1MfngyD3KrnAJ%`c0|1Z_4n_G8VknUy0w30} z0|1Z_5=HqCf+&$70w30}0|1Z_7Df3Gq9~Cd0w30}0|1Z_8b$dK!YGj-0w30}0|1Z_ z9!2>O;wX_I0w30}0|1Z_B1QQS0x6Lo0w30}0|1Z_CPn!WA}Ns|0w30}0|1Z_Dn)5iUjf5Mn8jAOauOumb>)5i&*j5P~U@AOauOumb>)5jI8n5TYrO zAOauOumb>)5jsWr5W*>uAOauOumb>)5k5uv5aKD3AOauOumb>)5kf`z5CSTZAOauO zumb>)5k^J%5F#p(AOauOumb>)5lTh*5JD=EAOe3M*02KrkP%Kr`4D0%kstye*02Kr zkP%Wv`4ECCkstye*02KrkP%iz`4FNikstye*02KrkP%u%`4GY?kstye*02KrkP%)* z`4HkNkstye*02KrkP%`<`49ptkstye*02KrkP&7@`4A#2kstye*02KrkP&J{`4B=Y zksyBpAJ(t~0FV)GMfnh7E0G`qAJ(t~0FV)KMfnhdE0G`qAJ(t~0FV)OMfnh-E0G`q zAJ(t~0FV)SMfniIE0G`qAJ(t~0FV)WMfnioE0G`qAJ(t~0FV)aMfnf{ERi4rAJ(t~ z0FV)eMfngSERi4rAJ(t~0FV)iMfngyERla80w30}0|1Z_jz#$pVl0s$0w30}0|1Z_ zl12Ftf-I3B0w30}0|1Z_mPPpxqAZah0w30}0|1Z_nnn2#!Yq*>0w30}0|1Z_o<;c( z;w+IM0w30}0|1Z_qDA=-0xgjs0w30}0|1Z_rbYP>A}x_10w30}0|1Z_szvz_LM?xh zAOauOumb>)5w1n~5MnKnAOauOumb>)5wb=35P~g{AOauOumb>)5w=D75TY%SAOauO zumb>)5xPbB5W+2yAOauOumb>)5xzzF5aKP7AOauOumb>)5yD0J5CSfdAOauOumb>) z5ynON5F##-AOauOumb>)5z0mR5JG=0kstye*02KrkP*&B`4D0*kstye*02KrkP*^F z`4ECGkstye*02KrkP+5J`4FNmkstye*02KrkP+HN`4GY`kstye*02KrkP+TR`4HkR zkstye*02KrkP+fV`49pxkstye*02KrkP+rZ`4A#6kstye*02KrkP+%d`4E3XFOeVu zAJ(t~0FV*xMfnh7FOeVuAJ(t~0FV*#MfnhdFOeVuAJ(t~0FV*(Mfnh-FOeVuAJ(t~ 
z0FV*-MfniIFOeVuAJ(t~0FV*>MfnioFOeVuAJ(t~0FV&^M)?o|Fp(evAJ(t~0FV&| zM)?pTFp(evAJ(t~0FV(1M)`jbLNJja0w30}0|1Z_4o3M9Vla^)0w30}0|1Z_5=QwD zf-sRF0w30}0|1Z_7Do9HqA-yl0w30}0|1Z_8b)5iUmg5MnWrAOauOumb>)5i&;k5P~t0AOauOumb>)5jIBo5TY@WAOauOumb>) z5jsZs5W+E$AOauOumb>)5k5xw5aKbBAOauOumb>)5kf}!5CSrhAOauOumb>)5k^M& z5F#>>AOauOumb>)5lVkX`4B=fkstye*02KrkP%Ks`4D0) z5w1r05MnivAOauOumb>)5wb@45P~(4AOauOumb>)5w=G85TZ4aAOauOumb>)5xPeC z5W+Q)AOauOumb>)5xz$G5aKnFAOauOumb>)5yD3K5CS%lAOauOumb>)5ynRO5F$2_ zAOauOumgVpkP*s8`4B=jkstye*02KrkP*&C`4D0@kstye*02KrkP*^G`4ECOkstye z*02KrkP+5K`4FNukstye*02KrkP+HO`4GZ3kstye*02KrkP+TS`4HkZkstye*02Kr zkP+fW`49p(kstye*02KrkP+ra`4A#Ekstye*06sA0FV*tM)?pzH<2I$AJ(t~0FV*x zM)?q8H<2I$AJ(t~0FV*#M)?qeH<2I$AJ(t~0FV*(M)?q;H<2I$AJ(t~0FV*-M)?rJ zH<2I$AJ(t~0FV*>M)?rpH<2I$AJ(t~0FV&^NBIx}IFTR%AJ(t~0FV&|NBIyUIFTR% zAJ%`c0|1Z_3P<@6LO78i0w30}0|1Z_4oCSAVmOf?0w30}0|1Z_5=Z$Ef;f>N0w30} z0|1Z_7DxFIqBxNt0w30p0|4+58b|pM!Z?v20w30p0|4+59!L2Q;y95Y0w30p0|4+5 zB1icU0y&W&0w30p0|4+5CP(=YA~}&D0v~_YkOKhl5h_Rd5JEYTAOauOkOKhl5iUph z5MnuzAOauOkOKhl5i&>l5P~_8AOauOkOKhl5jIEp5TZGeAOauOkOKhl5jsct5W+c; zAOauOkOKhl5k5!x5aKzJAOauOkOKhl5kg1#5CS@pAOauOkOKhl5k^P(5F$E}AOe3M z){p}L@DWN!`4B=nkstye){p}L@DWZ&`4D0{kstye){p}L@DWl+`4ECSkstye){p}L z@DWx=`4FNykstye){p}L@DW-^`4GZ7kstye){p}L@DW}|`4Hkdkstye){p}L@DXB1 z`49p-kstye){p}L@DXN5`4A#IksyBpAJ&iq0PqoNNBIy!JCPs)AJ&iq0PqoRNBIz9 zJCPs)AJ&iq0PqoVNBIzfJCPs)AJ&iq0PqoZNBIz zkstye){p}L@Db)m`4E31Kan5;AJ&iq0Pqp&NBIy!Kan5;AJ&iq0Pqp+NBIz9Kan5; zAJ&iq0Pqp=NBIzfKan5;AJ&iq0Pqp^NBIz2?VA^?#f0w30}0|1Z_ z3PAY~LI9B<0w30}0|1Z_4nX-3VgQjK0w30}0|1Z_5)5h_6W5JCZwAOauOumb>)5iUUa5Mlw5AOauO zumb>)5i&se5P|`bAOauOumb>)5jH^i5TXH*AOauOumb>)5jsHm5W)eGAOauOumb>) z5k5fq5aI!mAOauOumb>)5kf%u5CQ^`AOauOumb>)5k`MN`4A!kkstye*02KrkP%8i z`4B<^kstye*02KrkP%Km`4D0Pkstye*02KrkP%Wq`4EBvkstye*02KrkP%iu`4FN4 zkstye*02KrkP%uy`4GYakstye*02KrkP%)$`4Hj)kstye*02KrkP%`)`49pFkstye z*02KrkP&}oK=}|N1CbyCAJ(t~0FV)CK=}|t1CbyCAJ(t~0FV)GK=}}21CbyCAJ(t~ z0FV)KK=}}Y1CbyCAJ(t~0FV)OK=}}&1CbyCAJ(t~0FV)SK=}~D1CbyCAJ(t~0FV)W 
zK=}~j1CbyCAJ(t~0FV)aK=}{?1d$*DAJ(t~0FZwXhCulcA_S2j0w30}0|1Z_ia_}g zLIjZ@0w30}0|1Z_jzIYkVg!*O0w30}0|1Z_l0f+of&`Hu0w30}0|1Z_mO%Lsq6Cp3 z0w30}0|1Z_nn3vw!UT~Z0w30}0|1Z_o)5voA>5JCl!AOauOumb>)5w1Y_5Ml+9AOauOumb>) z5wbw}5P}7fAOauOumb>)5w<}25TXT)5xPM65W)qKAOauOumb>)5xzkA z5aI=qAOauOumb>)5yC+E5CR5~AOauOumgVpkP*f}`4A!okstye*02KrkP*s2`4B<| zkstye*02KrkP*&6`4D0Tkstye*02KrkP*^A`4EBzkstye*02KrkP+5E`4FN8kstye z*02KrkP+HI`4GYekstye*02KrkP+TM`4Hj;kstye*02KrkP+fQ`49pJkstye*06sA z0FV*pK=}|N2azBGAJ(t~0FV*tK=}|t2azBGAJ(t~0FV*xK=}}22azBGAJ(t~0FV*# zK=}}Y2azBGAJ(t~0FV*(K=}}&2azBGAJ(t~0FV*-K=}~D2azBGAJ(t~0FV*>K=}~j z2azBGAJ(t~0FV&^LHQ5@2$3KHAJ%`c0|1Z_20{4{A_$Qn0w30}0|1Z_3PJf0LI{x{ z0w30}0|1Z_4ng@4VhE8S0w30}0|1Z_5<&S8f(Vfy0w30}0|1Z_7D4$Cq6m>70w30} z0|1Z_8bSFG!U&Nd0w30}0|1Z_9zppK;s}u-0w30}0|1Z_B0>2O0tt~I0v~_Yumb>) z5hg+T5F!bYAOauOumb>)5h_9X5JCx&AOauOumb>)5iUXb5Ml|DAOauOumb>)5i&vf z5P}JjAOauOumb>)5jH{j5TXf@AOauOumb>)5jsKn5W)$OAOauOumb>)5k5ir5aJ1u zAOauOumb>)5kf)v5CRI3AOe3M*02KrkP${f`4A!skstye*02KrkP%8j`4B=1kstye z*02KrkP%Kn`4D0Xkstye*02KrkP%Wr`4EB%kstye*02KrkP%iv`4FNCkstye*02Kr zkP%uz`4GYikstye*02KrkP%)%`4Hj?kstye*02KrkP%`*`49pNksyBpAJ(t~0FV)8 zLHQ6O3y~lKAJ(t~0FV)CLHQ6u3y~lKAJ(t~0FV)GLHQ733y~lKAJ(t~0FV)KLHQ7Z z3y~lKAJ(t~0FV)OLHQ7(3y~lKAJ(t~0FV)SLHQ8E3y~lKAJ(t~0FV)WLHQ8k3y~lK zAJ(t~0FV)aLHQ5@43U2z0w30}0|1Z_hC%rdA`For0w30}0|1Z_ib44hLJW~00w30} z0|1Z_jzRelVhoWW0w30}0|1Z_l0o?pf((%$0w30}0|1Z_mO=Rtq70EB0w30}0|1Z_ znnC#x!VHlh0w30}0|1Z_o0w30}0|1Z_qCxo(0u6tWAOauOumb>)5vD=; z5F!ncAOauOumb>)5voD?5JC-+AOauOumb>)5w1b`5Mm9HAOauOumb>)5wbz~5P}Vn zAOauOumb>)5w=135TXr{AOauOumb>)5xPP75W)?SAOauOumb>)5xznB5aJDyAOauO zumb>)5yCLHQ8k50M}OAJ(t~ z0FV&^Liv9X0uYfP0w30}0|1Z_215A|A`p=v0w30}0|1Z_3PSl1LJ*N40w30}0|1Z_ z4np}5Vi1ua0w30}0|1Z_5<>Y9f)J4)0w30}0|1Z_7DD+Dq7acF0w30}0|1Z_8bbLH z!Vr-l0w30}0|1Z_9zyvL;t-J_0w30}0|1Z_B0_)p5CRdAAOauOumb>)5hg)5h_CY5JC}=AOauOumb>)5iUac5MmLLAOauOumb>)5i&yg5P}hrAOauO zumb>)5jH~k5TX&0AOauOumb>)5jsNo5W*3WAOauOumb>)5k5ls5aJP$AOauOumb>) 
z5kh}L`49pUkstye*02KrkP${g`4A!!kstye*02KrkP%8k`4B=9kstye*02KrkP%Ko z`4D0fkstye*02KrkP%Ws`4EB)5vD@<5F!)5voG@5JDA^AOauOumb>)5w1e{5MmXPAOauOumb>)5wb%05P}tvAOauOumb>) z5w=445TX^4AOauOumb>)5xPS85W*FaAOauOumb>)5xzqC5aJb)AOauOumgVpkP*T{ z`49pYkstye*02KrkP*g0`4A!&kstye*02KrkP*s4`4B=Dkstye*02KrkP*&8`4D0j zkstye*02KrkP*^C`4EB@kstye*02KrkP+5G`4FNOkstye*02KrkP+HK`4GYukstye z*02KrkP+TO`4Hk3kstye*06sA0FV*lLirE^7m*+WAJ(t~0FV*pLirFP7m*+WAJ(t~ z0FV*tLirFv7m*+WAJ(t~0FV*xLirG47m*+WAJ(t~0FV*#LirGa7m*+WAJ(t~0FV*( zLirG)7m*+WAJ(t~0FV*-LirHF7m*+WAJ(t~0FV*>LirHl7m*+WAJ%`c0|1Z_0z>%_ z0vM4X0w30}0|1Z_21EG}A{db%0w30}0|1Z_3Pbr2LKu-C0w30}0|1Z_4nz46Vi=Ji z0w30}0|1Z_5<~eAf*6q?0w30}0|1Z_7DM?Eq8O1N0w30p0|4+58bkRI!WfYt0w30p z0|4+59z*#M;uw)20v~_YkOKhl5h6qR5CR#IAOauOkOKhl5hg?V5F#0oAOauOkOKhl z5h_FZ5JDM|AOauOkOKhl5iUdd5MmjTAOauOkOKhl5i&#h5P}(zAOauOkOKhl5jI2l z5TY58AOauOkOKhl5jsQp5W*ReAOauOkOKhl5k5ot5aJn;AOe3M){p}L@DV~o`49pc zkstye){p}L@DWBs`4A!+kstye){p}L@DWNw`4B=Hkstye){p}L@DWZ!`4D0nkstye z){p}L@DWl&`4EB{kstye){p}L@DWx+`4FNSkstye){p}L@DW-=`4GYykstye){p}L z@DW}^`4Hk7ksyBpAJ&iq0PqoFL-`N_8<8LaAJ&iq0PqoJL-`OQ8<8LaAJ&iq0PqoN zL-`Ow8<8LaAJ&iq0PqoRL-`P58<8LaAJ&iq0PqoVL-`Pb8<8LaAJ&iq0PqoZL-`P* z8<8LaAJ&iq0PqodL-`QG8<8LaAJ&iq0PqohL-`Qm8z*0w30p0|4+5ibMGjLL8AG0w30p0|4+5jzjqnVjPhm0w30p z0|4+5l0*3rf*g?`0w30p0|4+5mP7dvq8yPR0w30p0|4+5nnU>z!W@wx0w30p0|4+5 zoMAOauOkOKhl5vD`=5F#CsAOauOkOKhl5voJ^ z5JDZ1AOauOkOKhl5w1h|5MmvXAOauOkOKhl5wb)15P}_%AOauOkOKhl5w=755TYHC zAOauOkOKhl5xPV95W*diAOauOkOKhl5xztD5aNFwkstye){p}L@Daj8`49pgkstye z){p}L@DavC`4A!=kstye){p}L@Da*G`4B=Lkstye){p}L@Da{K`4D0rkstye){p}L z@Db8O`4EC0kstye){p}L@DbKS`4FNWkstye){p}L@DbWW`4GY$kstye){p}L@Dbia z`4E5N9+4mdAJ&iq0PqpwL-`N_ACVveAJ&iq0Pqp!L-`OQACVveAJ&iq0Pqp&L-`Ow zACVveAJ&iq0Pqp+L-`P5ACVveAJ&iq0Pqp=L-`PbACVveAJ&iq0Pqp^L-`P*ACVve zAJ&iq0Pqp|L-`QGACVveAJ&iq0Pqq1L-~IY;vbP90w30p0|4+50z~-`0w9qf0w30p z0|4+521NM~A|R0<0w30p0|4+53Pkx3LLiYK0w30p0|4+54n+A7Vjz(q0w30p0|4+5 z5=8kBf*_F~0w30p0|4+57DV|Fq9BnV0w30p0|4+58btXJ!XS|#0w30p0|4+59z=in 
z5aJ+_AOauOkOKhl5h6tS5CS2QAOauOkOKhl5hg_W5F#OwAOauOkOKhl5h_Ia5JDl5 zAOauOkOKhl5iUge5Mm*bAOauOkOKhl5i&&i5P~6*AOauOkOKhl5jI5m5TYTGAOauO zkOKhl5jsTq5W*pmAOauOkOKhl5k7xJ`4HkEkstye){p}L@DV~p`49pkkstye){p}L z@DWBt`4A!^kstye){p}L@DWNx`4B=Pkstye){p}L@DWZ#`4D0vkstye){p}L@DWl( z`4EC4kstye){p}L@DWx-`4FNakstye){p}L@DW->`4GY)kstye){p}L@DYDrMEMZn zB9R~hAJ&iq0PqoFMEMW`Bat8iAJ&iq0PqoJMEMXRBat8iAJ&iq0PqoNMEMXxBat8i zAJ&iq0PqoRMEMY6Bat8iAJ&iq0PqoVMEMYcBat8iAJ&iq0PqoZMEMY+Bat8iAJ&iq z0PqodMEMZHBat8iAJ&iq0Pueienj~Y;v5F#a!AOauOkOKhl5voM_5JDx9AOauO zkOKhl5w1k}5Mm{fAOauOkOKhl5wb-25P~IuAOe3M){p}L@DV;m`4HkMkstye){p}L@DV~q`49pskstye){p}L@DWBu`4A#1 zkstye){p}L@DWNy`4B=Xkstye){p}L@DWZ$`4D0%kstye){p}L@DWl)`4ECCkstye z){p}L@DWx;`4FNikstye){p}L@DW-?`4GY?ksyBpAJ&iq0PqoBMfnioDv=-pAJ&iq z0PqoFMfnf{E0G`qAJ&iq0PqoJMfngSE0G`qAJ&iq0PqoNMfngyE0G`qAJ&iq0PqoR zMfnh7E0G`qAJ&iq0PqoVMfnhdE0G`qAJ&iq0PqoZMfnh-E0G`qAJ&iq0PqodMfniI zE0KR70w30p0|4+5ent5Z;wzCL0w30p0|4+5f<^fd0xXdr0w30p0|4+5hDG@hA}o<0 z0w30p0|4+5ibeSlLM)LW0w30p0|4+5jz#$pVl0s$0w30p0|4+5l12Ftf-I3B0w30p z0|4+5mPPpxqAZah0w30p0|4+5nnn2#!YqH0AOauOkOKhl5uQc)5aKM6AOauOkOKhl z5u!!;5CSccAOauOkOKhl5vE1?5F#y+AOauOkOKhl5voP`5JD}HAOauOkOKhl5w1n~ z5MnKnAOauOkOKhl5wb=35P~g{AOauOkOKhl5w=D75TY%SAOauOkOKhl5xPbB5W;^g zkstye){p}L@DaX6`4HkQkstye){p}L@DajA`49pwkstye){p}L@DavE`4A#5kstye z){p}L@Da*I`4B=bkstye){p}L@Da{M`4D0*kstye){p}L@Db8Q`4ECGkstye){p}L z@DbKU`4FNmkstye){p}L@DbWY`4E4?E|DMtAJ&iq0PqpsMfnioE|DMtAJ&iq0Pqpw zMfnf{FOeVuAJ&iq0Pqp!MfngSFOeVuAJ&iq0Pqp&MfngyFOeVuAJ&iq0Pqp+Mfnh7 zFOeVuAJ&iq0Pqp=MfnhdFOeVuAJ&iq0Pqp^Mfnh-FOeVuAJ&iq0Pqp|Mfraa!Y`2^ z0w30p0|4+5{zdr^;xCaP0w30p0|4+50!H}|0x*#v0w30p0|4+521fZ1A~2C40w30p z0|4+53P$-5LNJja0w30p0|4+54o3M9Vla^)0w30p0|4+55=QwDf-sRF0w30p0|4+5 z7Do9HqA-yl0w30p0|4+58b*Kl5W+B#AOauOkOKhl5gtbQ5aKYAAOauOkOKhl5h6zU z5CSogAOauOkOKhl5hh0Y5F#;=AOauOkOKhl5h_Oc5JEALAOauOkOKhl5iUmg5MnWr zAOauOkOKhl5i&;k5P~t0AOauOkOKhl5jIBo5TY@WAOauOkOKhl5juZH`4GY}kstye z){p}L@DV;n`4HkUkstye){p}L@DV~r`49p!kstye){p}L@DWBv`4A#9kstye){p}L 
z@DWNz`4B=fkstye){p}L@DWZ%`4D0H%5W+N(AOauOkOKhl5uQf*5aKkEAOauOkOKhl5u!%<5CS!k zAOauOkOKhl5vE4@5F#~^AOauOkOKhl5voS{5JEMPAOauOkOKhl5w1r05MnivAOauO zkOKhl5wb@45P~(4AOauOkOKhl5w=G85TZ4aAOauOkOO}J@DaL3`4GZ2kstye){p}L z@DaX7`4HkYkstye){p}L@DajB`49p&kstye){p}L@DavF`4A#Dkstye){p}L@Da*J z`4B=jkstye){p}L@Da{N`4D0@kstye){p}L@Db8R`4ECOkstye){p}L@DbKV`4FNu zkstye){uV#0PqpoM)?rJHjy9#AJ&iq0PqpsM)?rpHjy9#AJ&iq0PqpwM)?o|H<2I$ zAJ&iq0Pqp!M)?pTH<2I$AJ&iq0Pqp&M)?pzH<2I$AJ&iq0Pqp+M)?q8H<2I$AJ&iq z0Pqp=M)?qeH<2I$AJ&iq0Pqp^M)?q;H<2I$AJ%`60|4+5`bPN>!Z(p10w30p0|4+5 z{zmx_;x~~X0w30p0|4+50!R4}0yvQ%0w30p0|4+521of2A~=yC0w30p0|4+53P<@6 zLO78i0w30p0|4+54oCSAVmOf?0w30p0|4+55=Z$Ef;f>N0w30p0|4+57DxFIqBxNt z0v~_YkOKhF5gJGN5W+Z-AOauOkOKhF5gteR5aKwIAOauOkOKhF5h6$V5CS=oAOauO zkOKhF5hh3Z5F$B|AOauOkOKhF5h_Rd5JEYTAOauOkOKhF5iUph5MnuzAOauOkOKhF z5i&>l5P~_8AOauOkOKhF5jIEp5TZGeAOe3M*5VJpse%Il&|?Dt@InAU`D1_tkw5?+ z)*m1P)gK@O)qn#4&;am9`9J^ww2}V*7r+Bw)*m1P)qn#4@BrWek%0pM@FCy!1JtRDlBk&>`$c`9S~xv>*T<*1!W_)*m1P)qn#4@BrWek%0pM@FCyZwzyn{_-v9p>Apn3Nt{R04zpbr4~^_Q`+1QQg1002~=4?y{e=zs zfB*ngpbtR#dg=d{0ssIM004ke{d>Zwm(Z{T-~pqTv9Sb80g#umu>>EN9I^x;e-I9W z(*I*H%l&%*sr>^0k@Hglsq+H}s(=Fk&;WEt`2hex`Jp!m`2hfc(xKx3kpKVykO0sP zk9ao`7^8UP#s5+48naUcW$R6ziMQlJHZQc2?fmjE095+48naR37VR6ziM zQlJHZQUmAzmmdTGao`u98UP#se-a-60C4~U0929r|Ci$f0II+N0FVNp2$7%x0FVK| z7m=U=0FVIS7?I@z0;=Q#1FFLXfKuWE0jl8x0IHw`fKq4a|Citc6RK(M|Cf*e02H(z zK>47<0Fm+g|Cgb|0Fj`h0FmMY0jl8x6sqL$|CgY{0FmbK|CgYn0g(X!e}dBD0|BZ5 z0Dw|G=>M0XqX3ah{{NRB{6F#Fm!BE{8~_p@0040y1OQY)0Dw}U1%Of$;{TTb8~_p@ z0040S0{~P(0Dw}U1%Oh`=Kq%;1ORd17oQpc8~_p@0040S0svH7`Tv*W0|2VP0RWH! 
zpa_wm0RWHzz!#CA0RWHyf8ZIBPx;0K=?02}}k z9{>PxpacL^K>&bKpapPxfCB(jK>&bKpap&#fs^tR$s^kL{ssjgrQsM&vs^J3ws-OjcQUK}ym*4{vssir+mlyy56rrO5 zk)Wdhkz)4$m!P8ne~|(JfRf<_fKrzA|Ca#-0Fj`h0Fk1@0Fmee0;=Kz0jdE2fYK-X z|Cgb|0Fj`h0FnIZ|CgY{0Ff5>|CgYn0FmJX0IJ{v6RNuC|CgYn0Fe;*|CbK{02K2B z0IHzF0Fi9)|Citc6RKkS|CjRv0IHzF0Fejy|Ca&)02K2BSpce_qX3Zu`2UyS0~4xQ z`~R2V0~4x&0|3w=bVvE3H%R#*06_Uc00GKB008ko0030@Px;0K=?02}}k z9{>PxzySbMfCB*V06-p*JmLSBfCB*V0N^5#b?N_?fCB*V0ssJ!fCB*V0YDy+fCB)~ z0MtkMM)CibfCB*V06-p*{d>WvmWc)!27m(q&;WFo>aqkTPM`q*kN`jyk)Z~>f zs^J3ws-P1<`7i7Lm!Jayk>LXXs-PD@`3wF3m!LlYxu639k+Apwm!LmDxgP)kaR3AW zRGj$#m-7Pvsy6DEUb6%if9UQ1m!Jayk=peCm*WEfs=xsNkOBY+k)QzpkO9CKk)Qzp zkN|)ik>>*gss;dn66FH|s^kL`s>2F^QsM&vs^J3ws-OjcQsUqLm!Jayk&yTQmjD2O z5(NMN6rdMC`5^#+QUL$}IRF5FQh@^i&>?h3`JyL4`5^#6`9S~ym-Dj(6(WE4|Citc z6ROqv|CjRv0IHw^0g-z5|Citc6RObq|Ce{;mwJ;08%-Ag`G5le&;WEt`2hex`Jo>G z`2hfc(xAftk>~>es-^z_m!QJ{k>LXXs-PD@`CR<}m-7PvDxkvvkzx1$m*4{vDuDw4 z&>?h3`Jx|}E=~m-8p8mQRrmjw-~$t?tNQfh%ew>`e