@@ -3,7 +3,7 @@ set(SRC_LIST
"data_op_parser.cc"
"op_parser_factory.cc"
"pre_checker.cc"
"register_tbe.cc"
"op_registration_tbe.cc"
"parser_api.cc"
"parser_inner_ctx.cc"
"proto_file_parser.cc"
@@ -37,7 +37,7 @@
#include "graph/opsproto_manager.h"
#include "graph/utils/type_utils.h"
#include "omg/parser/parser_inner_ctx.h"
#include "parser/common/register_tbe.h"
#include "parser/common/op_registration_tbe.h"
#include "tbe_plugin_loader.h"
#include "mmpa/mmpa_api.h"
@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -22,21 +22,17 @@
#include "common/pass.h"
namespace ge {
///
/// @ingroup domi_omg
/// @brief graph pass
/// @author
///
class GraphPass : public Pass<ge::ComputeGraph> {
public:
///
/// run graph pass
/// @param [in] graph graph to be optimized
/// @return SUCCESS optimize successfully
/// @return NOT_CHANGED not optimized
/// @return others optimized failed
/// @author
///
virtual Status Run(ge::ComputeGraphPtr graph) = 0;
virtual Status ClearStatus() { return SUCCESS; };
};
@@ -74,10 +74,9 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelSaver::SaveJsonToFi
GELOGE(FAILED, "[Open][File] [%s] failed. %s", file_path, err_msg);
return FAILED;
}
const char *model_char = model_str.c_str();
uint32_t len = static_cast<uint32_t>(model_str.length());
// Write data to file
mmSsize_t mmpa_ret = mmWrite(fd, const_cast<void *>(static_cast<const void *>(model_char)), len);
mmSsize_t mmpa_ret = mmWrite(fd, const_cast<void *>(static_cast<const void *>(model_str.c_str())), len);
if (mmpa_ret == EN_ERROR || mmpa_ret == EN_INVALID_PARAM) {
char_t err_buf[kMaxErrStrLen + 1U] = {};
const auto err_msg = mmGetErrorFormatMessage(mmGetErrorCode(), &err_buf[0], kMaxErrStrLen);
@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -41,11 +41,9 @@ public:
static Status SaveJsonToFile(const char *file_path, const Json &model);
private:
///
/// @ingroup domi_common
/// @brief Check validity of the file path
/// @return Status result
///
static Status CheckPath(const string &file_path);
static int CreateDirectory(const std::string &directory_path);
@@ -53,4 +51,4 @@ private:
} // namespace parser
} // namespace ge
#endif //PARSER_COMMON_FILE_SAVER_H_
#endif // PARSER_COMMON_FILE_SAVER_H_
@@ -15,7 +15,7 @@ COMMON_LOCAL_SRC_FILES := \
data_op_parser.cc \
op_parser_factory.cc \
pre_checker.cc \
register_tbe.cc \
op_registration_tbe.cc \
parser_api.cc \
parser_inner_ctx.cc \
proto_file_parser.cc \
@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -77,7 +77,6 @@ class OpParserFactory {
* @brief OpParser creation function
* @return Created OpParser
*/
// typedef shared_ptr<OpParser> (*CREATOR_FUN)(void);
using CREATOR_FUN = std::function<std::shared_ptr<OpParser>(void)>;
/**
@@ -14,7 +14,7 @@
* limitations under the License.
*/
#include "parser/common/register_tbe.h"
#include "parser/common/op_registration_tbe.h"
#include <map>
#include <memory>
#include <string>
@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@
#include "common/util.h"
#include "tbe_plugin_loader.h"
#include "framework/common/debug/ge_log.h"
#include "parser/common/register_tbe.h"
#include "parser/common/op_registration_tbe.h"
#include "framework/omg/parser/parser_inner_ctx.h"
#include "external/ge/ge_api_types.h"
@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@
#include "omg/parser/parser_factory.h"
#include "framework/common/debug/ge_log.h"
#include "common/register_tbe.h"
#include "common/op_registration_tbe.h"
namespace domi {
FMK_FUNC_HOST_VISIBILITY WeightsParserFactory *WeightsParserFactory::Instance() {
@@ -100,7 +100,8 @@ static float Fp16ToFloat(const uint16_t &fp_val) {
e_ret = 0;
m_ret = 0;
} else {
e_ret = (static_cast<uint32_t>(hf_exp) - static_cast<uint32_t>(kFp16ExpBias)) + static_cast<uint32_t>(kFp32ExpBias);
e_ret = static_cast<uint32_t>((static_cast<uint32_t>(hf_exp) - static_cast<uint32_t>(kFp16ExpBias)) +
static_cast<uint32_t>(kFp32ExpBias));
m_ret = hf_man & kFp16ManMask;
m_ret = m_ret << (kFp32ManLen - kFp16ManLen);
}
@@ -132,7 +133,8 @@ static double Fp16ToDouble(const uint16_t &fp_val) {
e_ret = 0;
m_ret = 0;
} else {
e_ret = (static_cast<uint64_t>(hf_exp) - static_cast<uint64_t>(kFp16ExpBias)) + static_cast<uint64_t>(kFp64ExpBias);
e_ret = static_cast<uint64_t>((static_cast<uint64_t>(hf_exp) - static_cast<uint64_t>(kFp16ExpBias)) +
static_cast<uint64_t>(kFp64ExpBias));
m_ret = hf_man & kFp16ManMask;
m_ret = m_ret << (kFp64ManLen - kFp16ManLen);
}
@@ -150,7 +152,7 @@ static double Fp16ToDouble(const uint16_t &fp_val) {
/// @return Return uint8 value of fp16_t object
static uint8_t GetUint8ValByMan(uint8_t s_ret, const uint64_t &long_int_m, const uint16_t &shift_out) {
bool need_round = IsRoundOne(long_int_m, shift_out + kFp16ManLen);
auto m_ret = static_cast<uint8_t>((long_int_m >> (kFp16ManLen + shift_out)) & kBitLen8Max);
auto m_ret = static_cast<uint8_t>((long_int_m >> static_cast<uint16_t>(kFp16ManLen + shift_out)) & kBitLen8Max);
need_round = need_round && ((s_ret == 0 && m_ret < kInt8Max) || (s_ret == 1 && m_ret <= kInt8Max));
if (need_round) {
m_ret++;
@@ -172,14 +174,14 @@ static int8_t Fp16ToInt8(const uint16_t &fp_val) {
int8_t ret;
uint8_t ret_v;
// 1.get s_ret and shift it to bit0.
uint8_t s_ret = Fp16ExtracSign(fp_val);
uint8_t s_ret = static_cast<uint8_t>(Fp16ExtracSign(fp_val));
// 2.get hf_e and hf_m
uint16_t hf_e = Fp16ExtracExp(fp_val);
uint16_t hf_m = Fp16ExtracMan(fp_val);
if (Fp16IsDenorm(fp_val)) { // Denormalized number
ret_v = 0;
ret = *(ge::PtrToPtr<uint8_t, uint8_t>(&ret_v));
ret = static_cast<int8_t>(*(ge::PtrToPtr<uint8_t, uint8_t>(&ret_v)));
return ret;
}
@@ -215,7 +217,7 @@ static int8_t Fp16ToInt8(const uint16_t &fp_val) {
ret_v = GetUint8ValByMan(s_ret, long_int_m, shift_out);
}
ret = *(ge::PtrToPtr<uint8_t, uint8_t>(&ret_v));
ret = static_cast<int8_t>(*(ge::PtrToPtr<uint8_t, uint8_t>(&ret_v)));
return ret;
}
@@ -226,7 +228,7 @@ static int8_t Fp16ToInt8(const uint16_t &fp_val) {
static uint8_t Fp16ToUInt8(const uint16_t &fp_val) {
uint8_t m_ret = 0;
// 1.get s_ret and shift it to bit0.
uint8_t s_ret = Fp16ExtracSign(fp_val);
uint8_t s_ret = static_cast<uint8_t>(Fp16ExtracSign(fp_val));
// 2.get hf_e and hf_m
uint16_t hf_e = Fp16ExtracExp(fp_val);
uint16_t hf_m = Fp16ExtracMan(fp_val);
@@ -258,7 +260,7 @@ static uint8_t Fp16ToUInt8(const uint16_t &fp_val) {
}
if (overflow_flag == 0U) {
bool need_round = IsRoundOne(long_int_m, shift_out + kFp16ManLen);
m_ret = static_cast<uint8_t>((long_int_m >> (kFp16ManLen + shift_out)) & kBitLen8Max);
m_ret = static_cast<uint8_t>((long_int_m >> static_cast<uint16_t>(kFp16ManLen + shift_out)) & kBitLen8Max);
if (need_round && m_ret != kBitLen8Max) {
m_ret++;
}
@@ -280,7 +282,7 @@ static uint8_t Fp16ToUInt8(const uint16_t &fp_val) {
/// @return Return uint16 value of fp16_t object
static uint16_t GetUint16ValByMan(uint16_t s_ret, const uint64_t &long_int_m, const uint16_t &shift_out) {
bool need_round = IsRoundOne(long_int_m, shift_out + kFp16ManLen);
auto m_ret = static_cast<uint16_t>((long_int_m >> (kFp16ManLen + shift_out)) & kBitLen16Max);
auto m_ret = static_cast<uint16_t>((long_int_m >> static_cast<uint16_t>(kFp16ManLen + shift_out)) & kBitLen16Max);
if (need_round && m_ret < kInt16Max) {
m_ret++;
}
@@ -343,7 +345,7 @@ static int16_t Fp16ToInt16(const uint16_t &fp_val) {
// Generate final result
ret_v = GetUint16ValByMan(s_ret, long_int_m, shift_out);
}
ret = *(ge::PtrToPtr<uint16_t, uint16_t>(&ret_v));
ret = static_cast<int16_t>(*(ge::PtrToPtr<uint16_t, uint16_t>(&ret_v)));
return ret;
}
@@ -378,7 +380,7 @@ static uint16_t Fp16ToUInt16(const uint16_t &fp_val) {
}
}
bool need_round = IsRoundOne(long_int_m, shift_out + kFp16ManLen);
m_ret = static_cast<uint16_t>((long_int_m >> (kFp16ManLen + shift_out)) & kBitLen16Max);
m_ret = static_cast<uint16_t>((long_int_m >> static_cast<int16_t>(kFp16ManLen + shift_out)) & kBitLen16Max);
if (need_round && m_ret != kBitLen16Max) {
m_ret++;
}
@@ -419,7 +421,7 @@ static int32_t Fp16ToInt32(const uint16_t &fp_val) {
}
}
bool need_round = IsRoundOne(long_int_m, shift_out + kFp16ManLen);
auto m_ret = static_cast<uint32_t>((long_int_m >> (kFp16ManLen + shift_out)) & kBitLen32Max);
auto m_ret = static_cast<uint32_t>((long_int_m >> static_cast<uint32_t>(kFp16ManLen + shift_out)) & kBitLen32Max);
if (need_round && m_ret < kInt32Max) {
m_ret++;
}
@@ -434,7 +436,7 @@ static int32_t Fp16ToInt32(const uint16_t &fp_val) {
ret_v = (s_ret << static_cast<uint16_t>(kBitShift31)) | (m_ret);
}
return *(ge::PtrToPtr<uint32_t, uint32_t>(&ret_v));
return static_cast<int32_t>(*(ge::PtrToPtr<uint32_t, uint32_t>(&ret_v)));
}
/// @ingroup fp16_t math conversion static method
@@ -468,7 +470,7 @@ static uint32_t Fp16ToUInt32(const uint16_t &fp_val) {
}
}
bool need_round = IsRoundOne(long_int_m, shift_out + kFp16ManLen);
m_ret = static_cast<uint32_t>(long_int_m >> (kFp16ManLen + shift_out)) & kBitLen32Max;
m_ret = static_cast<uint32_t>(long_int_m >> static_cast<uint16_t>(kFp16ManLen + shift_out)) & kBitLen32Max;
if (need_round && m_ret != kBitLen32Max) {
m_ret++;
}
@@ -483,10 +485,10 @@ static uint32_t Fp16ToUInt32(const uint16_t &fp_val) {
static uint16_t Fp16AddCalVal(uint16_t s_ret, int16_t e_ret, uint16_t m_ret, uint32_t m_trunc, uint16_t shift_out) {
uint16_t m_min = kFp16ManHideBit << shift_out;
uint16_t m_max = m_min << 1;
uint16_t m_max = static_cast<uint16_t>(m_min << 1U);
// Denormal
while (m_ret < m_min && e_ret > 0) { // the value of m_ret should not be smaller than 2^23
m_ret = m_ret << 1;
m_ret = static_cast<uint16_t>(m_ret << 1U);
m_ret += (kFp32SignMask & m_trunc) >> kFp32SignIndex;
m_trunc = m_trunc << 1;
e_ret = e_ret - 1;
@@ -560,12 +562,12 @@ static uint16_t Fp16Add(uint16_t v_1, uint16_t v_2) {
uint32_t m_trunc = 0;
int16_t e_ret = std::max(e_a, e_b);
int16_t e_tmp = std::abs(e_a - e_b);
int16_t e_tmp = static_cast<int16_t>(std::abs(e_a - e_b));
if (e_a > e_b) {
m_trunc = (m_b << (static_cast<uint16_t>(kBitShift32) - static_cast<uint16_t>(e_tmp)));
m_trunc = (m_b << static_cast<uint16_t>(static_cast<uint16_t>(kBitShift32) - static_cast<uint16_t>(e_tmp)));
m_b = RightShift(m_b, e_tmp);
} else if (e_a < e_b) {
m_trunc = (m_a << (static_cast<uint16_t>(kBitShift32) - static_cast<uint16_t>(e_tmp)));
m_trunc = (m_a << static_cast<uint16_t>(static_cast<uint16_t>(kBitShift32) - static_cast<uint16_t>(e_tmp)));
m_a = RightShift(m_a, e_tmp);
}
// calculate mantissav
@@ -690,7 +692,7 @@ static uint16_t Fp16Div(uint16_t v_1, uint16_t v_2) {
m_b = m_tmp;
}
m_div = static_cast<float>(m_a * 1.0f / m_b);
fp16_t fp_div = m_div;
fp16_t fp_div = static_cast<fp16_t>(m_div);
ret = fp_div.val;
if (s_a != s_b) {
ret |= kFp16SignMask;
@@ -917,7 +919,7 @@ fp16_t &fp16_t::operator=(const int8_t &i_val) {
e_ret = kFp16ManLen;
while ((m_ret & kFp16ManHideBit) == 0) {
m_ret = m_ret << 1;
m_ret = static_cast<uint16_t>(m_ret << 1U);
e_ret = e_ret - 1U;
}
e_ret = e_ret + kFp16ExpBias;
@@ -935,7 +937,7 @@ fp16_t &fp16_t::operator=(const uint8_t &ui_val) {
if (static_cast<bool>(m_ret)) {
e_ret = kFp16ManLen;
while ((m_ret & kFp16ManHideBit) == 0) {
m_ret = m_ret << 1;
m_ret = static_cast<uint16_t>(m_ret << 1U);
e_ret = e_ret - 1U;
}
e_ret = e_ret + kFp16ExpBias;
@@ -948,18 +950,18 @@ fp16_t &fp16_t::operator=(const uint8_t &ui_val) {
static void SetValByUint16Val(const uint16_t &input_val, const uint16_t &sign, uint16_t &ret_val) {
uint32_t m_tmp = (input_val & kFp32AbsMax);
uint16_t m_min = kFp16ManHideBit;
uint16_t m_max = m_min << 1;
uint16_t m_max = static_cast<uint16_t>(m_min << 1U);
uint16_t len = static_cast<uint16_t>(GetManBitLength(m_tmp));
if (static_cast<bool>(m_tmp)) {
int16_t e_ret;
if (len > static_cast<uint16_t>(kDim11)) {
e_ret = kFp16ExpBias + kFp16ManLen;
uint16_t e_tmp = len - static_cast<uint16_t>(kDim11);
uint16_t e_tmp = static_cast<uint16_t>(len - static_cast<uint16_t>(kDim11));
uint32_t trunc_mask = 1;
for (int i = 1; i < e_tmp; i++) {
trunc_mask = (trunc_mask << 1) + 1;
}
uint32_t m_trunc = (m_tmp & trunc_mask) << (static_cast<uint16_t>(kBitShift32) - e_tmp);
uint32_t m_trunc = (m_tmp & trunc_mask) << static_cast<uint16_t>(static_cast<uint16_t>(kBitShift32) - e_tmp);
for (int i = 0; i < e_tmp; i++) {
m_tmp = (m_tmp >> 1);
e_ret = e_ret + 1;
@@ -978,8 +980,8 @@ static void SetValByUint16Val(const uint16_t &input_val, const uint16_t &sign, u
}
} else {
e_ret = static_cast<int16_t>(kFp16ExpBias);
m_tmp = m_tmp << (kManBitLength - len);
e_ret = e_ret + (len - 1);
m_tmp = m_tmp << static_cast<uint16_t>(kManBitLength - len);
e_ret = e_ret + static_cast<int16_t>(len - 1);
}
auto m_ret = static_cast<uint16_t>(m_tmp);
ret_val = Fp16Constructor(sign, static_cast<uint16_t>(e_ret), m_ret);
@@ -990,7 +992,7 @@ fp16_t &fp16_t::operator=(const int16_t &i_val) {
if (i_val == 0) {
val = 0;
} else {
uint16_t ui_val = *(ge::PtrToPtr<const int16_t, const int16_t>(&i_val));
uint16_t ui_val = static_cast<uint16_t>(*(ge::PtrToPtr<const int16_t, const int16_t>(&i_val)));
auto s_ret = static_cast<uint16_t>(ui_val >> static_cast<uint16_t>(kBitShift15));
if (static_cast<bool>(s_ret)) {
int16_t iValM = -i_val;
@@ -1008,17 +1010,17 @@ fp16_t &fp16_t::operator=(const uint16_t &ui_val) {
int16_t e_ret;
uint16_t m_ret = ui_val;
uint16_t m_min = kFp16ManHideBit;
uint16_t m_max = m_min << 1;
uint16_t m_max = static_cast<uint16_t>(m_min << 1U);
uint16_t len = static_cast<uint16_t>(GetManBitLength(m_ret));
if (len > kManBitLength) {
e_ret = kFp16ExpBias + kFp16ManLen;
uint32_t m_trunc;
uint32_t trunc_mask = 1;
uint16_t e_tmp = len - kManBitLength;
uint16_t e_tmp = static_cast<uint16_t>(len - kManBitLength);
for (int i = 1; i < e_tmp; i++) {
trunc_mask = (trunc_mask << 1) + 1;
}
m_trunc = (m_ret & trunc_mask) << (static_cast<uint16_t>(kBitShift32) - e_tmp);
m_trunc = (m_ret & trunc_mask) << static_cast<int16_t>(static_cast<uint16_t>(kBitShift32) - e_tmp);
for (int i = 0; i < e_tmp; i++) {
m_ret = (m_ret >> 1);
e_ret = e_ret + 1;
@@ -1040,8 +1042,8 @@ fp16_t &fp16_t::operator=(const uint16_t &ui_val) {
}
} else {
e_ret = static_cast<int16_t>(kFp16ExpBias);
m_ret = m_ret << (static_cast<uint16_t>(kDim11) - len);
e_ret = e_ret + (len - 1);
m_ret = m_ret << static_cast<uint16_t>(static_cast<uint16_t>(kDim11) - len);
e_ret = static_cast<int16_t>(e_ret + (len - 1));
}
val = Fp16Constructor(0u, static_cast<uint16_t>(e_ret), m_ret);
}
@@ -1054,15 +1056,15 @@ static void SetValByUint32Val(const uint32_t &input_val, const uint16_t &sign, u
uint32_t m_min = kFp16ManHideBit;
uint32_t m_max = m_min << 1;
uint16_t len = static_cast<uint16_t>(GetManBitLength(m_tmp));
if (len > kDim11) {
if (len > static_cast<uint16_t>(kDim11)) {
e_ret = kFp16ExpBias + kFp16ManLen;
uint32_t m_trunc = 0;
uint32_t trunc_mask = 1;
uint16_t e_tmp = len - static_cast<uint16_t>(kDim11);
uint16_t e_tmp = static_cast<uint16_t>(len - static_cast<uint16_t>(kDim11));
for (int i = 1; i < e_tmp; i++) {
trunc_mask = (trunc_mask << 1) + 1;
}
m_trunc = (m_tmp & trunc_mask) << (static_cast<uint16_t>(kBitShift32) - e_tmp);
m_trunc = (m_tmp & trunc_mask) << static_cast<uint16_t>(static_cast<uint16_t>(kBitShift32) - e_tmp);
for (int i = 0; i < e_tmp; i++) {
m_tmp = (m_tmp >> 1);
e_ret = e_ret + 1;
@@ -1085,8 +1087,8 @@ static void SetValByUint32Val(const uint32_t &input_val, const uint16_t &sign, u
}
} else {
e_ret = static_cast<int16_t>(kFp16ExpBias);
m_tmp = m_tmp << (static_cast<uint16_t>(kDim11) - len);
e_ret = e_ret + (len - 1);
m_tmp = m_tmp << static_cast<uint16_t>(static_cast<uint16_t>(kDim11) - len);
e_ret = e_ret + static_cast<int16_t>(len - 1);
}
auto m_ret = static_cast<uint16_t>(m_tmp);
ret_val = Fp16Constructor(sign, static_cast<uint16_t>(e_ret), m_ret);
@@ -1116,11 +1118,11 @@ fp16_t &fp16_t::operator=(const uint32_t &ui_val) {
uint32_t m_min = kFp16ManHideBit;
uint32_t m_max = m_min << 1;
uint16_t len = static_cast<uint16_t>(GetManBitLength(m_tmp));
if (len > kDim11) {
if (len > static_cast<uint16_t>(kDim11)) {
e_ret = kFp16ExpBias + kFp16ManLen;
uint32_t m_trunc = 0;
uint32_t trunc_mask = 1;
uint16_t e_tmp = len - static_cast<uint16_t>(kDim11);
uint16_t e_tmp = static_cast<uint16_t>(len - static_cast<uint16_t>(kDim11));
for (int i = 1; i < e_tmp; i++) {
trunc_mask = (trunc_mask << 1) + 1;
}
@@ -1147,8 +1149,8 @@ fp16_t &fp16_t::operator=(const uint32_t &ui_val) {
}
} else {
e_ret = static_cast<int16_t>(kFp16ExpBias);
m_tmp = m_tmp << (static_cast<uint16_t>(kDim11) - len);
e_ret = e_ret + (len - 1);
m_tmp = m_tmp << static_cast<uint16_t>(static_cast<uint16_t>(kDim11) - len);
e_ret = static_cast<int16_t>(e_ret + (len - 1));
}
auto m_ret = static_cast<uint16_t>(m_tmp);
val = Fp16Constructor(0u, static_cast<uint16_t>(e_ret), m_ret);
@@ -1200,7 +1202,7 @@ fp16_t &fp16_t::operator=(const double &d_val) {
} else { // Regular case with no overflow or underflow
e_ret = static_cast<int16_t>(e_d - 0x3F0u);
need_round = IsRoundOne(m_d, m_len_delta);
need_round = IsRoundOne(m_d, static_cast<uint16_t>(m_len_delta));
m_ret = static_cast<uint16_t>(m_d >> m_len_delta);
if (need_round) {
m_ret++;
@@ -169,7 +169,7 @@ inline bool Fp16IsInf(const uint16_t x) {
/// @ingroup fp16 special value judgment
/// @brief whether a fp16 is NaN
inline bool Fp16IsNan(const uint16_t x) {
return ((((x) & kFp16ExpMask) == kFp16ExpMask) && ((x) & kFp16ManMask));
return ((((x) & kFp16ExpMask) == kFp16ExpMask) && (((x) & kFp16ManMask) != 0U));
}
/// @ingroup fp16 special value judgment
/// @brief whether a fp16 is invalid
@@ -211,38 +211,38 @@ constexpr uint32_t kFp32MaxExp = 0xFFU;
constexpr uint32_t kFp32MaxMan = 0x7FFFFFU;
/// @ingroup fp32 special value judgment
/// @brief whether a fp32 is NaN
inline bool Fp32IsNan(const uint16_t x) {
return ((((x) & kFp32ExpMask) == kFp32ExpMask) && ((x) & kFp32ManMask));
inline bool Fp32IsNan(const uint32_t x) {
return ((((x) & kFp32ExpMask) == kFp32ExpMask) && (((x) & kFp32ManMask) != 0U));
}
/// @ingroup fp32 special value judgment
/// @brief whether a fp32 is infinite
inline bool Fp32IsInf(const uint16_t x) {
return ((((x) & kFp32ExpMask) == kFp32ExpMask) && (!((x) & kFp32ManMask)));
inline bool Fp32IsInf(const uint32_t x) {
return ((((x) & kFp32ExpMask) == kFp32ExpMask) && (((x) & kFp32ManMask) == 0U));
}
/// @ingroup fp32 special value judgment
/// @brief whether a fp32 is a denormalized value
inline bool Fp32IsDenorm(const uint16_t x) {
return ((((x)&kFp32ExpMask) == 0));
inline bool Fp32IsDenorm(const uint32_t x) {
return ((((x)&kFp32ExpMask) == 0));
}
/// @ingroup fp32 basic operator
/// @brief get sign of fp32
inline bool Fp32ExtracSign(const uint16_t x) {
return (((x) >> kFp32SignIndex) & 1);
inline uint32_t Fp32ExtracSign(const uint16_t x) {
return (((x) >> kFp32SignIndex) & 1U);
}
/// @ingroup fp32 basic operator
/// @brief get exponent of fp16
inline bool Fp32ExtracExp(const uint16_t x) {
return (((x)&kFp32ExpMask) >> kFp32ManLen);
inline uint32_t Fp32ExtracExp(const uint32_t x) {
return (((x)&kFp32ExpMask) >> kFp32ManLen);
}
/// @ingroup fp32 basic operator
/// @brief get mantissa of fp16
inline uint16_t Fp32ExtracMan(const uint16_t x) {
return (((x)&kFp32ManMask) | (((((x) >> kFp32ManLen) & kFp32MaxExp) > 0 ? 1 : 0) * kFp32ManHideBit));
inline uint32_t Fp32ExtracMan(const uint32_t x) {
return (((x)&kFp32ManMask) | (((((x) >> kFp32ManLen) & kFp32MaxExp) > 0 ? 1 : 0) * kFp32ManHideBit));
}
/// @ingroup fp32 basic operator
/// @brief constructor of fp32 from sign exponent and mantissa
inline uint16_t Fp32Constructor(const uint16_t s, const uint16_t e, const uint16_t m) {
return (((s) << kFp32SignIndex) | ((e) << kFp32ManLen) | ((m) & kFp32MaxMan));
inline uint32_t Fp32Constructor(const uint32_t s, const uint32_t e, const uint32_t m) {
return (((s) << kFp32SignIndex) | ((e) << kFp32ManLen) | ((m) & kFp32MaxMan));
}
/// @ingroup fp64 basic parameter
/// @brief fp64 exponent bias
@@ -279,13 +279,13 @@ constexpr uint64_t kFp64MaxExp = 0x07FF;
constexpr uint64_t kFp64MaxMan = 0xFFFFFFFFFFFLLu;
/// @ingroup fp64 special value judgment
/// @brief whether a fp64 is NaN
inline bool Fp64IsNan(const uint16_t x) {
return ((((x) & kFp64ExpMask) == kFp64ExpMask) && ((x) & kFp64ManMask));
inline bool Fp64IsNan(const uint64_t x) {
return ((((x) & kFp64ExpMask) == kFp64ExpMask) && (((x) & kFp64ManMask) != 0ULL));
}
/// @ingroup fp64 special value judgment
/// @brief whether a fp64 is infinite
inline bool Fp64IsInf(const uint16_t x) {
return ((((x) & kFp64ExpMask) == kFp64ExpMask) && (!((x) & kFp64ManMask)));
inline bool Fp64IsInf(const uint64_t x) {
return ((((x) & kFp64ExpMask) == kFp64ExpMask) && (((x) & kFp64ManMask) == 0ULL));
}
/// @ingroup integer special value judgment
/// @brief maximum positive value of int8_t (0111 1111)
@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
/**
* Copyright (c) Huawei Technologies Co., Ltd. 2020~2022. All rights reserved.
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
/**
* Copyright (c) Huawei Technologies Co., Ltd. 2020~2022. All rights reserved.
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
/**
* Copyright (c) Huawei Technologies Co., Ltd. 2020~2022. All rights reserved.
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
/**
* Copyright (c) Huawei Technologies Co., Ltd. 2020~2022. All rights reserved.
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
/**
* Copyright (c) Huawei Technologies Co., Ltd. 2020~2022. All rights reserved.
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
/**
* Copyright (c) Huawei Technologies Co., Ltd. 2020~2022. All rights reserved.
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -42,7 +42,7 @@ const char *const kFileConstant = "FileConstant";
namespace ge {
Status OnnxFileConstantParser::ParseParams(const Message *op_src, ge::Operator &op_def) {
GE_CHECK_NOTNULL(op_src);
const ge::onnx::NodeProto *node = reinterpret_cast<const ge::onnx::NodeProto *>(op_src);
const ge::onnx::NodeProto *node = PtrToPtr<const Message, const ge::onnx::NodeProto>(op_src);
GELOGD("Onnx op node name = %s, op type= %s, parse params", node->name().c_str(), node->op_type().c_str());
ge::onnx::TensorProto tensor_proto;
@@ -1,5 +1,5 @@
/**
* Copyright (c) Huawei Technologies Co., Ltd. 2020~2022. All rights reserved.
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
/**
* Copyright (c) Huawei Technologies Co., Ltd. 2020~2022. All rights reserved.
* Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -274,7 +274,7 @@ set(PARSER_SRC_FILES
"${PARSER_DIR}/parser/common/pre_checker.cc"
"${PARSER_DIR}/parser/common/proto_file_parser.cc"
"${PARSER_DIR}/parser/common/prototype_pass_manager.cc"
"${PARSER_DIR}/parser/common/register_tbe.cc"
"${PARSER_DIR}/parser/common/op_registration_tbe.cc"
"${PARSER_DIR}/parser/common/tbe_plugin_loader.cc"
"${PARSER_DIR}/parser/common/thread_pool.cc"
"${PARSER_DIR}/parser/common/auto_mapping_subgraph_io_index_func.cc"
@@ -21,7 +21,7 @@
#include "parser/common/op_parser_factory.h"
#include "graph/operator_reg.h"
#include "register/op_registry.h"
#include "parser/common/register_tbe.h"
#include "parser/common/op_registration_tbe.h"
#include "framework/omg/parser/model_parser.h"
#include "framework/omg/parser/parser_factory.h"
#include "external/parser/caffe_parser.h"
@@ -19,7 +19,7 @@
#include "parser/common/op_parser_factory.h"
#include "graph/operator_reg.h"
#include "register/op_registry.h"
#include "parser/common/register_tbe.h"
#include "parser/common/op_registration_tbe.h"
#include "external/parser/onnx_parser.h"
#include "st/parser_st_utils.h"
#include "external/ge/ge_api_types.h"
@@ -23,7 +23,6 @@
#include "graph/operator_reg.h"
#include "register/op_registry.h"
#include "external/register/register.h"
#include "parser/common/register_tbe.h"
#include "st/parser_st_utils.h"
#include "tests/depends/ops_stub/ops_stub.h"
#include "parser/common/acl_graph_parser_util.h"
@@ -68,7 +67,7 @@
#include "parser/common/parser_fp16_t.h"
#include "parser/common/op_parser_factory.h"
#include "parser/common/prototype_pass_manager.h"
#include "parser/common/register_tbe.h"
#include "parser/common/op_registration_tbe.h"
#include "parser/common/pass_manager.h"
#include "parser/tensorflow/parser_graph_optimizer.h"
#include "metadef/inc/register/scope/scope_pass_registry_impl.h"
@@ -275,7 +275,7 @@ set(PARSER_SRC_FILES
"${PARSER_DIR}/parser/common/pre_checker.cc"
"${PARSER_DIR}/parser/common/proto_file_parser.cc"
"${PARSER_DIR}/parser/common/prototype_pass_manager.cc"
"${PARSER_DIR}/parser/common/register_tbe.cc"
"${PARSER_DIR}/parser/common/op_registration_tbe.cc"
"${PARSER_DIR}/parser/common/tbe_plugin_loader.cc"
"${PARSER_DIR}/parser/common/thread_pool.cc"
"${PARSER_DIR}/parser/common/auto_mapping_subgraph_io_index_func.cc"
@@ -23,7 +23,7 @@
#include "graph/operator_reg.h"
#include "external/graph/types.h"
#include "register/op_registry.h"
#include "parser/common/register_tbe.h"
#include "parser/common/op_registration_tbe.h"
#include "framework/omg/parser/model_parser.h"
#include "framework/omg/parser/parser_factory.h"
#include "external/parser/caffe_parser.h"
@@ -23,7 +23,7 @@
#include "graph/operator_reg.h"
#include "external/graph/types.h"
#include "register/op_registry.h"
#include "parser/common/register_tbe.h"
#include "parser/common/op_registration_tbe.h"
#include "external/parser/onnx_parser.h"
#include "ut/parser/parser_ut_utils.h"
#include "external/ge/ge_api_types.h"
@@ -20,7 +20,7 @@
#include "graph/operator_reg.h"
#include "external/graph/types.h"
#include "register/op_registry.h"
#include "parser/common/register_tbe.h"
#include "parser/common/op_registration_tbe.h"
#include "external/parser/onnx_parser.h"
#include "ut/parser/parser_ut_utils.h"
#include "external/ge/ge_api_types.h"
@@ -22,7 +22,7 @@
#include "graph/operator_reg.h"
#include "external/graph/types.h"
#include "register/op_registry.h"
#include "parser/common/register_tbe.h"
#include "parser/common/op_registration_tbe.h"
namespace ge {
@@ -25,7 +25,6 @@
#include "external/graph/types.h"
#include "register/op_registry.h"
#include "external/register/register.h"
#include "parser/common/register_tbe.h"
#include "tests/depends/ops_stub/ops_stub.h"
#include "parser/common/acl_graph_parser_util.h"
#include "metadef/third_party/graphengine/inc/external/ge/ge_api_types.h"
@@ -71,7 +70,7 @@
#include "parser/common/parser_fp16_t.h"
#include "parser/common/op_parser_factory.h"
#include "parser/common/prototype_pass_manager.h"
#include "parser/common/register_tbe.h"
#include "parser/common/op_registration_tbe.h"
#include "parser/common/pass_manager.h"
#include "parser/tensorflow/parser_graph_optimizer.h"
#include "metadef/inc/register/scope/scope_pass_registry_impl.h"