
sync-from-trunk-to-blue-zone-0928

pull/187/head
wuweikang taoxiangdong 4 years ago
parent commit a2ec076850
7 changed files with 282 additions and 282 deletions
1. +0 -1 ge/ge_runner.mk
2. +1 -0 ge/graph/preprocess/insert_op/ge_aipp_op.cc
3. +10 -12 ge/graph/preprocess/multi_batch_copy_graph.cc
4. +2 -0 ge/graph/preprocess/multi_batch_copy_graph.h
5. +232 -232 ge/session/inner_session.cc
6. +29 -29 inc/framework/common/ge_types.h
7. +8 -8 third_party/fwkacllib/inc/ops/nn_calculation_ops.h

+0 -1 ge/ge_runner.mk

@@ -402,7 +402,6 @@ LOCAL_C_INCLUDES := $(RUNNER_LOCAL_C_INCLUDES)

LOCAL_SRC_FILES := ../../out/ge/lib64/stub/ge_api.cc \
../../out/ge/lib64/stub/ge_prof.cc \

LOCAL_SHARED_LIBRARIES :=

LOCAL_LDFLAGS := -lrt -ldl


+1 -0 ge/graph/preprocess/insert_op/ge_aipp_op.cc

@@ -825,6 +825,7 @@ Status AippOp::AddAttrToAippData(const OpDescPtr &aipp_data_op_desc) {
}

Status AippOp::AddNodeToGraph(const NodePtr &aipp_node, int64_t max_dynamic_aipp_size) {
static int index = 0;
std::vector<int64_t> input_shape_dim(1, max_dynamic_aipp_size);
GeShape input_shape(input_shape_dim);
// construct input tensor
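
The only new line in this hunk is the static counter. A hedged reading, since the naming code sits below the shown context: index presumably distinguishes the data nodes created by successive AddNodeToGraph calls, along the lines of this illustrative fragment (the name pattern is an assumption, not taken from the commit):

// Illustrative only -- the real naming logic is outside the shown hunk.
std::string aipp_data_name = "aipp_data_" + std::to_string(index++);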


+10 -12 ge/graph/preprocess/multi_batch_copy_graph.cc

@@ -40,6 +40,7 @@
#include "inc/pass_manager.h"
#include "graph/common/local_context.h"

using std::map;
using std::set;
using std::string;
using std::vector;
@@ -263,27 +264,24 @@ Status MultiBatchGraphCopyer::Init() {
}

Status MultiBatchGraphCopyer::LabelStatus() {
for (const auto &data : origin_data_nodes_) {
auto data_shape = NodeUtils::GetOutputDesc(*data, kDataOutIndex).GetShape();
if (!IsAllDimsPositive(data_shape.GetDims())) {
origin_nodes_status_[data.get()] = kNodeInBatchBranch;
}
}
map<string, vector<NodePtr>> frame_enters;
InitStatus(frame_enters);

bool changed = true;
// If any input node is kNodeInBatchBranch, this node is kNodeInBatchBranch too
while (changed) {
changed = false;
for (const auto &node : origin_all_nodes_) {
auto iter = origin_nodes_status_.find(node.get());
if (iter != origin_nodes_status_.end()) {
continue;
}
for (auto &in_node : node->GetInAllNodes()) {
bool is_in_batch = origin_nodes_status_.find(in_node.get()) != origin_nodes_status_.end() &&
origin_nodes_status_[in_node.get()] == kNodeInBatchBranch;
if (is_in_batch) {
origin_nodes_status_[node.get()] = kNodeInBatchBranch;
changed = true;
if (origin_nodes_status_.find(node.get()) == origin_nodes_status_.end() ||
origin_nodes_status_[node.get()] != kNodeInBatchBranch) {
origin_nodes_status_[node.get()] = kNodeInBatchBranch;
ResetEnterStatus(frame_enters, node);
changed = true;
}
break;
}
}
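
The two helpers are declared in multi_batch_copy_graph.h below; their bodies are not part of the shown hunks. A hedged sketch of what the call sites imply, offered as orientation rather than the commit's actual code (ENTER/REFENTER and ENTER_ATTR_FRAME_NAME are assumed from the usual GE attribute definitions):

// Hedged sketch, not the commit's implementation.
void MultiBatchGraphCopyer::InitStatus(map<string, vector<NodePtr>> &frame_enters) {
  // Seed the status map with dynamic-shape data nodes (the loop that used to
  // live inline in LabelStatus).
  for (const auto &data : origin_data_nodes_) {
    auto data_shape = NodeUtils::GetOutputDesc(*data, kDataOutIndex).GetShape();
    if (!IsAllDimsPositive(data_shape.GetDims())) {
      origin_nodes_status_[data.get()] = kNodeInBatchBranch;
    }
  }
  // Group Enter/RefEnter nodes by frame so a whole loop frame can be
  // relabeled at once.
  for (const auto &node : origin_all_nodes_) {
    if (node->GetType() != ENTER && node->GetType() != REFENTER) {
      continue;
    }
    string frame_name;
    if (AttrUtils::GetStr(node->GetOpDesc(), ENTER_ATTR_FRAME_NAME, frame_name)) {
      frame_enters[frame_name].emplace_back(node);
    }
  }
}

void MultiBatchGraphCopyer::ResetEnterStatus(map<string, vector<NodePtr>> &frame_enters,
                                             const NodePtr &node) {
  // Once any node of a frame joins the batch branch, pull in all of that
  // frame's Enter nodes so the loop is copied per batch as a unit.
  for (const auto &frame : frame_enters) {
    const auto &enters = frame.second;
    if (std::find(enters.begin(), enters.end(), node) == enters.end()) {
      continue;
    }
    for (const auto &enter : enters) {
      origin_nodes_status_[enter.get()] = kNodeInBatchBranch;
    }
  }
}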


+2 -0 ge/graph/preprocess/multi_batch_copy_graph.h

@@ -69,6 +69,8 @@ class MultiBatchGraphCopyer {

// label status for origin_all_nodes_
Status LabelStatus();
void InitStatus(std::map<string, vector<NodePtr>> &frame_enters);
void ResetEnterStatus(std::map<string, vector<NodePtr>> &frame_enters, const NodePtr &node);
// add nodes functions
Status CreateNewNodes();



+232 -232 ge/session/inner_session.cc

@@ -1,18 +1,18 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "session/inner_session.h"

@@ -39,300 +39,300 @@ namespace {
const int32_t kDumpStatus = 0;

Status CheckReuseMemoryOption(const std::map<string, string> &options) {
auto iter = options.find(OPTION_EXEC_DISABLE_REUSED_MEMORY);
if (iter != options.end()) {
if (iter->second == "0") {
GELOGD("%s=0, reuse memory is open", OPTION_EXEC_DISABLE_REUSED_MEMORY);
} else if (iter->second == "1") {
GELOGD("%s=1, reuse memory is close", OPTION_EXEC_DISABLE_REUSED_MEMORY);
} else {
GELOGE(PARAM_INVALID, "option %s=%s is invalid", OPTION_EXEC_DISABLE_REUSED_MEMORY, iter->second.c_str());
return FAILED;
}
}
return SUCCESS;
}
}
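
For orientation, the validator's contract in one glance (illustrative, not code from this commit):

// {}                                          -> SUCCESS (option absent)
// {{OPTION_EXEC_DISABLE_REUSED_MEMORY, "0"}}  -> SUCCESS, memory reuse stays on
// {{OPTION_EXEC_DISABLE_REUSED_MEMORY, "1"}}  -> SUCCESS, memory reuse is off
// anything else                               -> FAILED with PARAM_INVALID
Status s = CheckReuseMemoryOption({{OPTION_EXEC_DISABLE_REUSED_MEMORY, "1"}});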

static std::mutex mutex_; // used by BuildGraph and RunGraph
bool InnerSession::is_dump_server_inited_ = false;
InnerSession::InnerSession(uint64_t session_id, const std::map<string, string> &options)
: init_flag_(false), session_id_(session_id), options_(options) {}

Status InnerSession::Initialize() {
if (init_flag_) {
GELOGW("[InnerSession:%lu] session already initialize.", session_id_);
return SUCCESS;
}

// If the global options and the session options are duplicated, the session options are preferred.
auto all_options = options_;
all_options.insert(GetMutableGlobalOptions().begin(), GetMutableGlobalOptions().end());
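
The merge order is what enforces that preference, and it leans on standard std::map semantics rather than anything GE-specific: insert never overwrites an existing key, so seeding the map with the session options and inserting the globals afterwards lets session values win on conflicts (illustrative):

std::map<std::string, std::string> merged{{"key", "session"}};
merged.insert({{"key", "global"}, {"extra", "global"}});
// merged["key"] == "session", merged["extra"] == "global"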

Status ret = CheckReuseMemoryOption(all_options);
if (ret != SUCCESS) {
GELOGE(ret, "[InnerSession:%lu] check reuse memory option failed.", session_id_);
return ret;
}

UpdateThreadContext(std::map<std::string, std::string>{});

GE_CHK_RT_RET(rtSetDevice(GetContext().DeviceId()));

DumpProperties dump_properties;
dump_properties.InitByOptions();
GE_CHK_STATUS_RET(AddDumpProperties(dump_properties), "Add dump properties failed");

ret = graph_manager_.Initialize(options_);
if (ret != SUCCESS) {
GELOGE(ret, "[InnerSession:%lu] initialize failed.", session_id_);
GE_CHK_STATUS(RemoveDumpProperties(), "Remove dump properties failed");
return ret;
}

ret = VarManager::Instance(session_id_)->SetMemoryMallocSize(all_options);
if (ret != SUCCESS) {
GELOGE(ret, "failed to set malloc size");
(void)graph_manager_.Finalize();
GE_CHK_STATUS(RemoveDumpProperties(), "Remove dump properties failed");
GE_CHK_RT(rtDeviceReset(static_cast<int32_t>(GetContext().DeviceId())));
return ret;
}

int32_t version = static_cast<int32_t>(SessionVersion::ClOUD_VERSION);
const int DEFAULT_DEVICE_ID = 0;
const int DEFAULT_JOB_ID = 0;
ret = VarManager::Instance(session_id_)->Init(version, session_id_, DEFAULT_DEVICE_ID, DEFAULT_JOB_ID);
if (ret != SUCCESS) {
GELOGE(ret, "failed to init session instance");
GE_CHK_STATUS(RemoveDumpProperties(), "Remove dump properties failed");
}
init_flag_ = true;
return SUCCESS;
}
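
Each failure path above unwinds the steps completed so far by hand (finalize the graph manager, drop the dump properties, reset the device). As a design note only, the same unwinding can be expressed with a dismissable scope guard; this is a hypothetical sketch, not part of the commit:

#include <functional>

class Cleanup {
 public:
  explicit Cleanup(std::function<void()> fn) : fn_(std::move(fn)) {}
  ~Cleanup() {
    if (armed_) fn_();  // runs the rollback unless dismissed
  }
  void Dismiss() { armed_ = false; }

 private:
  std::function<void()> fn_;
  bool armed_ = true;
};
// Arm one guard after each successful step; call Dismiss() on all of them
// once initialization has fully succeeded.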

Status InnerSession::Finalize() {
std::lock_guard<std::mutex> lock(resource_mutex_);
if (!init_flag_) {
GELOGW("[InnerSession:%lu] session does not initialize.", session_id_);
return SUCCESS;
}
UpdateThreadContext(std::map<std::string, std::string>{});
Status ret = graph_manager_.Finalize();
if (ret != SUCCESS) {
// Subsequent code execution is required, so no return is required
GELOGE(ret, "[InnerSession:%lu] finalize failed.", session_id_);
}

ModelManager::GetInstance()->DestroyAicpuSession(session_id_);
init_flag_ = false;
// release var memory
GELOGI("VarManager free var memory.");
(void)VarManager::Instance(session_id_)->FreeVarMemory();
// release analyzer saved info(Session Level)
Analyzer::GetInstance()->DestroySessionJsonObject(session_id_);

GE_CHK_RT(rtDeviceReset(static_cast<int32_t>(GetContext().DeviceId())));
GE_CHK_STATUS_RET(RemoveDumpProperties(), "Remove dump properties failed");

return ret;
}

Status InnerSession::GetVariable(const std::string &name, Tensor &val) {
UpdateThreadContext(std::map<std::string, std::string>{});
return graph_manager_.GetVariable(name, val);
}

Status InnerSession::AddGraph(uint32_t graph_id, const Graph &graph) {
std::map<std::string, std::string> options;
return AddGraph(graph_id, graph, options);
}

Status InnerSession::AddGraph(uint32_t graph_id, const Graph &graph,
const std::map<std::string, std::string> &options) {
std::lock_guard<std::mutex> lock(resource_mutex_);
if (!init_flag_) {
GELOGE(GE_SESS_INIT_FAILED, "[InnerSession:%lu] initialize failed.", session_id_);
return GE_SESS_INIT_FAILED;
}
UpdateThreadContext(options);
Status ret = graph_manager_.AddGraph(graph_id, graph, options, domi::GetContext());
if (ret != SUCCESS) {
GELOGE(ret, "[InnerSession:%lu] add graph %u failed.", session_id_, graph_id);
return ret;
}

GELOGI("[InnerSession:%lu] add graph success, graph_id=%u.", session_id_, graph_id);
return SUCCESS;
}

Status InnerSession::RunGraph(uint32_t graph_id, const std::vector<Tensor> &inputs, std::vector<Tensor> &outputs) {
GELOGI("[InnerSession:%lu] run graph on session, graph_id=%u.", session_id_, graph_id);
if (mutex_.try_lock()) {
std::lock_guard<std::mutex> lock(mutex_, std::adopt_lock);
if (!init_flag_) {
GELOGE(GE_SESS_INIT_FAILED, "[InnerSession:%lu] initialize failed.", session_id_);
return GE_SESS_INIT_FAILED;
}
UpdateThreadContext(graph_id);
vector<GeTensor> geInputs;
for (auto &item : inputs) {
geInputs.push_back(TensorAdapter::AsGeTensor(item));
}
vector<GeTensor> geOutputs;
Status ret = graph_manager_.RunGraph(graph_id, geInputs, geOutputs, session_id_);
domi::GetContext().out_nodes_map.clear();
domi::GetContext().user_out_nodes.clear();
if (ret != SUCCESS) {
GELOGE(ret, "[InnerSession:%lu] run graph failed, graph_id=%u.", session_id_, graph_id);
return ret;
}
outputs.clear();
for (auto &item : geOutputs) {
outputs.push_back(TensorAdapter::AsTensor(item));
}

GELOGI("[InnerSession:%lu] run graph success, graph_id=%u.", session_id_, graph_id);
return SUCCESS;
} else {
GELOGE(GE_SESS_ALREADY_RUNNING, "[InnerSession:%lu] run graph failed, graph_id=%u.", session_id_, graph_id);
return GE_SESS_ALREADY_RUNNING;
}
}
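
RunGraph serializes callers without blocking: try_lock turns a second concurrent run into an immediate GE_SESS_ALREADY_RUNNING, and std::adopt_lock hands the already-held mutex to a lock_guard so every early return still unlocks it. The pattern in isolation (illustrative):

std::mutex m;
if (m.try_lock()) {
  std::lock_guard<std::mutex> lk(m, std::adopt_lock);  // unlocks on any return path
  // ... run the graph ...
} else {
  // another run is in flight: fail fast instead of queueing
}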

Status InnerSession::RemoveGraph(uint32_t graph_id) {
std::lock_guard<std::mutex> lock(resource_mutex_);
if (!init_flag_) {
GELOGE(GE_SESS_INIT_FAILED, "[InnerSession:%lu] initialize failed.", session_id_);
return GE_SESS_INIT_FAILED;
}
UpdateThreadContext(graph_id);
Status ret = graph_manager_.RemoveGraph(graph_id);
if (ret != SUCCESS) {
GELOGE(ret, "[InnerSession:%lu] remove graph failed, graph_id=%u.", session_id_, graph_id);
return ret;
}

GELOGI("[InnerSession:%lu] remove graph success, graph_id=%u.", session_id_, graph_id);
return SUCCESS;
}

Status InnerSession::RegisterCallBackFunc(
const std::string &key,
const std::function<Status(uint32_t, const std::map<std::string, ge::Tensor> &)> &callback) {
std::lock_guard<std::mutex> lock(resource_mutex_);
if (!init_flag_) {
GELOGE(GE_SESS_INIT_FAILED, "[InnerSession:%lu] initialize failed.", session_id_);
return GE_SESS_INIT_FAILED;
}
UpdateThreadContext(std::map<std::string, std::string>{});
Status ret = graph_manager_.RegisterCallBackFunc(key, callback);
if (ret != SUCCESS) {
GELOGE(ret, "[InnerSession:%lu] register %s callback function failed.", session_id_, key.c_str());
return ret;
}

GELOGI("[InnerSession:%lu] register %s callback function success.", session_id_, key.c_str());
return SUCCESS;
}

Status InnerSession::BuildGraph(uint32_t graph_id, const std::vector<InputTensorInfo> &inputs) {
UpdateThreadContext(graph_id);
GELOGI("[InnerSession:%lu] build graph on session, graph_id=%u.", session_id_, graph_id);
std::vector<ge::GeTensor> ge_inputs;
for (auto const &input : inputs) {
std::vector<int64_t> input_dims;
std::transform(input.dims.begin(), input.dims.end(), std::back_inserter(input_dims),
[](int64_t x) -> int64_t { return x; });
GeShape input_shape(input_dims);
GeTensorDesc input_tensor_desc;
input_tensor_desc.SetShape(input_shape);
input_tensor_desc.SetDataType(static_cast<ge::DataType>(input.data_type));
ge_inputs.emplace_back(input_tensor_desc);
}
GeRootModelPtr ge_root_model = nullptr;
Status ret = graph_manager_.BuildGraph(graph_id, ge_inputs, ge_root_model, session_id_, true);
if (ret != SUCCESS) {
GELOGE(ret, "[InnerSession:%lu] build graph failed, graph_id=%u.", session_id_, graph_id);
return ret;
}
GELOGI("[InnerSession:%lu] build graph success, graph_id=%u.", session_id_, graph_id);
return ret;
}

Status InnerSession::RunGraphAsync(uint32_t graph_id, const std::vector<InputTensorInfo> &inputs,
RunAsyncCallback callback) {
UpdateThreadContext(graph_id);
GELOGI("[InnerSession:%lu] run graph on session, graph_id=%u.", session_id_, graph_id);
Status ret = graph_manager_.RunGraphAsync(graph_id, inputs, session_id_, callback);
if (ret != SUCCESS) {
GELOGE(ret, "[InnerSession:%lu] run graph failed, graph_id=%u.", session_id_, graph_id);
return ret;
}
GELOGI("[InnerSession:%lu] run graph success, graph_id=%u.", session_id_, graph_id);
return ret;
}

const GraphManager &InnerSession::getGraphManagerObj() const { return graph_manager_; }

void InnerSession::UpdateThreadContext(const std::map<std::string, std::string> &options) {
GetThreadLocalContext().SetGlobalOption(GetMutableGlobalOptions());
GetThreadLocalContext().SetSessionOption(options_);
GetThreadLocalContext().SetGraphOption(options);
GetContext().SetSessionId(session_id_);
SetRtSocVersion();
}

void InnerSession::UpdateThreadContext(uint32_t graph_id) {
auto options = graph_manager_.GetGraphOptions(graph_id);
if (options == nullptr) {
GELOGW("graph level options is null.");
UpdateThreadContext(std::map<std::string, std::string>{});
} else {
UpdateThreadContext(*options);
}
}

bool InnerSession::IsGraphNeedRebuild(uint32_t graph_id) {
UpdateThreadContext(graph_id);
return graph_manager_.IsGraphNeedRebuild(graph_id);
}

Status InnerSession::GetAllVariables(std::map<std::string, GeTensorDesc> &all_variables) {
return VarManager::Instance(session_id_)->GetAllVariables(all_variables);
}

Status InnerSession::GenCheckPointGraph(const std::map<std::string, GeTensorDesc> &all_variables, Graph &graph) {
return graph_manager_.GenCheckPointGraph(all_variables, graph);
}

Status InnerSession::SaveVariables(const Graph &graph, const std::vector<std::string> &var_names,
const std::vector<Tensor> &outputs, std::vector<Tensor> &var_values) {
return graph_manager_.SaveVariables(graph, var_names, outputs, var_values);
}

Status InnerSession::AddDumpProperties(const DumpProperties &dump_properties) {
if (!is_dump_server_inited_) {
if (dump_properties.IsDumpOpen() || dump_properties.IsOpDebugOpen()) {
GE_IF_BOOL_EXEC(AdxDataDumpServerInit() != kDumpStatus, GELOGE(PARAM_INVALID, "Data dump server init failed");
return PARAM_INVALID)
GELOGI("Init adx data dump server success");
is_dump_server_inited_ = true;
}
}
PropertiesManager::Instance().AddDumpProperties(session_id_, dump_properties);
return SUCCESS;
}

Status InnerSession::RemoveDumpProperties() {
PropertiesManager::Instance().RemoveDumpProperties(session_id_);
if (is_dump_server_inited_ && PropertiesManager::Instance().GetDumpPropertiesMap().empty()) {
GE_IF_BOOL_EXEC(AdxDataDumpServerUnInit() != kDumpStatus, GELOGE(PARAM_INVALID, "Data dump server uninit failed");
return PARAM_INVALID)
GELOGI("UnInit adx data dump server success");
is_dump_server_inited_ = false;
}
return SUCCESS;
}
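
Taken together, the two functions above give the shared dump server a first-in/last-out lifecycle, with PropertiesManager's properties map acting as the reference count (illustrative trace, not from the commit):

// session A adds dump properties   -> AdxDataDumpServerInit()   (first dump user)
// session B adds dump properties   -> no-op, server already up
// session A removes its properties -> map not yet empty, server stays up
// session B removes its properties -> map empty, AdxDataDumpServerUnInit()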

void InnerSession::SetRtSocVersion() {


+29 -29 inc/framework/common/ge_types.h

@@ -1,18 +1,18 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef INC_FRAMEWORK_COMMON_GE_TYPES_H_
#define INC_FRAMEWORK_COMMON_GE_TYPES_H_
@@ -29,30 +29,30 @@

namespace ge {
enum RuntimeType {
HOST = 0,
DEVICE = 1
};

enum PerfLevel {
GEN_TASK_WITH_FUSION = -1,
GEN_TASK_WITHOUT_L2FUSION = 3,
GEN_TASK_WITHOUT_FUSION = 4
};

enum FrameworkType {
CAFFE = 0,
MINDSPORE = 1,
TENSORFLOW = 3,
ANDROID_NN,
FRAMEWORK_RESERVED,
};

enum OpEngineType {
ENGINE_SYS = 0, // default engine
ENGINE_AICORE = 1,
ENGINE_VECTOR = 2,
ENGINE_AICUBE = 3, // not support
ENGINE_AIVECTOR = 4 // not support
};

enum InputAippType{


+8 -8 third_party/fwkacllib/inc/ops/nn_calculation_ops.h

@@ -725,14 +725,14 @@ REG_OP(Conv2D)
| | int8 | int8 | int32 | int8 | int32
-----------|---------|---------|---------|----------|--------
|Format | NCHW | NCHW | ND | ND | NCHW
| | NHWC | NHWC | | | NHWC
| | | HWCN | | |
| | NHWC | HWCN | | | NHWC
@endverbatim
* It should be noted that the data types must correspond to each other, but the
* format does not need to. \n

* Type float32 is allowed only in mixed precision (float32->float16) scenarios.
* Mixed precision is enabled by default.
* \n
*
*@par Attributes:
* @li strides: A list of 4 integers. Specifying the strides of the
*@li strides: Required. A list of 4 integers. Specifying the strides of the
* convolution along the height and width. The dimension order is determined
* by the data format of "x". By default the N and C dimensions are set to 1.
* @li pads: A list of 4 integers. Specifying the top, bottom, left and right
@@ -865,8 +865,8 @@ REG_OP(Conv2DCompress)
* "HxW(filter)" indicates the filter size after dilation.

*@par Quantization supported or not
* Yes
*@li Yes
*
*@par Third-party framework compatibility
*@li Compatible with the TensorFlow operator "conv2d".
*@li Compatible with the Caffe operator 2D "Convolution".
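
As a usage illustration of the strides/pads attributes documented above, assuming the conventional accessors REG_OP generates (the set_input_*/set_attr_* names are assumptions, not verified against this header revision):

// Hedged sketch: a 3x3 NCHW Conv2D with stride 2.
auto conv = ge::op::Conv2D("conv1")
                .set_input_x(x)                  // NCHW input
                .set_input_filter(filter)        // NCHW or HWCN weights
                .set_attr_strides({1, 1, 2, 2})  // N and C strides stay 1
                .set_attr_pads({1, 1, 1, 1})     // top, bottom, left, right
                .set_attr_data_format("NCHW");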

