diff --git a/ge/generator/ge_generator.cc b/ge/generator/ge_generator.cc
index 938a8bc6..9a8a628c 100644
--- a/ge/generator/ge_generator.cc
+++ b/ge/generator/ge_generator.cc
@@ -85,8 +85,9 @@ static Status CheckEngineTypeSupport(const NodePtr &node, OpEngineType engine_ty
   } else {
     ErrorManager::GetInstance().ATCReportErrMessage("E14001", {"opname", "optype", "value", "reason"},
                                                     {op_desc->GetName(), op_desc->GetType(), "engine type",
-                                                    "it only support kEngineNameDefault/kAIcoreEngine/kVectorEngine"});
-    GELOGE(FAILED, "CheckEngineType: engine type: %d not support.", static_cast<int>(engine_type));
+                                                    "it only support default/AIcoreEngine/VectorEngine"});
+    GELOGE(FAILED, "[Check][EngineType]value:%d not support, "
+           "only support default/AIcoreEngine/VectorEngine now", static_cast<int>(engine_type));
     return FAILED;
   }
@@ -190,17 +191,20 @@ static Status AddInputs(const ComputeGraphPtr &graph, const NodePtr &node, const
   (void)AttrUtils::SetBool(data_op, "_is_single_op", true);
-  GE_CHK_BOOL_EXEC(data_op->AddInputDesc(tensor) == GRAPH_SUCCESS, return FAILED, "Add input desc fail");
-  GE_CHK_BOOL_EXEC(data_op->AddOutputDesc(tensor) == GRAPH_SUCCESS, return FAILED, "Add output desc fail");
+  GE_CHK_BOOL_EXEC(data_op->AddInputDesc(tensor) == GRAPH_SUCCESS, return FAILED,
+                   "[Add][InputDesc]fail for node:%s", data_op->GetName().c_str());
+  GE_CHK_BOOL_EXEC(data_op->AddOutputDesc(tensor) == GRAPH_SUCCESS, return FAILED,
+                   "[Add][OutputDesc]fail for node:%s", data_op->GetName().c_str());
   if (attr) {
-    GE_CHK_BOOL_EXEC(AttrUtils::SetInt(data_op, ATTR_NAME_INDEX, index), return FAILED, "Set index fail");
+    GE_CHK_BOOL_EXEC(AttrUtils::SetInt(data_op, ATTR_NAME_INDEX, index), return FAILED,
+                     "[Set][Attr:%s]fail for node:%s", ATTR_NAME_INDEX.c_str(), data_op->GetName().c_str());
   }
   ge::NodePtr arg_node = graph->AddNode(data_op);
   GE_CHK_BOOL_EXEC(arg_node != nullptr, return FAILED, "Insert Data node fail");
   GE_CHK_STATUS(GraphUtils::AddEdge(arg_node->GetOutDataAnchor(0), node->GetInDataAnchor(index)),
-                "Add edge[%s->%s] fail", data_op->GetName().c_str(), node->GetName().c_str());
+                "[Add][Edge]fail from node:%s to node:%s", data_op->GetName().c_str(), node->GetName().c_str());
   return SUCCESS;
 }
@@ -215,20 +219,23 @@ static Status AddOutputs(const ComputeGraphPtr &graph, const NodePtr &node, cons
   for (const auto &out_desc : outputs) {
     GeTensorDesc tensor = out_desc.GetTensorDesc();
     TensorUtils::SetInputTensor(tensor, true);
-    GE_CHK_BOOL_EXEC(op_desc->AddInputDesc(tensor) == GRAPH_SUCCESS, return FAILED, "Add input desc fail.");
+    GE_CHK_BOOL_EXEC(op_desc->AddInputDesc(tensor) == GRAPH_SUCCESS, return FAILED,
+                     "[Add][InputDesc]fail for node:%s", op_desc->GetName().c_str());
     TensorUtils::SetInputTensor(tensor, false);
     TensorUtils::SetOutputTensor(tensor, true);
-    GE_CHK_BOOL_EXEC(op_desc->AddOutputDesc(tensor) == GRAPH_SUCCESS, return FAILED, "Add output desc fail.");
+    GE_CHK_BOOL_EXEC(op_desc->AddOutputDesc(tensor) == GRAPH_SUCCESS, return FAILED,
+                     "[Add][OutputDesc]fail for node:%s", op_desc->GetName().c_str());
     count++;
   }
   GE_CHECK_NOTNULL_EXEC(graph, return PARAM_INVALID);
   ge::NodePtr out_node = graph->AddNode(op_desc);
-  GE_CHK_BOOL_EXEC(out_node != nullptr, return FAILED, "Insert Output node fail");
+  GE_CHK_BOOL_EXEC(out_node != nullptr, return FAILED,
+                   "[Add][Node:%s]fail in graph:%u", op_desc->GetName().c_str(), graph->GetGraphID());
   GE_CHECK_NOTNULL_EXEC(node, return PARAM_INVALID);
   for (int32_t i = 0; i < count; ++i) {
     GE_CHK_STATUS(GraphUtils::AddEdge(node->GetOutDataAnchor(i), out_node->GetInDataAnchor(i)),
-                  "Add edge[%s->%s] fail", node->GetName().c_str(), out_node->GetName().c_str());
+                  "[Add][Edge]fail from node:%s to node:%s", node->GetName().c_str(), out_node->GetName().c_str());
   }
   return SUCCESS;
@@ -248,7 +255,7 @@ static void GetOpsProtoPath(string &opsproto_path) {
     return;
   }
   string path_base = PluginManager::GetPath();
-  GELOGI("path_base is %s.", path_base.c_str());
+  GELOGI("path_base is %s", path_base.c_str());
   path_base = path_base.substr(0, path_base.rfind('/'));
   path_base = path_base.substr(0, path_base.rfind('/') + 1);
   opsproto_path = (path_base + "ops/op_proto/custom/" + ":") + (path_base + "ops/op_proto/built-in/");
@@ -333,7 +340,7 @@ Status GeGenerator::Initialize(const map<string, string> &options, OmgContext &o
   ErrorManager::GetInstance().SetStage(ErrorMessage::kInitialize, ErrorMessage::kOpsProtoInit);
   string opsproto_path;
   GetOpsProtoPath(opsproto_path);
-  GELOGI("Get opsproto path is %s.", opsproto_path.c_str());
+  GELOGI("Get opsproto path is %s", opsproto_path.c_str());
   OpsProtoManager *manager = OpsProtoManager::Instance();
   map<string, string> option_tmp;
   option_tmp.emplace(std::pair<string, string>(string("ge.opsProtoLibPath"), opsproto_path));
@@ -712,7 +719,7 @@ Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &in
     auto node = comp_graph->FindNode(op_desc->GetName());
     Status ret = CheckEngineTypeSupport(node, engine_type);
     if (ret != SUCCESS) {
-      GELOGE(ret, "check engine type failed");
+      GELOGE(ret, "[Check][EngineType]value:%d for node:%s not support", engine_type, node->GetName().c_str());
      return ret;
    }
  }
@@ -786,9 +793,9 @@ Status GeGenerator::BuildSingleOpModel(OpDescPtr &op_desc, const vector<GeTensor
                                        const vector<GeTensor> &outputs, OpEngineType engine_type,
                                        ModelBufferData &model_buff) {
   ErrorManager::GetInstance().SetStage(ErrorMessage::kModelCompile, ErrorMessage::kOther);
-  GELOGI("Start to build single op online, input size: %zu, output size: %zu.", inputs.size(), outputs.size());
+  GELOGI("Start to build single op online, input size: %zu, output size: %zu", inputs.size(), outputs.size());
   Status status = BuildSingleOp(op_desc, inputs, outputs, kFileNameSuffix, engine_type, model_buff, false);
-  GELOGI("Finish build single online model, status: %u.", status);
+  GELOGI("Finish build single online model, status: %u", status);
   return status;
 }
diff --git a/ge/graph/build/logical_stream_allocator.cc b/ge/graph/build/logical_stream_allocator.cc
index c966c5b3..3bc29b70 100644
--- a/ge/graph/build/logical_stream_allocator.cc
+++ b/ge/graph/build/logical_stream_allocator.cc
@@ -33,13 +33,21 @@ using std::queue;
 namespace ge {
 LogicalStreamPass::LogicalStreamPass(const string &name) : name_(name) {}
 
-const string &LogicalStreamPass::GetName() const { return name_; }
+const string &LogicalStreamPass::GetName() const {
+  return name_;
+}
 
-bool LogicalStreamPass::IsEngineSkip(const Subgraph &subgraph) const { return subgraph.engine_conf.skip_assign_stream; }
+bool LogicalStreamPass::IsEngineSkip(const Subgraph &subgraph) const {
+  return subgraph.engine_conf.skip_assign_stream;
+}
 
-bool LogicalStreamPass::IsEngineAttach(const Subgraph &subgraph) const { return subgraph.engine_conf.attach; }
+bool LogicalStreamPass::IsEngineAttach(const Subgraph &subgraph) const {
+  return subgraph.engine_conf.attach;
+}
 
-bool LogicalStreamPass::IsEngineIndependent(const Subgraph &subgraph) const { return subgraph.engine_conf.independent; }
+bool LogicalStreamPass::IsEngineIndependent(const Subgraph &subgraph) const {
+  return subgraph.engine_conf.independent;
+}
 
 bool LogicalStreamPass::HasStreamLabel(const Subgraph &subgraph) const {
   return !subgraph.subgraph_info.GetStreamLabel().empty();
@@ -60,14 +68,14 @@ Status AssignByLabelPass::Run(ComputeGraphPtr graph, const vector<SubgraphPtr> &
       // Subgraphs of the same stream_label are assigned to the same stream,
       // and different stream_labels are assigned new streams.
       auto iter = label_streams.find(stream_label);
-      if (iter != label_streams.end()) {
-        subgraph->stream_id = iter->second;
-      } else {
+      if (iter == label_streams.end()) {
         subgraph->stream_id = next_stream;
-        GELOGI("Assign new stream %ld for label %s", next_stream, stream_label.c_str());
+        GELOGI("Assign new stream %ld for label %s.", next_stream, stream_label.c_str());
         label_streams.emplace(stream_label, next_stream);
-        ++next_stream;
+        next_stream++;
+      } else {
+        subgraph->stream_id = iter->second;
       }
       changed = true;
     }
@@ -92,15 +100,15 @@ Status IndependentStreamPass::Run(ComputeGraphPtr graph, const vector<SubgraphPtr> &
     const string &stream_label = subgraph->subgraph_info.GetStreamLabel();
     auto &label_streams = engine_streams[engine];
     auto iter = label_streams.find(stream_label);
-    if (iter != label_streams.end()) {
-      subgraph->stream_id = iter->second;
-    } else {
+    if (iter == label_streams.end()) {
      subgraph->stream_id = next_stream;
-      GELOGI("Assign new independent stream %ld for engine %s (label: %s)", next_stream, engine.c_str(),
+      GELOGI("Assign new independent stream %ld for engine %s (label: %s).", next_stream, engine.c_str(),
             stream_label.c_str());
      label_streams.emplace(stream_label, next_stream);
-      ++next_stream;
+      next_stream++;
+    } else {
+      subgraph->stream_id = iter->second;
    }
    changed = true;
  }
@@ -121,13 +129,15 @@ Status AssignByDependencyPass::Run(ComputeGraphPtr graph, const vector<SubgraphPtr> &
        subgraph->stream_id = reusable_subgraph->stream_id;
      } else {
        int64_t stream_id = AssignNewStream(reusable_subgraph);
        subgraph->stream_id = stream_id;
-        GELOGI("Reusable subgraph %s has not been assigned a stream, now assign new stream %ld",
+        GELOGI("Reusable subgraph %s has not been assigned a stream, now assign new stream %ld.",
              reusable_subgraph->name.c_str(), stream_id);
      }
@@ -137,11 +147,9 @@ Status AssignByDependencyPass::Run(ComputeGraphPtr graph, const vector<SubgraphPtr> &
      subgraph->reused_subgraph = reusable_subgraph;
      reused_subgraphs_.emplace_back(subgraph, reusable_subgraph);
-      GELOGI("Subgraph %s of engine %s reuses stream of subgraph %s of engine %s", subgraph->name.c_str(),
+      GELOGI("Subgraph %s of engine %s reuses stream of subgraph %s of engine %s.", subgraph->name.c_str(),
             subgraph->engine_conf.id.c_str(), reusable_subgraph->name.c_str(),
             reusable_subgraph->engine_conf.id.c_str());
-    } else {
-      (void)AssignNewStream(subgraph);
    }
    changed = true;
  }
@@ -191,13 +199,15 @@ bool AssignByDependencyPass::CouldReuse(const SubgraphPtr &subgraph, const Subgr
     auto iter = pld_subgraph_map.find(end_pld_pair.second);
     if (iter != pld_subgraph_map.end()) {
       const SubgraphPtr &pred_subgraph_succ = iter->second;
-      if (pred_subgraph_succ != subgraph && pred_subgraph_succ->engine_conf.id == pred_subgraph->engine_conf.id) {
+      if ((pred_subgraph_succ != subgraph) &&
+          (pred_subgraph_succ->engine_conf.id == pred_subgraph->engine_conf.id)) {
         return false;
       }
     }
   }
 
-  if ((subgraph->engine_conf.id == pred_subgraph->engine_conf.id) || IsEngineAttach(*subgraph)) {
+  if ((subgraph->engine_conf.id == pred_subgraph->engine_conf.id) ||
+      IsEngineAttach(*subgraph)) {
     return true;
   }
@@ -249,7 +259,7 @@ int64_t AssignByDependencyPass::AssignNewStream(SubgraphPtr subgraph) {
     engine_stream_num_[engine_name] = stream_id + 1;
   }
 
-  GELOGI("Subgraph %s assigns new temp stream %ld (engine: %s)", subgraph->name.c_str(), stream_id,
+  GELOGI("Subgraph %s assigns new temp stream %ld (engine: %s).", subgraph->name.c_str(), stream_id,
          engine_name.c_str());
 
   return stream_id;
 }
@@ -282,7 +292,7 @@ void AssignByDependencyPass::UpdateAssignedSubgraphs(Context &context) {
       GELOGI("Subgraph %s of engine %s reuses default stream %ld.", subgraph->name.c_str(),
              subgraph->engine_conf.id.c_str(), context.default_stream);
     } else {
-      GELOGI("Stream of subgraph %s has been updated to %ld", subgraph->name.c_str(), subgraph->stream_id);
+      GELOGI("Stream of subgraph %s has been updated to %ld.", subgraph->name.c_str(), subgraph->stream_id);
     }
   }
 }
@@ -293,7 +303,7 @@ void AssignByDependencyPass::UpdateReusedSubgraphs() {
     auto &cur_subgraph = item.first;
     auto &reused_graph = item.second;
     cur_subgraph->stream_id = reused_graph->stream_id;
-    GELOGI("Stream of subgraph %s has been updated to %ld", cur_subgraph->name.c_str(), cur_subgraph->stream_id);
+    GELOGI("Stream of subgraph %s has been updated to %ld.", cur_subgraph->name.c_str(), cur_subgraph->stream_id);
   }
 }
@@ -330,7 +340,7 @@ Status NodeStreamUpdatePass::Run(ComputeGraphPtr graph, const vector<SubgraphPtr> &
-      GELOGI("Subgraph %s is assigned stream %ld (engine: %s)", subgraph->name.c_str(), subgraph->stream_id,
+      GELOGI("Subgraph %s is assigned stream %ld (engine: %s).", subgraph->name.c_str(), subgraph->stream_id,
             engine_name.c_str());
     }
   }
@@ -353,11 +363,11 @@ Status NodeStreamUpdatePass::Run(ComputeGraphPtr graph, const vector<SubgraphPtr> &
             node->GetName().c_str(), node->GetType().c_str(), subgraph->name.c_str(), context.default_stream,
             engine_name.c_str());
     } else if (IsEngineSkip(*subgraph) && node->GetInNodes().empty()) {
-      GELOGD("Node %s of type %s in subgraph %s doesn't need to assign a stream (engine: %s)",
+      GELOGD("Node %s of type %s in subgraph %s doesn't need to assign a stream (engine: %s).",
             node->GetName().c_str(), node->GetType().c_str(), subgraph->name.c_str(), engine_name.c_str());
     } else {
       node->GetOpDesc()->SetStreamId(stream_id);
-      GELOGD("Node %s of type %s in subgraph %s is assigned stream %ld (engine: %s)", node->GetName().c_str(),
+      GELOGD("Node %s of type %s in subgraph %s is assigned stream %ld (engine: %s).", node->GetName().c_str(),
             node->GetType().c_str(), subgraph->name.c_str(), stream_id, engine_name.c_str());
     }
   }
@@ -387,7 +397,7 @@ int64_t UpdateForSkippedEnginePass::GetSingleInoutStream(const NodePtr &node) co
   if (stream_ids.size() == 1) {
     int64_t stream_id = *(stream_ids.begin());
-    GELOGI("The stream of all input and output nodes of node %s (type: %s) is %ld", node->GetName().c_str(),
+    GELOGI("The stream of all input and output nodes of node %s (type: %s) is %ld.", node->GetName().c_str(),
           node->GetType().c_str(), stream_id);
     return stream_id;
   }
@@ -406,7 +416,7 @@ Status UpdateForSkippedEnginePass::Run(ComputeGraphPtr graph, const vector<SubgraphPtr> &
     auto op_desc = node->GetOpDesc();
     GE_CHECK_NOTNULL(op_desc);
     auto stream_id = op_desc->GetStreamId();
-    if (stream_id != kInvalidStream && !HasStreamLabel(*subgraph)) {
+    if ((stream_id != kInvalidStream) && !HasStreamLabel(*subgraph)) {
       ops_without_label.emplace(op_desc);
     }
   }
@@ -427,7 +437,7 @@ Status UpdateForSkippedEnginePass::Run(ComputeGraphPtr graph, const vector<SubgraphPtr> &
        op_desc->SetStreamId(inout_stream);
-        GELOGI("Node %s of type %s reassign to stream %ld from stream %ld", node->GetName().c_str(),
+        GELOGI("Node %s of type %s reassign to stream %ld from stream %ld.", node->GetName().c_str(),
              node->GetType().c_str(), inout_stream, stream_id);
      }
   }
@@ -455,7 +465,7 @@ Status AllReduceParallelPass::Run(ComputeGraphPtr graph, const vector<SubgraphPtr> &
   for (const auto &node : graph->GetDirectNode()) {
     if (!IsHcomNode(node->GetType()) ||
-        node->GetInDataNodes().size() <= 1) {
+        (node->GetInDataNodes().size() <= 1)) {
       continue;
     }
@@ -565,7 +575,7 @@ Status LogicalStreamAllocator::Assign(const ComputeGraphPtr &root_graph, const G
   RefreshContinuousStreams(root_graph);
 
   stream_num = context_.next_stream;
-  GELOGI("Assigned logical stream num: %ld", stream_num);
+  GELOGI("Assigned logical stream num: %ld.", stream_num);
 
   return SUCCESS;
 }
@@ -575,7 +585,7 @@ Status LogicalStreamAllocator::DoAssign(const ComputeGraphPtr &graph, const Grap
   GE_CHECK_NOTNULL(graph);
 
   NodePtr parent_node = graph->GetParentNode();
-  if (parent_node == nullptr || parent_node->GetOpDesc() == nullptr) {
+  if ((parent_node == nullptr) || (parent_node->GetOpDesc() == nullptr)) {
     context_.default_stream = kInvalidStream;
   } else {
     context_.default_stream = parent_node->GetOpDesc()->GetStreamId();
@@ -597,10 +607,10 @@ Status LogicalStreamAllocator::DoAssign(const ComputeGraphPtr &graph, const Grap
     return status;
   }
 
-  GELOGD("Subgraphs of graph %s.", graph->GetName().c_str());
+  GELOGD("Subgraphs of graph %s", graph->GetName().c_str());
   for (const auto &subgraph : subgraphs) {
     if (subgraph != nullptr) {
-      GELOGD("subgraph: %s.", subgraph->name.c_str());
+      GELOGD("subgraph: %s", subgraph->name.c_str());
     }
   }
@@ -664,9 +674,9 @@ Status LogicalStreamAllocator::RunPasses(const ComputeGraphPtr &graph, const vec
     Status status = pass->Run(graph, subgraphs, context_);
     if (status == SUCCESS) {
-      GELOGD("Stream pass %s return SUCCESS", pass->GetName().c_str());
+      GELOGD("Stream pass %s return SUCCESS.", pass->GetName().c_str());
     } else if (status == NOT_CHANGED) {
-      GELOGD("Stream pass %s return NOT_CHANGED", pass->GetName().c_str());
+      GELOGD("Stream pass %s return NOT_CHANGED.", pass->GetName().c_str());
     } else {
       GELOGE(status, "Stream pass %s failed.", pass->GetName().c_str());
       return status;
     }
@@ -686,7 +696,7 @@ void LogicalStreamAllocator::RefreshContinuousStreams(const ComputeGraphPtr &gra
     auto op_desc = node->GetOpDesc();
     if (op_desc != nullptr) {
       int64_t stream_id = op_desc->GetStreamId();
-      if (stream_id != kInvalidStream && stream_id < stream_num) {
+      if ((stream_id != kInvalidStream) && (stream_id < stream_num)) {
        stream_has_node[stream_id] = true;
      }
    }
@@ -695,10 +705,10 @@ void LogicalStreamAllocator::RefreshContinuousStreams(const ComputeGraphPtr &gra
   context_.next_stream = 0;
   vector<int64_t> old_to_new_streams(stream_num, kInvalidStream);
-  for (size_t old_stream = 0; old_stream < stream_has_node.size(); ++old_stream) {
+  for (size_t old_stream = 0; old_stream < stream_has_node.size(); old_stream++) {
     if (stream_has_node[old_stream]) {
       old_to_new_streams[old_stream] = context_.next_stream;
-      ++context_.next_stream;
+      context_.next_stream++;
     }
   }
@@ -706,7 +716,7 @@ void LogicalStreamAllocator::RefreshContinuousStreams(const ComputeGraphPtr &gra
     auto op_desc = node->GetOpDesc();
     if (op_desc != nullptr) {
       int64_t stream_id = op_desc->GetStreamId();
-      if (stream_id != kInvalidStream && stream_id < stream_num) {
+      if ((stream_id != kInvalidStream) && (stream_id < stream_num)) {
         op_desc->SetStreamId(old_to_new_streams[stream_id]);
       }
     }
diff --git a/ge/graph/manager/graph_caching_allocator.cc b/ge/graph/manager/graph_caching_allocator.cc
index 10f6b498..97aaab1c 100644
--- a/ge/graph/manager/graph_caching_allocator.cc
+++ b/ge/graph/manager/graph_caching_allocator.cc
@@ -40,7 +40,7 @@ static bool BlockComparator(const Block *left, const Block *right) {
 }
 
 bool CanMerge(Block *block) {
-  if (block == nullptr || block->allocated || !block->IsSplit()) {
+  if ((block == nullptr) || block->allocated || !block->IsSplit()) {
     return false;
   }
   return true;
 }
@@ -52,7 +52,7 @@ size_t GetBinIndex(size_t size) {
     if (size <= range) {
       break;
     }
-    ++index;
+    index++;
   }
   if (index > kNumBins - 1) {
     index = kNumBins - 1;
   }
@@ -95,17 +95,17 @@ void IncreaseCount(std::map<size_t, size_t> &count, size_t size) {
 }
 
 CachingAllocator::CachingAllocator(rtMemType_t memory_type) : memory_type_(memory_type), memory_allocator_(nullptr) {
-  for (uint32_t i = 0; i < kNumBins; ++i) {
+  for (uint32_t i = 0; i < kNumBins; i++) {
     free_block_bins_[i] = nullptr;
   }
 }
 
 Status CachingAllocator::Initialize(uint32_t device_id) {
-  GELOGI("Device id %u.", device_id);
+  GELOGI("Device id %u", device_id);
   // when redo Initialize free old memory
   FreeBlocks();
   std::lock_guard<std::mutex> lock(mutex_);
-  for (uint32_t i = 0; i < kNumBins; ++i) {
+  for (uint32_t i = 0; i < kNumBins; i++) {
     if (free_block_bins_[i] != nullptr) {
       continue;
     }
@@ -124,14 +124,14 @@ Status CachingAllocator::Initialize(uint32_t device_id) {
 }
 
 void CachingAllocator::Finalize(uint32_t device_id) {
-  GELOGI("Device id %u.", device_id);
+  GELOGI("Device id %u", device_id);
   PrintStatics();
   FreeBlocks();
   FreeBlockBins();
 }
 
 uint8_t *CachingAllocator::Malloc(size_t size, uint8_t *org_ptr, uint32_t device_id) {
-  GELOGI("Start malloc pool memory, size = %zu, device id = %u.", size, device_id);
+  GELOGI("Start malloc pool memory, size = %zu, device id = %u", size, device_id);
   uint8_t *ptr = nullptr;
   size = GetBlockSize(size);
   Block *block = FindFreeBlock(size, org_ptr, device_id);
@@ -152,7 +152,7 @@ uint8_t *CachingAllocator::Malloc(size_t size, uint8_t *org_ptr, uint32_t device
 }
 
 Status CachingAllocator::Free(uint8_t *ptr, uint32_t device_id) {
-  GELOGI("Free device id = %u.", device_id);
+  GELOGI("Free device id = %u", device_id);
   if (ptr == nullptr) {
     GELOGE(PARAM_INVALID, "Invalid memory pointer");
     return ge::PARAM_INVALID;
@@ -171,10 +171,10 @@ Status CachingAllocator::Free(uint8_t *ptr, uint32_t device_id) {
 }
 
 void CachingAllocator::FreeBlock(Block *block) {
-  if (block == nullptr || !block->allocated) {
+  if ((block == nullptr) || !block->allocated) {
     return;
   }
-  GELOGI("Free block size = %zu.", block->size);
+  GELOGI("Free block size = %zu", block->size);
   std::lock_guard<std::mutex> lock(mutex_);
   block->allocated = false;
@@ -227,7 +227,7 @@ Block *CachingAllocator::FindFreeBlock(size_t size, uint8_t *org_ptr, uint32_t d
     Block *block = *it;
     bin->erase(it);
     if (block != nullptr) {
-      GELOGI("Find block size = %zu.", block->size);
+      GELOGI("Find block size = %zu", block->size);
       if (ShouldSplit(block, size)) {
         block = SplitBlock(block, size, *bin, device_id);
       }
@@ -235,7 +235,7 @@ Block *CachingAllocator::FindFreeBlock(size_t size, uint8_t *org_ptr, uint32_t d
       if (block->ptr != nullptr) {
         block->allocated = true;
         allocated_blocks_[block->ptr] = block;
-        GELOGI("Malloc device id = %u, size= %zu.", device_id, size);
+        GELOGI("Malloc device id = %u, size= %zu", device_id, size);
       }
     }
@@ -265,7 +265,7 @@ Block *CachingAllocator::SplitBlock(Block *block, size_t size, BlockBin &bin, ui
 }
 
 Status CachingAllocator::TryExtendCache(size_t size, uint32_t device_id) {
-  GELOGI("Try to extend cache. size = %zu, device id = %u.", size, device_id);
+  GELOGI("Try to extend cache. size = %zu, device id = %u", size, device_id);
   auto memory_size = GetAllocationSize(size);
   const std::string purpose = "Memory for caching.";
   auto memory_addr = memory_allocator_->MallocMemory(purpose, memory_size, device_id);
@@ -302,7 +302,7 @@ Status CachingAllocator::AddToBlockBin(uint8_t *ptr, size_t size, uint32_t devic
     return ge::FAILED;
   }
 
-  GELOGI("Block size = %zu.", size);
+  GELOGI("Block size = %zu", size);
   block->ptr = ptr;
   block->size = size;
@@ -313,10 +313,10 @@ Status CachingAllocator::AddToBlockBin(uint8_t *ptr, size_t size, uint32_t devic
 }
 
 size_t CachingAllocator::FreeCachedBlocks() {
-  GELOGI("Free cached blocks.");
+  GELOGI("Free cached blocks");
   std::lock_guard<std::mutex> lock(mutex_);
   size_t free_cached_memory_size = 0;
-  for (uint32_t i = 0; i < kNumBins; ++i) {
+  for (uint32_t i = 0; i < kNumBins; i++) {
     auto pool = free_block_bins_[i];
     if (pool == nullptr) {
       continue;
     }
@@ -324,7 +324,8 @@ size_t CachingAllocator::FreeCachedBlocks() {
     for (auto it = pool->begin(); it != pool->end();) {
       Block *block = *it;
       // free block memory that has not been split
-      if ((block != nullptr) && (block->ptr != nullptr) && (block->prev == nullptr) && (block->next == nullptr) &&
+      if ((block != nullptr) && (block->ptr != nullptr) &&
+          (block->prev == nullptr) && (block->next == nullptr) &&
           (memory_allocator_->FreeMemory(block->ptr) == ge::SUCCESS)) {
         auto itcount = malloced_memory_.find(block->size);
         free_cached_memory_size += block->size;
@@ -345,7 +346,7 @@ size_t CachingAllocator::FreeCachedBlocks() {
 }
 
 void CachingAllocator::FreeBlocks() {
-  GELOGI("Free blocks");
+  GELOGI("Free blocks.");
   std::lock_guard<std::mutex> lock(mutex_);
   // free allocated blocks and put to cache
   for (auto &it : allocated_blocks_) {
@@ -356,9 +357,9 @@ void CachingAllocator::FreeBlocks() {
 }
 
 void CachingAllocator::FreeBlockBins() {
-  GELOGI("Free block bins");
+  GELOGI("Free block bins.");
   std::lock_guard<std::mutex> lock(mutex_);
-  for (uint32_t i = 0; i < kNumBins; ++i) {
+  for (uint32_t i = 0; i < kNumBins; i++) {
     if (free_block_bins_[i] != nullptr) {
       delete free_block_bins_[i];
       free_block_bins_[i] = nullptr;
@@ -367,9 +368,9 @@ void CachingAllocator::FreeBlockBins() {
 }
 
 void PrintCount(std::map<size_t, size_t> &count, const std::string &name, size_t total_size, size_t total_count) {
-  GELOGI("%6s total[size:%10zu count:%10zu]", name.c_str(), total_size, total_count);
+  GELOGI("%6s total[size:%10zu count:%10zu].", name.c_str(), total_size, total_count);
   for (auto &it : count) {
-    GELOGI("  |- block[size:%10zu count:%10zu]", it.first, it.second);
+    GELOGI("  |- block[size:%10zu count:%10zu].", it.first, it.second);
   }
 }
@@ -383,20 +384,20 @@ void CachingAllocator::PrintStatics() {
   size_t total_free_count = 0;
   size_t total_malloc_size = 0;
   size_t total_malloc_count = 0;
-  std::map<size_t, size_t> using_block;
-  std::map<size_t, size_t> free_block;
-  std::map<size_t, size_t> malloc_block;
+  std::map<size_t, size_t> using_block_stat;
+  std::map<size_t, size_t> free_block_stat;
+  std::map<size_t, size_t> malloc_block_stat;
   do {
     std::lock_guard<std::mutex> lock(mutex_);
-    for (uint32_t i = 0; i < kNumBins; ++i) {
+    for (uint32_t i = 0; i < kNumBins; i++) {
       auto pool = free_block_bins_[i];
       if (pool == nullptr) {
         continue;
       }
-      for (auto it = pool->begin(); it != pool->end(); ++it) {
+      for (auto it = pool->begin(); it != pool->end(); it++) {
        if ((*it) != nullptr) {
          total_free_size += (*it)->size;
-          IncreaseCount(free_block, (*it)->size);
+          IncreaseCount(free_block_stat, (*it)->size);
          total_free_count++;
        }
      }
@@ -405,7 +406,7 @@ void CachingAllocator::PrintStatics() {
     for (auto &it : allocated_blocks_) {
       if (it.second != nullptr) {
         total_using_size += it.second->size;
-        IncreaseCount(using_block, it.second->size);
+        IncreaseCount(using_block_stat, it.second->size);
         total_using_count++;
       }
     }
@@ -413,12 +414,12 @@ void CachingAllocator::PrintStatics() {
     for (auto &it : malloced_memory_) {
       total_malloc_size += it.first * it.second;
       total_malloc_count += it.second;
-      malloc_block[it.first] = it.second;
+      malloc_block_stat[it.first] = it.second;
     }
   } while (0);
 
-  PrintCount(malloc_block, "Malloc", total_malloc_size, total_malloc_count);
-  PrintCount(using_block, "Using", total_using_size, total_using_count);
-  PrintCount(free_block, "Free", total_free_size, total_free_count);
+  PrintCount(malloc_block_stat, "Malloc", total_malloc_size, total_malloc_count);
+  PrintCount(using_block_stat, "Using", total_using_size, total_using_count);
+  PrintCount(free_block_stat, "Free", total_free_size, total_free_count);
 }
 }  // namespace ge