
serializer_oss.cpp 32 kB

/**
 * \file src/serialization/impl/serializer_oss.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
/*
 * Dump file layout:
 * [uint32_t fourcc]
 * [00 00 00 00]
 * [uint64_t offset to graph from tensor start]
 * [Tensor 1]
 * [Tensor 2]
 * [...]
 * [Tensor N]
 * [SizePrefixed FlatBuffers Graph]
 */
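// Concretely, the first 16 bytes of a dump look like this on a little-endian
// machine (an illustration derived from the writer/reader code below, not an
// extra format guarantee):
//   offset 0:  4D 47 42 53   fourcc "MGBS" (MGB_MAGIC = 0x5342474D)
//   offset 4:  00 00 00 00   reserved padding
//   offset 8:  uint64_t      distance from the end of this field (i.e. the
//                            start of tensor data) to the size-prefixed
//                            FlatBuffers graph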
#if MGB_ENABLE_FBS_SERIALIZATION

#include "batched_device_value_loader.h"
#include "megbrain/graph/exc_extra_info.h"
#include "megbrain/opr/io.h"
#include "megbrain/serialization/helper.h"
#include "megbrain/serialization/internal/flatbuffers_helper.h"
#include "megbrain/serialization/internal/schema_generated.h"
#include "megbrain/serialization/opr_load_dump.h"
#include "megbrain/serialization/serializer.h"
#include "megbrain/version.h"

#include <flatbuffers/flatbuffers.h>

#include <cerrno>
#include <cinttypes>
#include <cstdio>

using namespace mgb;
using namespace mgb::serialization;

namespace {

constexpr uint32_t MGB_VERSION =
        (MGB_MAJOR * 1000 + MGB_MINOR) * 100 + MGB_PATCH;

constexpr uint32_t MGB_MAGIC = 0x5342474D;

template <typename T>
bool contains_any_in_set(const SmallVector<T>& list,
                         const ThinHashSet<T>& set) {
    for (const auto& x : list) {
        if (set.count(x)) {
            return true;
        }
    }
    return false;
}

void check_tensor_value_valid(const std::string& name,
                              const HostTensorND& tensor) {
    mgb_assert(tensor.layout().is_physical_contiguous(),
               "non-contiguous tensor: name=%s layout=%s", name.c_str(),
               tensor.layout().to_string().c_str());
    if (tensor.dtype() == dtype::Float32()) {
        auto ptr = tensor.ptr<float>();
        for (size_t i = 0, it = tensor.shape().total_nr_elems(); i < it; ++i) {
            if (!std::isfinite(ptr[i])) {
                mgb_log_warn("invalid tensor value in %s: %g", name.c_str(),
                             ptr[i]);
                break;
            }
        }
    }
}

} // namespace

namespace mgb {
namespace serialization {

class GraphDumperOSS final : public GraphDumper, OprDumpContextFlatBuffers {
    const std::unique_ptr<OutputFile> m_file;
    flatbuffers::FlatBufferBuilder m_builder;
    DumpConfig m_config;
    DumpResult m_cur_rst;
    size_t m_nr_shared_tensor;
    std::vector<std::pair<cg::OperatorNodeBase*, const OprRegistry*>>
            m_oprs_to_dump;
    ThinHashMap<VarNode*, size_t> m_var2id;
    //! set of output vars specified by user
    ThinHashSet<VarNode*> m_output_vars;
    std::unordered_set<std::string> m_used_input_names, m_used_param_names;
    //! current opr to be dumped
    cg::OperatorNodeBase* m_cur_opr = nullptr;
    // Will be filled in dump_tensor
    std::vector<flatbuffers::Offset<fbs::Tensor>> m_cur_opr_tensor;
    std::vector<flatbuffers::Offset<fbs::Blob>> m_blobs;
    std::vector<fbs::OperatorParam> m_cur_opr_param_type;
    std::vector<flatbuffers::Offset<void>> m_cur_opr_param;

    void init_oprs_to_dump(const SymbolVarArray& endpoints);
    flatbuffers::Offset<fbs::Operator> build_single_opr(
            cg::OperatorNodeBase* opr, const OprRegistry* registry);
    flatbuffers::Offset<fbs::DType> build_dtype(DType dtype);

public:
    GraphDumperOSS(std::unique_ptr<OutputFile> file) : m_file{std::move(file)} {}
    DumpResult dump(const SymbolVarArray& output_vars,
                    const DumpConfig& config = {}) override;
    const GraphDumpConfig& config() const override { return m_config; }
    void dump_tensor(const std::string& name, const HostTensorND& tensor,
                     TensorWriteMethod method) override;
    flatbuffers::FlatBufferBuilder& builder() override { return m_builder; }
    void append_param(uint32_t type, uint32_t value) override {
        static_assert(std::is_same<uint32_t, flatbuffers::uoffset_t>::value,
                      "append_param depends on uoffset_t being uint32_t");
        static_assert(std::is_standard_layout<flatbuffers::Offset<void>>::value,
                      "append_param depends on flatbuffers::Offset having "
                      "standard memory layout");
        mgb_assert(type != fbs::OperatorParam_NONE);
        m_cur_opr_param_type.emplace_back(
                static_cast<fbs::OperatorParam>(type));
        m_cur_opr_param.emplace_back(value);
    }
    void dump_buf_with_len(const void* data, uint32_t size) override;
    GraphDumpFormat format() const override {
        return GraphDumpFormat::FLATBUFFERS;
    }
};
flatbuffers::Offset<fbs::DType> GraphDumperOSS::build_dtype(DType dtype) {
    return fbs::intl::build_dtype(m_builder, dtype);
}

void GraphDumperOSS::init_oprs_to_dump(const SymbolVarArray& endpoints) {
    m_oprs_to_dump.clear();
    m_var2id.clear();
    // iterate oprs to init m_var2id
    size_t next_id = 0;
    auto on_opr = [&](cg::OperatorNodeBase* opr) {
        if (should_remove_in_dump(opr)) {
            mgb_assert(opr->input().size() == 1);
            // Copy input ID to output
            auto id = m_var2id.at(opr->input(0));
            for (auto i : opr->output())
                m_var2id[i] = id;
        } else {
            auto registry = OprRegistry::find_by_type(opr->dyn_typeinfo());
            if (!registry || !registry->dumper) {
                mgb_throw(cg::OperatorNodeExcExtraInfo::ExcMaker{opr}
                                  .make<MegBrainError>,
                          "serialization as FlatBuffers is not supported for "
                          "operator %s",
                          opr->dyn_typeinfo()->name);
            }
            m_oprs_to_dump.emplace_back(opr, registry);
            for (auto i : opr->output()) {
                if (!i->contain_flag(VarNode::Flag::VOLATILE_CONTENT)) {
                    m_var2id[i] = next_id++;
                }
            }
        }
    };
    cg::DepOprIter dep_opr_iter{on_opr};
    for (auto i : endpoints) {
        dep_opr_iter.add(i.node()->owner_opr());
    }
}

flatbuffers::Offset<fbs::Operator> GraphDumperOSS::build_single_opr(
        cg::OperatorNodeBase* opr, const OprRegistry* registry) {
    m_cur_opr = opr;
    ++m_cur_rst.nr_opr;

    using namespace flatbuffers;

    Offset<Vector<Offset<fbs::CompNode>>> comp_node;
    auto& config = opr->config();
    if (config.has_comp_node_set()) {
        std::vector<flatbuffers::Offset<fbs::CompNode>> cns;
        for (const auto& cn : config.comp_node()) {
            cns.emplace_back(fbs::CreateCompNode(
                    m_builder,
                    m_builder.CreateSharedString(cn.to_string_logical())));
        }
        comp_node = m_builder.CreateVector(cns);
    }

    Offset<Vector<uint32_t>> inputs;
    if (opr->input().size()) {
        std::vector<uint32_t> v;
        v.reserve(opr->input().size());
        for (auto inp : opr->input()) {
            v.emplace_back(m_var2id.at(inp));
        }
        inputs = m_builder.CreateVector(v);
    }

    Offset<Vector<Offset<String>>> output_names;
    if (m_config.keep_var_name >= 2 ||
        (m_config.keep_var_name == 1 &&
         contains_any_in_set(opr->output(), m_output_vars))) {
        std::vector<std::string> onames;
        for (auto i : opr->output()) {
            if (!i->contain_flag(VarNode::Flag::VOLATILE_CONTENT)) {
                onames.emplace_back(i->name());
            }
        }
        output_names = m_builder.CreateVectorOfStrings(onames);
    }

    auto output_dtype = build_dtype(config.output_dtype());

    m_cur_opr_tensor.clear();
    m_blobs.clear();
    m_cur_opr_param.clear();
    m_cur_opr_param_type.clear();
    registry->dumper(*this, *opr);

    Offset<Vector<Offset<fbs::Tensor>>> tensors;
    if (m_cur_opr_tensor.size())
        tensors = m_builder.CreateVector(m_cur_opr_tensor);

    Offset<Vector<Offset<fbs::Blob>>> blobs;
    if (m_blobs.size())
        blobs = m_builder.CreateVector(m_blobs);

    Offset<Vector<uint8_t>> additional_params_type;
    Offset<Vector<Offset<void>>> additional_params;
    auto param_cnt = m_cur_opr_param_type.size();
    if (param_cnt > 1) {
        additional_params_type = m_builder.CreateVectorScalarCast<uint8_t>(
                m_cur_opr_param_type.data() + 1, param_cnt - 1);
        additional_params = m_builder.CreateVector(m_cur_opr_param.data() + 1,
                                                   param_cnt - 1);
    }

    fbs::OperatorBuilder builder(m_builder);
    builder.add_type_id(registry->unversioned_type_id);
    builder.add_inputs(inputs);
    if (m_config.keep_opr_priority) {
        builder.add_priority(opr->node_prop().attribute().priority);
    }
    builder.add_comp_node(comp_node);
    builder.add_output_name(output_names);
    builder.add_output_dtype(output_dtype);
    if (param_cnt > 0) {
        builder.add_param_type(m_cur_opr_param_type[0]);
        builder.add_param(m_cur_opr_param[0]);
    }
    if (param_cnt > 1) {
        builder.add_additional_params_type(additional_params_type);
        builder.add_additional_params(additional_params);
    }
    builder.add_tensors(tensors);
    builder.add_blobs(blobs);
    m_cur_opr = nullptr;
    return builder.Finish();
}
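// Note on the layout built above: the first param recorded via append_param()
// is stored inline in the Operator table (param_type / param); any further
// params go into the additional_params_type / additional_params vectors.
// OprLoadContextImpl::get_next_param() in the loader below mirrors this split.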
GraphDumper::DumpResult GraphDumperOSS::dump(
        const SymbolVarArray& output_vars, const DumpConfig& config) {
    mgb_throw_if(output_vars.empty(), SerializationError,
                 "Can't dump empty graph");

    auto begin_pos = m_file->tell();
    m_config = config;
    m_builder.Reset();

    m_output_vars.clear();
    m_cur_rst = {};
    m_used_input_names.clear();
    m_used_param_names.clear();
    m_nr_shared_tensor = 0;

    // process output vars
    bool keep_output_var_name = m_config.keep_var_name >= 1;
    std::unordered_set<std::string> output_var_names;
    for (auto i : output_vars) {
        mgb_assert(!i.node()->contain_flag(VarNode::Flag::VOLATILE_CONTENT),
                   "can not dump var with VOLATILE_CONTENT flag: %s",
                   cg::dump_var_info({i.node()}).c_str());
        if (m_output_vars.insert(i.node()).second && keep_output_var_name) {
            auto name_ins = output_var_names.insert(i.node()->name()).second;
            mgb_assert(name_ins, "duplicated output var name: %s",
                       i.node()->cname());
        }
    }

    // Write magic
    uint32_t magic = MGB_MAGIC;
    m_file->write(&magic, sizeof(magic));

    // Padding
    uint32_t reserved = 0;
    m_file->write(&reserved, sizeof(reserved));

    // Write placeholder for offset_to_fbs
    auto offset_pos = m_file->tell();
    uint64_t offset_to_fbs = 0;
    m_file->write(&offset_to_fbs, sizeof(offset_to_fbs));

    // Dump operators
    init_oprs_to_dump(output_vars);
    std::vector<flatbuffers::Offset<fbs::Operator>> oprs;
    for (auto&& i : m_oprs_to_dump) {
        oprs.emplace_back(build_single_opr(i.first, i.second));
    }
    auto fb_oprs = m_builder.CreateVector(oprs);

    // Dump output vars
    std::vector<fbs::OutputVar> output_vars_idx;
    output_vars_idx.reserve(output_vars.size());
    for (auto i : output_vars) {
        output_vars_idx.emplace_back(m_var2id.at(i.node()), i.node()->id());
    }
    auto fb_output_vars = m_builder.CreateVectorOfStructs(output_vars_idx);

    XXHash content_hash;
    content_hash.update(m_builder.GetCurrentBufferPointer(),
                        m_builder.GetSize());
    auto graph_hash = content_hash.digest();

    fbs::GraphBuilder graph(m_builder);
    graph.add_mgb_version(MGB_VERSION);
    graph.add_hash(graph_hash);
    graph.add_oprs(fb_oprs);
    graph.add_output_vars_idx(fb_output_vars);
    graph.add_nr_shared_tensor(m_nr_shared_tensor);
    m_builder.FinishSizePrefixed(graph.Finish(), fbs::GraphIdentifier());

    // Write actual offset_to_fbs
    auto cur = m_file->tell();
    mgb_assert(cur >= offset_pos && cur - offset_pos >= sizeof(offset_to_fbs));
    offset_to_fbs = cur - offset_pos - sizeof(offset_to_fbs);
    m_file->seek(offset_pos);
    m_file->write(&offset_to_fbs, sizeof(offset_to_fbs));
    m_file->seek(cur);

    // Write serialized fbs::Graph
    m_file->write(m_builder.GetBufferPointer(), m_builder.GetSize());

    // Finalize DumpResult
    auto&& ret = m_cur_rst;
    for (size_t i = 0; i < output_vars.size(); i++) {
        ret.outputs.emplace_back(keep_output_var_name
                                         ? output_vars[i].node()->cname()
                                         : ssprintf("unnamed%zu", i));
    }
    ret.content_hash = graph_hash;
    std::sort(ret.inputs.begin(), ret.inputs.end());
    mgb_assert(ret.nr_opr == m_oprs_to_dump.size());
    ret.tot_bytes = m_file->tell() - begin_pos;
    return ret;
}
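// A minimal dumping sketch (illustration only; OutputFile::make_fs and the
// exact DumpConfig fields used here are assumptions based on the public
// serialization headers, not guaranteed by this file):
//
//   auto dumper = make_fbs_dumper(OutputFile::make_fs("model.mgb"));
//   DumpConfig cfg;
//   cfg.keep_var_name = 1;  // keep names of user-specified output vars only
//   auto rst = dumper->dump({output_symvar}, cfg);
//   // rst.nr_opr, rst.tot_bytes, rst.outputs, ... describe what was written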
void GraphDumperOSS::dump_tensor(const std::string& name,
                                 const HostTensorND& tensor,
                                 TensorWriteMethod method) {
    using namespace flatbuffers;
    using Meth = TensorWriteMethod;
    mgb_assert((method == Meth::VALUE_ANONYMOUS) ^ (!name.empty()),
               "name must be non-empty for non Meth::VALUE_ANONYMOUS tensors");

    bool has_value = method != Meth::META_INPUT;
    bool should_keep_name = true;
    switch (method) {
        case Meth::VALUE_ANONYMOUS:
            should_keep_name = false;
            break;
        case Meth::VALUE_SHARED:
            should_keep_name = m_config.keep_param_name;
            ++m_nr_shared_tensor;
            if (m_config.keep_param_name) {
                mgb_assert(m_used_param_names.insert(name).second,
                           "duplicated VALUE_SHARED tensor name: %s",
                           name.c_str());
                m_cur_rst.params.emplace_back(name);
            }
            break;
        case Meth::META_INPUT:
        case Meth::VALUE_INPUT:
            mgb_assert(!name.empty(), "empty input tensor name");
            mgb_assert(m_used_input_names.insert(name).second,
                       "duplicated input tensor name: %s", name.c_str());
            m_cur_rst.inputs.emplace_back(name);
            break;
    }

    size_t value_size = 0;
    if (has_value) {
        check_tensor_value_valid(name, tensor);
        auto begin = m_file->tell();
        auto&& dumper = m_config.tensor_value_dumper;
        if (dumper) {
            dumper(*m_file, *m_cur_opr, tensor);
        } else {
            m_file->write(tensor.raw_ptr(), tensor.layout().span().high_byte);
        }
        value_size = m_file->tell() - begin;
        m_cur_rst.tensor_value_bytes += value_size;
    }

    auto fbname = should_keep_name ? m_builder.CreateSharedString(name) : 0;
    auto shape = m_builder.CreateVectorScalarCast<uint32_t>(
            tensor.shape().shape, tensor.shape().ndim);
    auto comp_node = fbs::CreateCompNode(
            m_builder, m_builder.CreateSharedString(
                               tensor.comp_node().to_string_logical()));
    auto dtype = build_dtype(tensor.dtype());
    auto serialized_tensor = fbs::CreateTensor(m_builder, fbname, shape,
                                               comp_node, dtype, value_size);
    m_cur_opr_tensor.emplace_back(serialized_tensor);
}

void GraphDumperOSS::dump_buf_with_len(const void* data, uint32_t size) {
    auto blob = fbs::CreateBlob(
            m_builder,
            m_builder.CreateVector(static_cast<const uint8_t*>(data), size));
    m_blobs.emplace_back(blob);
}

// ----------------------------- Loader --------------------------------------

class GraphLoaderOSS final : public GraphLoader {
    const LoadConfig* m_cur_load_config = nullptr;
    std::unique_ptr<InputFile> m_file;
    SharedBuffer m_graph_buf{{}, 0};
    const fbs::Graph* m_graph;
    SharedTensorIDMap m_shared_tensor_map;
    uint32_t m_mgb_version = 0;
    uint64_t m_graph_hash = 0;

    class OprLoadContextImpl;
    friend class OprLoadContextImpl;

    void verify();

public:
    GraphLoaderOSS(std::unique_ptr<InputFile> input_file)
            : m_file{std::move(input_file)} {}

    std::unique_ptr<InputFile> reset_file(
            std::unique_ptr<InputFile> file) override {
        file.swap(m_file);
        return file;
    }

    LoadResult load(const LoadConfig& config, bool rewind) override;

    const SharedTensorIDMap& shared_tensor_id_map() const override {
        mgb_assert(m_graph_hash, "graph not loaded yet");
        return m_shared_tensor_map;
    }

    GraphDumpFormat format() const override {
        return GraphDumpFormat::FLATBUFFERS;
    }
};

class GraphLoaderOSS::OprLoadContextImpl final
        : public OprLoadContextFlatBuffers {
    GraphLoaderOSS* const m_loader;
    size_t m_cur_shared_tensor_idx = 0;
    std::shared_ptr<ComputingGraph> m_graph;
    LoadResult::TensorMap m_tensor_map;
    VarNodeArray m_id2varnode;
    BatchedDeviceValueLoader m_device_value_loader;
    const fbs::Operator* m_current_opr;
    size_t m_cur_opr_tensor_cnt;
    size_t m_cur_opr_blob_cnt;
    size_t m_cur_opr_param_cnt;

    ComputingGraph& graph() override { return *m_graph; }

    const GraphLoadConfig& config() const override {
        return *m_loader->m_cur_load_config;
    }

    void load_tensor_value(HostTensorND* dest, const TensorLayout& layout,
                           const fbs::Tensor* tensor);
    std::shared_ptr<HostTensorND> load_tensor() override;
    std::shared_ptr<DeviceTensorND> load_tensor_shared() override;
    void load_single_opr(const fbs::Operator* opr);

public:
    OprLoadContextImpl(GraphLoaderOSS* loader, uint32_t version)
            : OprLoadContextFlatBuffers(version), m_loader{loader} {
        m_graph = loader->m_cur_load_config->comp_graph;
        if (!m_graph) {
            m_graph = ComputingGraph::make();
        }
        auto maker = [this]() {
            return std::shared_ptr<OprLoadContext>{
                    std::shared_ptr<OprLoadContext>{}, this};
        };
        auto got = m_graph->options()
                           .user_data.get_user_data_or_create<OprLoadContext>(
                                   maker);
        mgb_assert(got == this);
    }
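
    // (The maker lambda above uses the aliasing shared_ptr constructor: the
    // returned pointer refers to this stack-allocated context but owns
    // nothing, so storing it in the graph's user_data does not extend the
    // context's lifetime; the destructor below pops it again.)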
    ~OprLoadContextImpl() noexcept {
        auto nr = m_graph->options().user_data.pop_user_data<OprLoadContext>();
        mgb_assert(nr == 1);
    }

    LoadResult load_oprs();

    CompNode load_comp_node(const fbs::CompNode* comp_node);

    const void* get_next_param(uint32_t enumv) override {
        auto type = static_cast<fbs::OperatorParam>(enumv);
        if (m_cur_opr_param_cnt == 0) {
            m_cur_opr_param_cnt++;
            if (m_current_opr->param_type() == type) {
                return m_current_opr->param();
            }
        } else {
            mgb_assert(m_current_opr->additional_params() &&
                       m_cur_opr_param_cnt - 1 <
                               m_current_opr->additional_params()->size());
            auto i = m_cur_opr_param_cnt++ - 1;
            if (m_current_opr->additional_params_type()->Get(i) == type) {
                return m_current_opr->additional_params()->Get(i);
            }
        }
        return nullptr;
    }

    std::string load_buf_with_len() override {
        mgb_assert(m_current_opr->blobs() &&
                   m_cur_opr_blob_cnt < m_current_opr->blobs()->size());
        auto blob = m_current_opr->blobs()->Get(m_cur_opr_blob_cnt++);
        mgb_assert(blob && blob->data());
        auto data = blob->data()->data();
        return {reinterpret_cast<const char*>(data), blob->data()->size()};
    }

    SharedBuffer load_shared_buf_with_len() override {
        mgb_assert(m_current_opr->blobs() &&
                   m_cur_opr_blob_cnt < m_current_opr->blobs()->size());
        auto blob = m_current_opr->blobs()->Get(m_cur_opr_blob_cnt++);
        mgb_assert(blob && blob->data());
        auto size = blob->data()->size();
        std::shared_ptr<uint8_t> shptr{new uint8_t[size],
                                       [](uint8_t* p) { delete[] p; }};
        memcpy(shptr.get(), blob->data()->data(), size);
        return {std::move(shptr), size};
    }
};

CompNode GraphLoaderOSS::OprLoadContextImpl::load_comp_node(
        const fbs::CompNode* comp_node) {
    mgb_assert(comp_node);
    if (!comp_node->logical_locator())
        return {};
    auto loc = CompNode::Locator::parse(comp_node->logical_locator()->str());
    m_loader->m_cur_load_config->comp_node_mapper(loc);
    return CompNode::load(loc);
}

TensorLayout load_tensor_layout(const fbs::Tensor* tensor) {
    TensorLayout layout;
    if (tensor->shape()) {
        layout.ndim = tensor->shape()->size();
        std::copy(tensor->shape()->begin(), tensor->shape()->end(),
                  layout.shape);
        layout.init_contiguous_stride();
    }
    if (tensor->dtype()) {
        layout.dtype = fbs::intl::load_dtype(tensor->dtype());
    }
    return layout;
}

void GraphLoaderOSS::OprLoadContextImpl::load_tensor_value(
        HostTensorND* dest, const TensorLayout& layout,
        const fbs::Tensor* tensor) {
    auto&& loader = m_loader->m_cur_load_config->tensor_value_loader;
    auto&& file = m_loader->m_file;
    auto begin_pos = file->tell();
    file->skip(tensor->offset());
    if (loader) {
        // call custom loader
        void* dest_ptr = nullptr;
        if (dest) {
            dest->dtype(layout.dtype).resize(layout);
            dest_ptr = dest->raw_ptr();
        }
        loader(dest_ptr, layout, *file);
    } else {
        if (dest) {
            file->read_into_tensor(*dest, layout);
        } else {
            file->skip(layout.span().high_byte);
        }
    }
    mgb_throw_if(file->tell() < begin_pos, SerializationError,
                 "Custom tensor value loader accessed out of range data before "
                 "start of data blob");
    auto data_size = tensor->data_size();
    auto consumed_size = file->tell() - begin_pos;
    mgb_throw_if(consumed_size > data_size, SerializationError,
                 "Custom tensor value loader consumed more data than "
                 "available: consumed %lu, has %u",
                 consumed_size, data_size);
    if (consumed_size < data_size) {
        mgb_log_warn(
                "Tensor value loader consumed less data than available: "
                "consumed %lu bytes, has %u bytes",
                consumed_size, data_size);
        file->skip(data_size - consumed_size);
    }
}

std::shared_ptr<HostTensorND>
GraphLoaderOSS::OprLoadContextImpl::load_tensor() {
    mgb_assert(m_current_opr->tensors() &&
               m_cur_opr_tensor_cnt < m_current_opr->tensors()->size());
    auto tensor = m_current_opr->tensors()->Get(m_cur_opr_tensor_cnt++);
    auto comp_node = load_comp_node(tensor->comp_node());
    auto layout = load_tensor_layout(tensor);
    auto ret = std::make_shared<HostTensorND>(comp_node, layout);
    if (tensor->data_size()) {
        load_tensor_value(ret.get(), layout, tensor);
    }
    if (tensor->name()) {
        m_tensor_map[tensor->name()->str()] = ret;
    }
    if (auto&& mod = m_loader->m_cur_load_config->tensor_modifier) {
        mod(tensor->name() ? tensor->name()->str() : "",
            tensor->data_size() != 0, *ret);
    }
    return ret;
}

std::shared_ptr<DeviceTensorND>
GraphLoaderOSS::OprLoadContextImpl::load_tensor_shared() {
    mgb_assert(m_current_opr->tensors() &&
               m_cur_opr_tensor_cnt < m_current_opr->tensors()->size());
    auto tensor = m_current_opr->tensors()->Get(m_cur_opr_tensor_cnt++);
    auto comp_node = load_comp_node(tensor->comp_node());
    auto layout = load_tensor_layout(tensor);
    mgb_assert(tensor->data_size());
    auto&& sh_reg = m_loader->m_shared_tensor_map.at(m_cur_shared_tensor_idx++);
    auto&& sh_ptr_ref = sh_reg.second[comp_node.mem_node()];
    if (sh_ptr_ref) {
        // cached tensor value is valid so we can reuse it
        load_tensor_value(nullptr, layout, tensor);
        if (sh_ptr_ref->comp_node() == comp_node)
            return sh_ptr_ref;
        // same mem node but different comp node, change comp node and share
        // value
        auto ret = std::make_shared<DeviceTensorND>(*sh_ptr_ref);
        ret->comp_node(comp_node);
        return ret;
    }
    if (tensor->name()) {
        sh_reg.first = tensor->name()->str();
    }
    if (comp_node.mem_node() == CompNode::default_cpu().mem_node()) {
        // directly forward CPU memory
        HostTensorND hv{comp_node};
        load_tensor_value(&hv, layout, tensor);
        sh_ptr_ref = std::make_shared<DeviceTensorND>();
        *sh_ptr_ref = DeviceTensorND::make_proxy(hv);
    } else {
        // use lazy load for non-CPU devices
        HostTensorND hv{CompNode::default_cpu()};
        load_tensor_value(&hv, layout, tensor);
        sh_ptr_ref = m_device_value_loader.make(comp_node, std::move(hv));
    }
    return sh_ptr_ref;
}
void GraphLoaderOSS::OprLoadContextImpl::load_single_opr(
        const fbs::Operator* fbopr) {
    m_cur_opr_tensor_cnt = 0;
    m_cur_opr_blob_cnt = 0;
    m_cur_opr_param_cnt = 0;

    OperatorNodeConfig config;
    if (fbopr->output_dtype()) {
        config.output_dtype(fbs::intl::load_dtype(fbopr->output_dtype()));
    }
    if (fbopr->comp_node()) {
        auto cnt = fbopr->comp_node()->size();
        cg::OperatorNodeConfig::CompNodeArray comp_node_arr(cnt);
        for (size_t i = 0; i < cnt; i++) {
            CompNode cn{};
            auto node = fbopr->comp_node()->Get(i);
            if (node) {
                cn = load_comp_node(node);
            }
            comp_node_arr[i] = cn;
        }
        config.comp_node_arr(comp_node_arr);
    }

    auto registry = OprRegistry::find_by_unversioned_id(fbopr->type_id());
    mgb_throw_if(!registry, SerializationError,
                 "failed to find opr with type %s, use "
                 "mgb.config.dump_registered_oprs() "
                 "to get a dict that maps from opr id to opr name",
                 std::to_string(fbopr->type_id()).c_str());

    // load inputs
    VarNodeArray inputs;
    if (fbopr->inputs()) {
        inputs.resize(fbopr->inputs()->size());
        for (size_t i = 0; i < inputs.size(); ++i) {
            inputs[i] = m_id2varnode.at(fbopr->inputs()->Get(i));
        }
    }

    // call loader
    auto opr = registry->loader(*this, inputs, config);

    // check opr type; note that:
    // 1. registry->type may be empty for dynamic opr loaders or legacy oprs
    // 2. due to some optimization, an opr may be replaced by ImmutableTensor
    mgb_assert(
            opr && (opr->dyn_typeinfo() == registry->type || !registry->type ||
                    opr->same_type<opr::ImmutableTensor>()),
            "got_type=%s expected_type=%s",
            opr ? opr->dyn_typeinfo()->name : "(null)", registry->type->name);

    // record output vars; read output names
    size_t i = 0;
    for (auto ovar : opr->output()) {
        if (!ovar->contain_flag(VarNode::Flag::VOLATILE_CONTENT)) {
            m_id2varnode.push_back(ovar);
            if (fbopr->output_name()) {
                ovar->name(fbopr->output_name()->Get(i++)->str());
            }
        }
    }

    opr->node_prop().attribute().priority = fbopr->priority();
}
GraphLoader::LoadResult GraphLoaderOSS::OprLoadContextImpl::load_oprs() {
    // load oprs
    const auto* oprs = m_loader->m_graph->oprs();
    for (flatbuffers::uoffset_t i = 0; i < oprs->size(); ++i) {
        m_current_opr = oprs->Get(i);
        load_single_opr(m_current_opr);
    }

    // batched loading device values
    m_device_value_loader.apply();

    LoadResult ret;
    ret.graph = m_graph;
    ret.tensor_map = m_tensor_map;

    const auto* outputs = m_loader->m_graph->output_vars_idx();
    ret.output_var_list.resize(outputs->size());
    for (flatbuffers::uoffset_t i = 0; i < outputs->size(); i++) {
        auto out = outputs->Get(i);
        auto var = m_id2varnode.at(out->compact_id());
        ret.output_var_map[var->name()] = var;
        ret.output_var_map_id[out->original_id()] = var;
        ret.output_var_list[i] = var;
    }
    mgb_assert(m_cur_shared_tensor_idx == m_loader->m_shared_tensor_map.size());
    return ret;
}

GraphLoader::LoadResult GraphLoaderOSS::load(const LoadConfig& config,
                                             bool rewind) {
    mgb_assert(m_file);
    m_cur_load_config = &config;
    if (rewind) {
        m_file->rewind();
    }

    uint32_t magic;
    m_file->read(&magic, sizeof(magic));
    mgb_throw_if(magic != MGB_MAGIC, SerializationError,
                 "wrong magic: wanted %#08x, actual %#08x (not a MegBrain fbs "
                 "model?)",
                 MGB_MAGIC, magic);
    m_file->skip(4);

    uint64_t offset_to_fbs;
    m_file->read(&offset_to_fbs, sizeof(offset_to_fbs));
    auto tensor_begin = m_file->tell();

    // Skip tensor data
    m_file->skip(offset_to_fbs);

    // Read fbs::Graph
    uint32_t size;
    m_file->read(&size, sizeof(size));
    m_graph_buf = m_file->read_shared(size);

    // Rewind back to tensor data
    m_file->rewind();
    m_file->skip(tensor_begin);

    mgb_throw_if(!fbs::GraphBufferHasIdentifier(m_graph_buf.data()),
                 SerializationError, "not a MegBrain fbs model");
    {
        flatbuffers::Verifier verifier(
                static_cast<const uint8_t*>(m_graph_buf.data()),
                m_graph_buf.size());
        mgb_throw_if(!fbs::VerifyGraphBuffer(verifier), SerializationError,
                     "model verification failed (invalid or corrupted model?)");
    }

    m_graph = fbs::GetGraph(m_graph_buf.data());

    m_mgb_version = m_graph->mgb_version();
    if (m_graph->mgb_version() > MGB_VERSION) {
        mgb_log_warn(
                "loading model from future MegBrain: version=%u "
                "model_version=%u",
                MGB_VERSION, m_graph->mgb_version());
    }
    if (!m_graph_hash) {
        m_graph_hash = m_graph->hash();
        mgb_assert(m_graph_hash,
                   "invalid graph hash; maybe error "
                   "occurred during graph dump");
    } else {
        mgb_assert(m_graph_hash == m_graph->hash(),
                   "A GraphLoader instance can be used to load only one graph,"
                   " since the tensor values are shared. Previous graph hash "
                   "is 0x%llx, current graph hash is 0x%llx.",
                   static_cast<unsigned long long>(m_graph_hash),
                   static_cast<unsigned long long>(m_graph->hash()));
    }

    if (m_shared_tensor_map.empty()) {
        m_shared_tensor_map.resize(m_graph->nr_shared_tensor());
    } else {
        mgb_assert(m_shared_tensor_map.size() == m_graph->nr_shared_tensor());
    }

    OprLoadContextImpl ctx{this, m_graph->mgb_version()};
    auto result = ctx.load_oprs();

    auto fbs_end = tensor_begin + offset_to_fbs + sizeof(size) + size;
    auto cur = m_file->tell();
    mgb_assert(fbs_end > cur);
    // Skip to Graph end
    m_file->skip(fbs_end - cur);
    return result;
}
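// A minimal loading sketch (illustration only; InputFile::make_fs and the
// default arguments of GraphLoader::load() are assumptions based on the
// public serialization headers):
//
//   auto loader = make_fbs_loader(InputFile::make_fs("model.mgb"));
//   auto rst = loader->load();
//   auto out_var = rst.output_var_list.at(0);  // first dumped output var
//   // rst.tensor_map maps dumped tensor names to host tensors for feeding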
std::unique_ptr<GraphDumper> make_fbs_dumper(std::unique_ptr<OutputFile> file) {
    return std::make_unique<GraphDumperOSS>(std::move(file));
}

std::unique_ptr<GraphLoader> make_fbs_loader(std::unique_ptr<InputFile> file) {
    return std::make_unique<GraphLoaderOSS>(std::move(file));
}

bool is_fbs_file(InputFile& file) {
    uint64_t magic_with_reserved = 0;
    file.read(&magic_with_reserved, sizeof(magic_with_reserved));
    file.skip(-sizeof(magic_with_reserved));
    return magic_with_reserved == MGB_MAGIC;
}
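// (is_fbs_file reads the 4-byte magic together with the 4 reserved zero bytes
// as one uint64_t; on a little-endian machine the value equals MGB_MAGIC
// exactly when the file starts with "MGBS" followed by four zero bytes.)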
} // namespace serialization
} // namespace mgb

#endif

The MegEngine installation package bundles the CUDA environment needed to run code on GPUs, so there are no separate CPU and GPU builds. To run GPU programs, make sure the machine has GPU hardware and a properly installed driver. If you would like to try deep-learning development on a cloud GPU computing platform, you are welcome to visit the MegStudio platform.