@@ -1,5 +1,6 @@
 file (GLOB_RECURSE SOURCES ./*.cpp)
 add_executable(lite_examples ${SOURCES})
+target_include_directories(lite_examples PUBLIC ./)
 if(LITE_BUILD_WITH_RKNPU)
     #rknn sdk1.0.0 depend on libc++_shared, use gold to remove NEEDED so symbol check
@@ -33,6 +34,7 @@ if(LITE_BUILD_WITH_RKNPU)
 endif()
 target_link_libraries(lite_examples_depends_shared lite_shared)
+target_include_directories(lite_examples_depends_shared PUBLIC ./)
 if(UNIX)
     if(APPLE OR ANDROID)
@@ -49,57 +49,20 @@ ExampleFuncMap* get_example_function_map();
 bool register_example(std::string example_name, const ExampleFunc& fuction);
-template <int>
-struct Register;
-#if LITE_BUILD_WITH_MGE
-bool basic_load_from_path(const Args& args);
-bool basic_load_from_path_with_loader(const Args& args);
-bool basic_load_from_memory(const Args& args);
-bool cpu_affinity(const Args& args);
-bool network_share_same_weights(const Args& args);
-bool reset_input(const Args& args);
-bool reset_input_output(const Args& args);
-bool config_user_allocator(const Args& args);
-bool register_cryption_method(const Args& args);
-bool update_cryption_key(const Args& args);
-bool async_forward(const Args& args);
-bool set_input_callback(const Args& arg);
-bool set_output_callback(const Args& arg);
-bool picture_classification(const Args& arg);
-bool detect_yolox(const Args& arg);
-#if LITE_WITH_CUDA
-bool load_from_path_run_cuda(const Args& args);
-bool device_input(const Args& args);
-bool device_input_output(const Args& args);
-bool pinned_host_input(const Args& args);
-#endif
-#endif
 } // namespace example
 } // namespace lite
-#if LITE_BUILD_WITH_MGE
-bool basic_c_interface(const lite::example::Args& args);
-bool device_io_c_interface(const lite::example::Args& args);
-bool async_c_interface(const lite::example::Args& args);
-#endif
 #define CONCAT_IMPL(a, b) a##b
 #define MACRO_CONCAT(a, b) CONCAT_IMPL(a, b)
 #define REGIST_EXAMPLE(name_, func_) REGIST_EXAMPLE_WITH_NUM(__COUNTER__, name_, func_)
-#define REGIST_EXAMPLE_WITH_NUM(number_, name_, func_) \
-    template <> \
-    struct Register<number_> { \
-        Register() { register_example(name_, func_); } \
-    }; \
-    namespace { \
-    Register<number_> MACRO_CONCAT(example_function_, number_); \
+#define REGIST_EXAMPLE_WITH_NUM(number_, name_, func_) \
+    struct Register_##func_ { \
+        Register_##func_() { lite::example::register_example(name_, func_); } \
+    }; \
+    namespace { \
+    Register_##func_ MACRO_CONCAT(func_, number_); \
     }
 // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
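Note on the macro change above: the reworked REGIST_EXAMPLE_WITH_NUM no longer specializes a Register<number_> template; it declares a per-function Register_##func_ struct whose constructor calls lite::example::register_example, and instantiates it as a static object in an anonymous namespace, so each example registers itself before main() runs. As a rough illustration (the function name my_example is hypothetical, and __COUNTER__ is assumed to expand to 3), REGIST_EXAMPLE("my_example", my_example) would expand to:

struct Register_my_example {
    Register_my_example() { lite::example::register_example("my_example", my_example); }
};
namespace {
Register_my_example my_example3;  // static object; its constructor performs the registration at startup
}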
@@ -60,7 +60,8 @@ bool lite::example::register_example(
         std::string example_name, const ExampleFunc& fuction) {
     auto map = get_example_function_map();
     if (map->find(example_name) != map->end()) {
-        printf("Error!!! This example is registed yet\n");
+        printf("example_name: %s Error!!! This example is registed yet\n",
+                example_name.c_str());
         return false;
     }
     (*map)[example_name] = fuction;
@@ -142,41 +143,5 @@ int main(int argc, char** argv) {
         return -1;
     }
 }
-namespace lite {
-namespace example {
-#if LITE_BUILD_WITH_MGE
-#if LITE_WITH_CUDA
-REGIST_EXAMPLE("load_from_path_run_cuda", load_from_path_run_cuda);
-#endif
-REGIST_EXAMPLE("basic_load_from_path", basic_load_from_path);
-REGIST_EXAMPLE("basic_load_from_path_with_loader", basic_load_from_path_with_loader);
-REGIST_EXAMPLE("basic_load_from_memory", basic_load_from_memory);
-REGIST_EXAMPLE("cpu_affinity", cpu_affinity);
-REGIST_EXAMPLE("register_cryption_method", register_cryption_method);
-REGIST_EXAMPLE("update_cryption_key", update_cryption_key);
-REGIST_EXAMPLE("network_share_same_weights", network_share_same_weights);
-REGIST_EXAMPLE("reset_input", reset_input);
-REGIST_EXAMPLE("reset_input_output", reset_input_output);
-REGIST_EXAMPLE("config_user_allocator", config_user_allocator);
-REGIST_EXAMPLE("async_forward", async_forward);
-REGIST_EXAMPLE("set_input_callback", set_input_callback);
-REGIST_EXAMPLE("set_output_callback", set_output_callback);
-REGIST_EXAMPLE("basic_c_interface", basic_c_interface);
-REGIST_EXAMPLE("device_io_c_interface", device_io_c_interface);
-REGIST_EXAMPLE("async_c_interface", async_c_interface);
-REGIST_EXAMPLE("picture_classification", picture_classification);
-REGIST_EXAMPLE("detect_yolox", detect_yolox);
-#if LITE_WITH_CUDA
-REGIST_EXAMPLE("device_input", device_input);
-REGIST_EXAMPLE("device_input_output", device_input_output);
-REGIST_EXAMPLE("pinned_host_input", pinned_host_input);
-#endif
-#endif
-} // namespace example
-} // namespace lite
 // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
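For context, the registry filled by register_example() above is a plain name-to-function map owned by get_example_function_map(). A minimal self-contained sketch of the same pattern follows; Args, ExampleFunc and ExampleFuncMap here are simplified stand-ins, not the actual Lite definitions:

#include <cstdio>
#include <functional>
#include <string>
#include <unordered_map>

struct Args {};  // stand-in for lite::example::Args
using ExampleFunc = std::function<bool(const Args&)>;
using ExampleFuncMap = std::unordered_map<std::string, ExampleFunc>;

ExampleFuncMap* get_example_function_map() {
    static ExampleFuncMap instance;  // constructed on first use, so static registrars can call in safely
    return &instance;
}

bool register_example(std::string example_name, const ExampleFunc& function) {
    auto map = get_example_function_map();
    if (map->find(example_name) != map->end()) {
        printf("example_name: %s is already registered\n", example_name.c_str());
        return false;
    }
    (*map)[example_name] = function;
    return true;
}

int main() {
    register_example("demo", [](const Args&) { return true; });
    // look up and run an example by name, as the real main() does with argv
    return get_example_function_map()->at("demo")(Args{}) ? 0 : 1;
}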
@@ -10,7 +10,7 @@
  */
 #include <thread>
-#include "../example.h"
+#include "example.h"
 #if LITE_BUILD_WITH_MGE
 #include <cstdio>
@@ -77,61 +77,8 @@ void output_data_info(std::shared_ptr<Network> network, size_t output_size) {
 }
 } // namespace
-#if LITE_WITH_CUDA
-bool lite::example::load_from_path_run_cuda(const Args& args) {
-    std::string network_path = args.model_path;
-    std::string input_path = args.input_path;
-    set_log_level(LiteLogLevel::DEBUG);
-    //! config the network running in CUDA device
-    lite::Config config{false, -1, LiteDeviceType::LITE_CUDA};
-    //! set NetworkIO
-    NetworkIO network_io;
-    std::string input_name = "img0_comp_fullface";
-    bool is_host = false;
-    IO device_input{input_name, is_host};
-    network_io.inputs.push_back(device_input);
-    //! create and load the network
-    std::shared_ptr<Network> network = std::make_shared<Network>(config, network_io);
-    network->load_model(network_path);
-    std::shared_ptr<Tensor> input_tensor = network->get_input_tensor(0);
-    Layout input_layout = input_tensor->get_layout();
-    //! read data from numpy data file
-    auto src_tensor = parse_npy(input_path);
-    //! malloc the device memory
-    auto tensor_device = Tensor(LiteDeviceType::LITE_CUDA, input_layout);
-    //! copy to the device memory
-    tensor_device.copy_from(*src_tensor);
-    //! Now the device memory if filled with user input data, set it to the
-    //! input tensor
-    input_tensor->reset(tensor_device.get_memory_ptr(), input_layout);
-    //! forward
-    {
-        lite::Timer ltimer("warmup");
-        network->forward();
-        network->wait();
-        ltimer.print_used_time(0);
-    }
-    lite::Timer ltimer("forward_iter");
-    for (int i = 0; i < 10; i++) {
-        ltimer.reset_start();
-        network->forward();
-        network->wait();
-        ltimer.print_used_time(i);
-    }
-    //! get the output data or read tensor set in network_in
-    size_t output_size = network->get_all_output_name().size();
-    output_info(network, output_size);
-    output_data_info(network, output_size);
-    return true;
-}
-#endif
-bool lite::example::basic_load_from_path(const Args& args) {
+namespace {
+bool basic_load_from_path(const Args& args) {
     set_log_level(LiteLogLevel::DEBUG);
     std::string network_path = args.model_path;
     std::string input_path = args.input_path;
@@ -193,7 +140,7 @@ bool lite::example::basic_load_from_path(const Args& args) {
     return true;
 }
-bool lite::example::basic_load_from_path_with_loader(const Args& args) {
+bool basic_load_from_path_with_loader(const Args& args) {
     set_log_level(LiteLogLevel::DEBUG);
     lite::set_loader_lib_path(args.loader_path);
     std::string network_path = args.model_path;
@@ -251,7 +198,7 @@ bool lite::example::basic_load_from_path_with_loader(const Args& args) {
     return true;
 }
-bool lite::example::basic_load_from_memory(const Args& args) {
+bool basic_load_from_memory(const Args& args) {
     std::string network_path = args.model_path;
     std::string input_path = args.input_path;
@@ -307,7 +254,7 @@ bool lite::example::basic_load_from_memory(const Args& args) {
     return true;
 }
-bool lite::example::async_forward(const Args& args) {
+bool async_forward(const Args& args) {
     std::string network_path = args.model_path;
     std::string input_path = args.input_path;
     Config config;
@@ -366,7 +313,7 @@ bool lite::example::async_forward(const Args& args) {
     return true;
 }
-bool lite::example::set_input_callback(const Args& args) {
+bool set_input_callback(const Args& args) {
     std::string network_path = args.model_path;
     std::string input_path = args.input_path;
     Config config;
@@ -433,7 +380,7 @@ bool lite::example::set_input_callback(const Args& args) {
     return true;
 }
-bool lite::example::set_output_callback(const Args& args) {
+bool set_output_callback(const Args& args) {
     std::string network_path = args.model_path;
     std::string input_path = args.input_path;
     Config config;
@@ -500,7 +447,73 @@ bool lite::example::set_output_callback(const Args& args) {
     printf("max=%e, sum=%e\n", max, sum);
     return true;
 }
+} // namespace
+REGIST_EXAMPLE("basic_load_from_path", basic_load_from_path);
+REGIST_EXAMPLE("basic_load_from_path_with_loader", basic_load_from_path_with_loader);
+REGIST_EXAMPLE("basic_load_from_memory", basic_load_from_memory);
+REGIST_EXAMPLE("async_forward", async_forward);
+REGIST_EXAMPLE("set_input_callback", set_input_callback);
+REGIST_EXAMPLE("set_output_callback", set_output_callback);
+#if LITE_WITH_CUDA
+namespace {
+bool load_from_path_run_cuda(const Args& args) {
+    std::string network_path = args.model_path;
+    std::string input_path = args.input_path;
+    set_log_level(LiteLogLevel::DEBUG);
+    //! config the network running in CUDA device
+    lite::Config config{false, -1, LiteDeviceType::LITE_CUDA};
+    //! set NetworkIO
+    NetworkIO network_io;
+    std::string input_name = "img0_comp_fullface";
+    bool is_host = false;
+    IO device_input{input_name, is_host};
+    network_io.inputs.push_back(device_input);
+    //! create and load the network
+    std::shared_ptr<Network> network = std::make_shared<Network>(config, network_io);
+    network->load_model(network_path);
+    std::shared_ptr<Tensor> input_tensor = network->get_input_tensor(0);
+    Layout input_layout = input_tensor->get_layout();
+    //! read data from numpy data file
+    auto src_tensor = parse_npy(input_path);
+    //! malloc the device memory
+    auto tensor_device = Tensor(LiteDeviceType::LITE_CUDA, input_layout);
+    //! copy to the device memory
+    tensor_device.copy_from(*src_tensor);
+    //! Now the device memory if filled with user input data, set it to the
+    //! input tensor
+    input_tensor->reset(tensor_device.get_memory_ptr(), input_layout);
+    //! forward
+    {
+        lite::Timer ltimer("warmup");
+        network->forward();
+        network->wait();
+        ltimer.print_used_time(0);
+    }
+    lite::Timer ltimer("forward_iter");
+    for (int i = 0; i < 10; i++) {
+        ltimer.reset_start();
+        network->forward();
+        network->wait();
+        ltimer.print_used_time(i);
+    }
+    //! get the output data or read tensor set in network_in
+    size_t output_size = network->get_all_output_name().size();
+    output_info(network, output_size);
+    output_data_info(network, output_size);
+    return true;
+}
+} // namespace
+REGIST_EXAMPLE("load_from_path_run_cuda", load_from_path_run_cuda);
+#endif
 #endif
 // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
@@ -9,13 +9,14 @@
  * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  */
-#include "../example.h"
+#include "example.h"
 #if LITE_BUILD_WITH_MGE
 using namespace lite;
 using namespace example;
-bool lite::example::cpu_affinity(const Args& args) {
+namespace {
+bool cpu_affinity(const Args& args) {
     std::string network_path = args.model_path;
     std::string input_path = args.input_path;
@@ -65,6 +66,9 @@ bool lite::example::cpu_affinity(const Args& args) {
     printf("max=%e, sum=%e\n", max, sum);
     return true;
 }
+} // namespace
+REGIST_EXAMPLE("cpu_affinity", cpu_affinity);
 #endif
 // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
@@ -10,7 +10,7 @@
  */
 #include <thread>
-#include "../../example.h"
+#include "example.h"
 #if LITE_BUILD_WITH_MGE
 #include <cstdio>
@@ -289,6 +289,10 @@ void decode_outputs(
 void draw_objects(
         uint8_t* image, int width, int height, int channel,
         const std::vector<Object>& objects) {
+    (void)image;
+    (void)width;
+    (void)height;
+    (void)channel;
     for (size_t i = 0; i < objects.size(); i++) {
         const Object& obj = objects[i];
@@ -297,9 +301,7 @@ void draw_objects(
     }
 }
-} // namespace
-bool lite::example::detect_yolox(const Args& args) {
+bool detect_yolox(const Args& args) {
     std::string network_path = args.model_path;
     std::string input_path = args.input_path;
@@ -332,6 +334,9 @@ bool lite::example::detect_yolox(const Args& args) {
     stbi_image_free(image);
     return 0;
 }
+} // namespace
+REGIST_EXAMPLE("detect_yolox", detect_yolox);
 #endif
@@ -10,7 +10,7 @@
  */
 #include <thread>
-#include "../../example.h"
+#include "example.h"
 #if LITE_BUILD_WITH_MGE
 #include <cstdio>
@@ -80,9 +80,8 @@ void classfication_process(
     }
     printf("output tensor sum is %f\n", sum);
 }
-} // namespace
-bool lite::example::picture_classification(const Args& args) {
+bool picture_classification(const Args& args) {
     std::string network_path = args.model_path;
     std::string input_path = args.input_path;
@@ -109,6 +108,9 @@ bool lite::example::picture_classification(const Args& args) {
            class_id, score);
     return 0;
 }
+} // namespace
+REGIST_EXAMPLE("picture_classification", picture_classification);
 #endif
@@ -10,15 +10,17 @@
  */
 #include <thread>
-#include "../example.h"
+#include "example.h"
 #if LITE_BUILD_WITH_MGE
+#include "misc.h"
 using namespace lite;
 using namespace example;
 #if LITE_WITH_CUDA
-bool lite::example::device_input(const Args& args) {
+namespace {
+bool device_input(const Args& args) {
     std::string network_path = args.model_path;
     std::string input_path = args.input_path;
@@ -73,7 +75,7 @@ bool lite::example::device_input(const Args& args) {
     return true;
 }
-bool lite::example::device_input_output(const Args& args) {
+bool device_input_output(const Args& args) {
     std::string network_path = args.model_path;
     std::string input_path = args.input_path;
@@ -136,7 +138,7 @@ bool lite::example::device_input_output(const Args& args) {
     return true;
 }
-bool lite::example::pinned_host_input(const Args& args) {
+bool pinned_host_input(const Args& args) {
     std::string network_path = args.model_path;
     std::string input_path = args.input_path;
@@ -181,6 +183,11 @@ bool lite::example::pinned_host_input(const Args& args) {
     printf("max=%e, sum=%e\n", max, sum);
     return true;
 }
+} // namespace
+REGIST_EXAMPLE("device_input", device_input);
+REGIST_EXAMPLE("device_input_output", device_input_output);
+REGIST_EXAMPLE("pinned_host_input", pinned_host_input);
 #endif
 #endif
@@ -9,7 +9,7 @@
  * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  */
-#include "../example.h"
+#include "example.h"
 #include "misc.h"
 #if LITE_BUILD_WITH_MGE
 #include "lite-c/global_c.h"
@@ -218,5 +218,10 @@ bool async_c_interface(const lite::example::Args& args) {
     printf("max=%e, sum=%e\n", max, sum);
     return true;
 }
+REGIST_EXAMPLE("basic_c_interface", basic_c_interface);
+REGIST_EXAMPLE("device_io_c_interface", device_io_c_interface);
+REGIST_EXAMPLE("async_c_interface", async_c_interface);
 #endif
 // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
@@ -9,13 +9,15 @@
  * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  */
-#include "../example.h"
+#include "example.h"
 #if LITE_BUILD_WITH_MGE
 using namespace lite;
 using namespace example;
-bool lite::example::network_share_same_weights(const Args& args) {
+namespace {
+bool network_share_same_weights(const Args& args) {
     std::string network_path = args.model_path;
     std::string input_path = args.input_path;
@@ -75,5 +77,9 @@ bool lite::example::network_share_same_weights(const Args& args) {
     printf("max=%e, sum=%e\n", max, sum);
     return true;
 }
+} // namespace
+REGIST_EXAMPLE("network_share_same_weights", network_share_same_weights);
 #endif
 // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
@@ -9,13 +9,15 @@
  * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  */
-#include "../example.h"
+#include "example.h"
 #if LITE_BUILD_WITH_MGE
 using namespace lite;
 using namespace example;
-bool lite::example::reset_input(const Args& args) {
+namespace {
+bool reset_input(const Args& args) {
     std::string network_path = args.model_path;
     std::string input_path = args.input_path;
     lite::Config config;
@@ -53,7 +55,7 @@ bool lite::example::reset_input(const Args& args) {
     return true;
 }
-bool lite::example::reset_input_output(const Args& args) {
+bool reset_input_output(const Args& args) {
     std::string network_path = args.model_path;
     std::string input_path = args.input_path;
     lite::Config config;
@@ -92,5 +94,10 @@ bool lite::example::reset_input_output(const Args& args) {
     printf("max=%e, sum=%e\n", max, sum);
     return true;
 }
+} // namespace
+REGIST_EXAMPLE("reset_input", reset_input);
+REGIST_EXAMPLE("reset_input_output", reset_input_output);
 #endif
 // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
@@ -9,7 +9,7 @@
  * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  */
-#include "../example.h"
+#include "example.h"
 #if LITE_BUILD_WITH_MGE
 using namespace lite;
 using namespace example;
@@ -42,9 +42,8 @@ public:
 #endif
     };
 };
-} // namespace
-bool lite::example::config_user_allocator(const Args& args) {
+bool config_user_allocator(const Args& args) {
     std::string network_path = args.model_path;
     std::string input_path = args.input_path;
@@ -87,5 +86,9 @@ bool lite::example::config_user_allocator(const Args& args) {
     printf("max=%e, sum=%e\n", max, sum);
     return true;
 }
+} // namespace
+REGIST_EXAMPLE("config_user_allocator", config_user_allocator);
 #endif
 // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
@@ -9,7 +9,7 @@
  * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  */
-#include "../example.h"
+#include "example.h"
 #if LITE_BUILD_WITH_MGE
 using namespace lite;
@@ -31,9 +31,8 @@ std::vector<uint8_t> decrypt_model(
         return {};
     }
 }
-} // namespace
-bool lite::example::register_cryption_method(const Args& args) {
+bool register_cryption_method(const Args& args) {
     std::string network_path = args.model_path;
     std::string input_path = args.input_path;
@@ -75,7 +74,7 @@ bool lite::example::register_cryption_method(const Args& args) {
     return true;
 }
-bool lite::example::update_cryption_key(const Args& args) {
+bool update_cryption_key(const Args& args) {
     std::string network_path = args.model_path;
     std::string input_path = args.input_path;
@@ -120,5 +119,9 @@ bool lite::example::update_cryption_key(const Args& args) {
     printf("max=%e, sum=%e\n", max, sum);
     return true;
 }
+} // namespace
+REGIST_EXAMPLE("register_cryption_method", register_cryption_method);
+REGIST_EXAMPLE("update_cryption_key", update_cryption_key);
 #endif
 // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
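Taken together, the pattern after this change is that each example translation unit is self-registering: it defines its functions inside an anonymous namespace and calls REGIST_EXAMPLE at file scope, so example.h no longer needs a central list of declarations or registrations. A new example file would follow the same shape (the name my_new_example below is hypothetical):

#include "example.h"
#if LITE_BUILD_WITH_MGE

using namespace lite;
using namespace example;

namespace {
bool my_new_example(const Args& args) {
    // model_path and input_path are the Args fields used by the examples above
    printf("model: %s, input: %s\n", args.model_path.c_str(), args.input_path.c_str());
    return true;
}
}  // namespace

REGIST_EXAMPLE("my_new_example", my_new_example);
#endif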