diff --git a/dnn/src/cuda/handle.cpp b/dnn/src/cuda/handle.cpp
index 39bd56b3..f00006ea 100644
--- a/dnn/src/cuda/handle.cpp
+++ b/dnn/src/cuda/handle.cpp
@@ -52,7 +52,9 @@ HandleImpl::HandleImpl(megcoreComputingHandle_t comp_handle)
     // Get stream from MegCore computing handle.
     megdnn_assert(
             CUDNN_VERSION == cudnnGetVersion(),
-            "cudnn version mismatch: compiled with %d; detected %zu at runtime",
+            "cudnn version mismatch: compiled with %d; detected %zu at runtime, "
+            "may be caused by a customized environment, for example "
+            "LD_LIBRARY_PATH on Linux and PATH on Windows!",
             CUDNN_VERSION, cudnnGetVersion());
 #if CUDA_VERSION >= 10010
     megdnn_assert(
diff --git a/src/tensorrt/impl/tensorrt_runtime_opr.cpp b/src/tensorrt/impl/tensorrt_runtime_opr.cpp
index ef232d8c..f0849eb0 100644
--- a/src/tensorrt/impl/tensorrt_runtime_opr.cpp
+++ b/src/tensorrt/impl/tensorrt_runtime_opr.cpp
@@ -217,6 +217,12 @@ SymbolVarArray TensorRTRuntimeOpr::make(
         std::shared_ptr<nvinfer1::ICudaEngine> engine,
         std::shared_ptr<GpuAllocator> gpu_allocator, const SymbolVarArray& src,
         const OperatorNodeConfig& config) {
+    mgb_assert(
+            NV_TENSORRT_VERSION == getInferLibVersion(),
+            "TensorRT version mismatch: compiled with %d; detected %d at runtime, "
+            "may be caused by a customized environment, for example "
+            "LD_LIBRARY_PATH on Linux and PATH on Windows!",
+            NV_TENSORRT_VERSION, getInferLibVersion());
     VarNodeArray var_node_array = cg::to_var_node_array(src);
     auto tensor_rt_opr = std::make_unique<TensorRTRuntimeOpr>(
             std::move(engine), std::move(gpu_allocator), var_node_array, config);
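
Both hunks follow the same pattern: compare the version macro baked in at compile time (CUDNN_VERSION, NV_TENSORRT_VERSION) against the version reported by the shared library the dynamic loader actually resolved at runtime (cudnnGetVersion(), getInferLibVersion()), and fail loudly with a hint about loader paths when they differ. Below is a minimal standalone sketch of that pattern for the cuDNN case only, assuming the cuDNN headers are available; it uses plain fprintf/abort instead of MegEngine's megdnn_assert/mgb_assert machinery and is an illustration, not the patched code.

```cpp
// Sketch of a compile-time vs. runtime library version check.
// CUDNN_VERSION is fixed when the binary is built; cudnnGetVersion() reports
// the version of the .so/.dll actually loaded. A mismatch usually means
// LD_LIBRARY_PATH (Linux) or PATH (Windows) resolved a different library
// than the one used at build time.
#include <cstdio>
#include <cstdlib>

#include <cudnn.h>  // provides the CUDNN_VERSION macro and cudnnGetVersion()

void check_cudnn_version() {
    size_t runtime_version = cudnnGetVersion();
    if (runtime_version != CUDNN_VERSION) {
        std::fprintf(
                stderr,
                "cudnn version mismatch: compiled with %d; detected %zu at "
                "runtime, may be caused by a customized environment, for "
                "example LD_LIBRARY_PATH on Linux and PATH on Windows\n",
                CUDNN_VERSION, runtime_version);
        std::abort();
    }
}
```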