
build_wheel.sh 3.7 kB

#!/bin/bash -e

# Resolve paths relative to this script.
CWD=$(dirname $0)
BASEDIR=$(readlink -f ${CWD}/../../..)
OUTPUTDIR=$(readlink -f ${CWD}/output)
USERID=$(id -u)
TMPFS_ARGS="--tmpfs /tmp:exec"
echo ${BASEDIR}

# Fetch and prepare third-party dependencies.
pushd ${BASEDIR}/third_party >/dev/null
./prepare.sh
popd >/dev/null

cd ${CWD}
mkdir -p ${OUTPUTDIR}

# The CUDA, cuDNN and TensorRT installations must be provided via environment variables.
if [[ -z ${CUDA_ROOT_DIR} ]]; then
    echo "Environment variable CUDA_ROOT_DIR not set."
    exit -1
fi
if [[ -z ${CUDNN_ROOT_DIR} ]]; then
    echo "Environment variable CUDNN_ROOT_DIR not set."
    exit -1
fi
if [[ -z ${TENSORRT_ROOT_DIR} ]]; then
    echo "Environment variable TENSORRT_ROOT_DIR not set."
    exit -1
fi

## Update the required CUDA/cuDNN/TensorRT versions below when upgrading.
REQUIR_CUDA_VERSION="10010"
REQUIR_CUDNN_VERSION="7.6.3"
REQUIR_TENSORRT_VERSION="6.0.1.5"

# Strip a trailing slash and locate the headers that encode each library's version.
CUDA_ROOT_DIR_=${CUDA_ROOT_DIR%*/}
CUDNN_ROOT_DIR_=${CUDNN_ROOT_DIR%*/}
TENSORRT_ROOT_DIR_=${TENSORRT_ROOT_DIR%*/}
CUDA_VERSION_PATH=${CUDA_ROOT_DIR_}/include/cuda.h
CUDNN_VERSION_PATH=${CUDNN_ROOT_DIR_}/include/cudnn.h
TENSORRT_VERSION_PATH=${TENSORRT_ROOT_DIR_}/include/NvInferVersion.h

if [ ! -e "$CUDA_VERSION_PATH" ] ; then
    echo "file $CUDA_VERSION_PATH does not exist"
    echo "please check the environment: CUDA-10.1 (version $REQUIR_CUDA_VERSION) is required"
    exit -1
fi
if [ ! -e "$CUDNN_VERSION_PATH" ] ; then
    echo "file $CUDNN_VERSION_PATH does not exist"
    echo "please check the environment: cuDNN v$REQUIR_CUDNN_VERSION is required"
    exit -1
fi
if [ ! -e "$TENSORRT_VERSION_PATH" ] ; then
    echo "file $TENSORRT_VERSION_PATH does not exist"
    echo "please check the environment: TensorRT $REQUIR_TENSORRT_VERSION is required"
    exit -1
fi

# Parse the installed versions out of the headers. The ${VAR:0-N} expansions keep
# the last N characters of each grep match, i.e. the numeric part of the #define.
CUDA_VERSION_CONTEXT=$(head -85 ${CUDA_VERSION_PATH})
CUDNN_VERSION_CONTEXT=$(head -62 ${CUDNN_VERSION_PATH})
TENSORRT_VERSION_CONTEXT=$(tail -12 ${TENSORRT_VERSION_PATH})

CUDA_API_VERSION=$(echo $CUDA_VERSION_CONTEXT | grep -Eo "define __CUDA_API_VERSION * +([0-9]+)")
CUDA_VERSION=${CUDA_API_VERSION:0-5}
echo CUDA_VERSION:$CUDA_VERSION

CUDNN_VERSION_MAJOR=$(echo $CUDNN_VERSION_CONTEXT | grep -Eo "define CUDNN_MAJOR * +([0-9]+)")
CUDNN_VERSION_MINOR=$(echo $CUDNN_VERSION_CONTEXT | grep -Eo "define CUDNN_MINOR * +([0-9]+)")
CUDNN_VERSION_PATCH=$(echo $CUDNN_VERSION_CONTEXT | grep -Eo "define CUDNN_PATCHLEVEL * +([0-9]+)")
CUDNN_VERSION=${CUDNN_VERSION_MAJOR:0-1}.${CUDNN_VERSION_MINOR:0-1}.${CUDNN_VERSION_PATCH:0-1}
echo CUDNN_VERSION:$CUDNN_VERSION

TENSORRT_VERSION_MAJOR=$(echo $TENSORRT_VERSION_CONTEXT | grep -Eo "NV_TENSORRT_MAJOR * +([0-9]+)")
TENSORRT_VERSION_MINOR=$(echo $TENSORRT_VERSION_CONTEXT | grep -Eo "NV_TENSORRT_MINOR * +([0-9]+)")
TENSORRT_VERSION_PATCH=$(echo $TENSORRT_VERSION_CONTEXT | grep -Eo "NV_TENSORRT_PATCH * +([0-9]+)")
TENSORRT_VERSION_BUILD=$(echo $TENSORRT_VERSION_CONTEXT | grep -Eo "NV_TENSORRT_BUILD * +([0-9]+)")
TENSORRT_VERSION=${TENSORRT_VERSION_MAJOR:0-1}.${TENSORRT_VERSION_MINOR:0-1}.${TENSORRT_VERSION_PATCH:0-1}.${TENSORRT_VERSION_BUILD:0-1}
echo TENSORRT_VERSION:$TENSORRT_VERSION

# Refuse to build against anything other than the pinned versions.
if [ "$CUDA_VERSION" != "$REQUIR_CUDA_VERSION" ] ; then
    echo "please check the environment: CUDA-10.1 (version $REQUIR_CUDA_VERSION) is required"
    exit -1
fi
if [ "$CUDNN_VERSION" != "$REQUIR_CUDNN_VERSION" ] ; then
    echo "please check the environment: cuDNN v$REQUIR_CUDNN_VERSION is required"
    exit -1
fi
if [ "$TENSORRT_VERSION" != "$REQUIR_TENSORRT_VERSION" ] ; then
    echo "please check the environment: TensorRT $REQUIR_TENSORRT_VERSION is required"
    exit -1
fi

# Run the actual build inside the manylinux2010 container, mounting the toolkits,
# the source tree and the output directory.
docker run -it --rm $TMPFS_ARGS \
    -e UID=${USERID} -e LOCAL_VERSION=${LOCAL_VERSION} -e ALL_PYTHON=${ALL_PYTHON} \
    -v ${CUDA_ROOT_DIR}:/usr/local/cuda \
    -v ${CUDNN_ROOT_DIR}:/opt/cudnn \
    -v ${TENSORRT_ROOT_DIR}:/opt/tensorrt \
    -v ${BASEDIR}:/home/code \
    -v ${OUTPUTDIR}:/home/output:rw \
    env_manylinux2010:latest /home/code/scripts/whl/manylinux2010/do_build.sh
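
The script only runs when the three toolkit locations are exported and the env_manylinux2010 Docker image is available on the host. A minimal invocation sketch follows; the directory paths are placeholders and must point at local CUDA 10.1, cuDNN 7.6.3 and TensorRT 6.0.1.5 installations:

    # Placeholder paths; adjust to where the toolkits are installed locally.
    export CUDA_ROOT_DIR=/usr/local/cuda-10.1
    export CUDNN_ROOT_DIR=/opt/cudnn
    export TENSORRT_ROOT_DIR=/opt/TensorRT-6.0.1.5
    # Optional: LOCAL_VERSION and ALL_PYTHON are forwarded into the container;
    # their accepted values depend on do_build.sh.
    ./build_wheel.sh

The version checks above will abort the build if the headers found under these paths report anything other than the pinned versions, so mismatched toolkits fail fast instead of producing a broken wheel.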

The MegEngine package bundles the CUDA environment needed to run code on a GPU, so there is no separate CPU and GPU build. To run GPU programs, make sure the machine has a GPU and that the driver is installed. If you would like to try deep-learning development on a cloud GPU computing platform, you are welcome to visit the MegStudio platform.
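
As a rough sketch of that workflow (the wheel index URL and the is_cuda_available check follow MegEngine's public documentation; treat them as assumptions rather than part of build_wheel.sh):

    # Install the prebuilt wheel and confirm the GPU is visible.
    # Commands are based on MegEngine's public install docs, not on this script.
    python3 -m pip install megengine -f https://megengine.org.cn/whl/mge.html
    python3 -c "import megengine; print(megengine.is_cuda_available())"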