You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

build_wheel_common.sh 9.0 kB

  1. #!/bin/bash
  2. set -ex
  3. CWD=$(dirname $0)
  4. BASEDIR=$(readlink -f ${CWD}/../../..)
  5. OUTPUTDIR=$(readlink -f ${CWD}/output)
  6. USERID=$(id -u)
  7. TMPFS_ARGS="--tmpfs /tmp:exec"
  8. local_path=$(dirname $(readlink -f $0))
  9. CUDNN_LIB_DIR="/opt/cudnn/lib64/"
  10. CUDA_LIB_DIR="/usr/local/cuda/lib64/"
  11. SDK_NAME="unknown"
  12. function usage() {
  13. echo "use '-sdk cu111' to specify cuda toolkit config, also support cu101, cu112, cpu"
  14. }
  15. while [ "$1" != "" ]; do
  16. case $1 in
  17. -sdk)
  18. shift
  19. SDK_NAME=$1
  20. shift
  21. ;;
  22. *)
  23. usage
  24. exit 1
  25. esac
  26. done
  27. echo "Build with ${SDK_NAME}"
  28. if [ $SDK_NAME == "cu101" ];then
  29. CUDA_COPY_LIB_LIST="${CUDA_LIB_DIR}/libnvrtc.so.10.1"
  30. EXTRA_CMAKE_FLAG=" -DMGE_WITH_CUDNN_SHARED=OFF -DMGE_WITH_CUBLAS_SHARED=OFF"
  31. BUILD_GCC8="ON"
  32. REQUIR_CUDA_VERSION="10010"
  33. REQUIR_CUDNN_VERSION="7.6.3"
  34. REQUIR_TENSORRT_VERSION="6.0.1.5"
  35. REQUIR_CUBLAS_VERSION="10.2.1.243"
  36. elif [ $SDK_NAME == "cu111" ];then
  37. CUDA_COPY_LIB_LIST="\
  38. ${CUDA_LIB_DIR}/libnvrtc.so.11.1:\
  39. ${CUDA_LIB_DIR}/libcublasLt.so.11:\
  40. ${CUDA_LIB_DIR}/libcublas.so.11:\
  41. ${CUDNN_LIB_DIR}/libcudnn_adv_infer.so.8:\
  42. ${CUDNN_LIB_DIR}/libcudnn_adv_train.so.8:\
  43. ${CUDNN_LIB_DIR}/libcudnn_cnn_infer.so.8:\
  44. ${CUDNN_LIB_DIR}/libcudnn_cnn_train.so.8:\
  45. ${CUDNN_LIB_DIR}/libcudnn_ops_infer.so.8:\
  46. ${CUDNN_LIB_DIR}/libcudnn_ops_train.so.8:\
  47. ${CUDNN_LIB_DIR}/libcudnn.so.8"
  48. EXTRA_CMAKE_FLAG=" -DMGE_WITH_CUDNN_SHARED=ON -DMGE_WITH_CUBLAS_SHARED=ON \
  49. -gencode arch=compute_61,code=sm_61 \
  50. arch=compute_70,code=sm_70 \
  51. arch=compute_75,code=sm_75 \
  52. arch=compute_80,code=sm_80 \
  53. arch=compute_86,code=sm_86 \
  54. arch=compute_86,code=compute_86"
  55. REQUIR_CUDA_VERSION="11010"
  56. REQUIR_CUDNN_VERSION="8.0.4"
  57. REQUIR_TENSORRT_VERSION="7.2.2.3"
  58. REQUIR_CUBLAS_VERSION="11.2.1.74"
  59. elif [ $SDK_NAME == "cu112" ];then
  60. CUDA_COPY_LIB_LIST="\
  61. ${CUDA_LIB_DIR}/libnvrtc.so.11.2:\
  62. ${CUDA_LIB_DIR}/libcublasLt.so.11:\
  63. ${CUDA_LIB_DIR}/libcublas.so.11:\
  64. ${CUDNN_LIB_DIR}/libcudnn_adv_infer.so.8:\
  65. ${CUDNN_LIB_DIR}/libcudnn_adv_train.so.8:\
  66. ${CUDNN_LIB_DIR}/libcudnn_cnn_infer.so.8:\
  67. ${CUDNN_LIB_DIR}/libcudnn_cnn_train.so.8:\
  68. ${CUDNN_LIB_DIR}/libcudnn_ops_infer.so.8:\
  69. ${CUDNN_LIB_DIR}/libcudnn_ops_train.so.8:\
  70. ${CUDNN_LIB_DIR}/libcudnn.so.8"
  71. EXTRA_CMAKE_FLAG=" -DMGE_WITH_CUDNN_SHARED=ON -DMGE_WITH_CUBLAS_SHARED=ON \
  72. -gencode arch=compute_61,code=sm_61 \
  73. arch=compute_70,code=sm_70 \
  74. arch=compute_75,code=sm_75 \
  75. arch=compute_80,code=sm_80 \
  76. arch=compute_86,code=sm_86 \
  77. arch=compute_86,code=compute_86"
  78. REQUIR_CUDA_VERSION="11020"
  79. REQUIR_CUDNN_VERSION="8.0.4"
  80. REQUIR_TENSORRT_VERSION="7.2.2.3"
  81. REQUIR_CUBLAS_VERSION="11.3.1.68"
  82. elif [ $SDK_NAME == "cpu" ];then
  83. echo "use $SDK_NAME without cuda support"
  84. BUILD_WHL_CPU_ONLY="ON"
  85. else
  86. echo "no support sdk ${SDK_NAME}, please set by '-sdk cu111'"
  87. exit -1
  88. fi
  89. if [[ -z ${BUILD_WHL_CPU_ONLY} ]]
  90. then
  91. BUILD_WHL_CPU_ONLY="OFF"
  92. fi
  93. echo ${BASEDIR}
  94. pushd ${BASEDIR}/third_party >/dev/null
  95. ./prepare.sh
  96. popd >/dev/null
  97. cd ${CWD}
  98. mkdir -p ${OUTPUTDIR}
  99. if [ ${BUILD_WHL_CPU_ONLY} = "OFF" ]; then
  100. if [[ -z ${CUDA_ROOT_DIR} ]]; then
  101. echo "Environment variable CUDA_ROOT_DIR not set."
  102. exit -1
  103. fi
  104. if [[ -z ${CUDNN_ROOT_DIR} ]]; then
  105. echo "Environment variable CUDNN_ROOT_DIR not set."
  106. exit -1
  107. fi
  108. if [[ -z ${TENSORRT_ROOT_DIR} ]]; then
  109. echo "Environment variable TENSORRT_ROOT_DIR not set."
  110. exit -1
  111. fi
  112. ## YOU SHOULD MODIFY CUDA VERSION AS BELOW WHEN UPGRADE
  113. CUDA_ROOT_DIR_=${CUDA_ROOT_DIR%*/}
  114. CUDNN_ROOT_DIR_=${CUDNN_ROOT_DIR%*/}
  115. TENSORRT_ROOT_DIR_=${TENSORRT_ROOT_DIR%*/}
  116. CUBLAS_VERSION_PATH=${CUDA_ROOT_DIR_}/include/cublas_api.h
  117. CUDA_VERSION_PATH=${CUDA_ROOT_DIR_}/include/cuda.h
  118. if [ "$REQUIR_CUDA_VERSION" -ge "11000" ];then
  119. CUDNN_VERSION_PATH=${CUDNN_ROOT_DIR_}/include/cudnn_version.h
  120. else
  121. CUDNN_VERSION_PATH=${CUDNN_ROOT_DIR_}/include/cudnn.h
  122. fi
  123. TENSORRT_VERSION_PATH=${TENSORRT_ROOT_DIR_}/include/NvInferVersion.h
  124. if [ ! -e $CUDA_VERSION_PATH ] ; then
  125. echo file $CUDA_VERSION_PATH is not exist
  126. echo please check the Environment must use CUDA-$REQUIR_CUDA_VERSION
  127. exit -1
  128. fi
  129. if [ ! -e $CUDNN_VERSION_PATH ] ; then
  130. echo file $CUDNN_VERSION_PATH is not exist
  131. echo please check the Environment must use CUDNN-V$REQUIR_CUDNN_VERSION
  132. exit -1
  133. fi
  134. if [ ! -e $TENSORRT_VERSION_PATH ] ; then
  135. echo file $TENSORRT_VERSION_PATH is not exist
  136. echo please check the Environment must use TensorRT-$REQUIR_TENSORRT_VERSION
  137. exit -1
  138. fi
  139. if [ ! -e $CUBLAS_VERSION_PATH ] ; then
  140. echo file $CUBLAS_VERSION_PATH is not exist
  141. exit -1
  142. fi
  143. CUBLAS_VERSION_CONTEXT=$(head -150 ${CUBLAS_VERSION_PATH})
  144. CUDA_VERSION_CONTEXT=$(head -300 ${CUDA_VERSION_PATH})
  145. CUDNN_VERSION_CONTEXT=$(head -62 ${CUDNN_VERSION_PATH})
  146. TENSORRT_VERSION_CONTEXT=$(tail -12 ${TENSORRT_VERSION_PATH})
  147. if [ "$REQUIR_CUDA_VERSION" -ge "11000" ];then
  148. CUDA_API_VERSION=$(echo $CUDA_VERSION_CONTEXT | grep -Eo "define CUDA_VERSION * +([0-9]+)")
  149. else
  150. CUDA_API_VERSION=$(echo $CUDA_VERSION_CONTEXT | grep -Eo "define __CUDA_API_VERSION * +([0-9]+)")
  151. fi
  152. CUDA_VERSION=${CUDA_API_VERSION:0-5}
  153. echo CUDA_VERSION:$CUDA_VERSION
  154. CUDNN_VERSION_MAJOR=$(echo $CUDNN_VERSION_CONTEXT | grep -Eo "define CUDNN_MAJOR * +([0-9]+)")
  155. CUDNN_VERSION_MINOR=$(echo $CUDNN_VERSION_CONTEXT | grep -Eo "define CUDNN_MINOR * +([0-9]+)")
  156. CUDNN_VERSION_PATCH=$(echo $CUDNN_VERSION_CONTEXT | grep -Eo "define CUDNN_PATCHLEVEL * +([0-9]+)")
  157. CUDNN_VERSION=${CUDNN_VERSION_MAJOR:0-1}.${CUDNN_VERSION_MINOR:0-1}.${CUDNN_VERSION_PATCH:0-1}
  158. echo CUDNN_VERSION:$CUDNN_VERSION
  159. TENSORRT_VERSION_MAJOR=$(echo $TENSORRT_VERSION_CONTEXT | grep -Eo "NV_TENSORRT_MAJOR * +([0-9]+)")
  160. TENSORRT_VERSION_MINOR=$(echo $TENSORRT_VERSION_CONTEXT | grep -Eo "NV_TENSORRT_MINOR * +([0-9]+)")
  161. TENSORRT_VERSION_PATCH=$(echo $TENSORRT_VERSION_CONTEXT | grep -Eo "NV_TENSORRT_PATCH * +([0-9]+)")
  162. TENSORRT_VERSION_BUILD=$(echo $TENSORRT_VERSION_CONTEXT | grep -Eo "NV_TENSORRT_BUILD * +([0-9]+)")
  163. TENSORRT_VERSION=${TENSORRT_VERSION_MAJOR:0-1}.${TENSORRT_VERSION_MINOR:0-1}.${TENSORRT_VERSION_PATCH:0-1}.${TENSORRT_VERSION_BUILD:0-1}
  164. echo TENSORRT_VERSION:$TENSORRT_VERSION
  165. CUBLAS_VERSION_MAJOR=$(echo $CUBLAS_VERSION_CONTEXT | grep -Eo "define CUBLAS_VER_MAJOR * +([0-9]+)" | grep -Eo "*+([0-9]+)")
  166. CUBLAS_VERSION_MINOR=$(echo $CUBLAS_VERSION_CONTEXT | grep -Eo "define CUBLAS_VER_MINOR * +([0-9]+)" | grep -Eo "*+([0-9]+)")
  167. CUBLAS_VERSION_PATCH=$(echo $CUBLAS_VERSION_CONTEXT | grep -Eo "define CUBLAS_VER_PATCH * +([0-9]+)" | grep -Eo "*+([0-9]+)")
  168. CUBLAS_VERSION_BUILD=$(echo $CUBLAS_VERSION_CONTEXT | grep -Eo "define CUBLAS_VER_BUILD * +([0-9]+)" | grep -Eo "*+([0-9]+)")
  169. CUBLAS_VERSION=${CUBLAS_VERSION_MAJOR}.${CUBLAS_VERSION_MINOR}.${CUBLAS_VERSION_PATCH}.${CUBLAS_VERSION_BUILD}
  170. echo CUBLAS_VERSION:$CUBLAS_VERSION
  171. if [ $CUDA_VERSION != $REQUIR_CUDA_VERSION ] ; then
  172. echo please check the Environment must use CUDA-10.1 NO.$REQUIR_CUDA_VERSION
  173. exit -1
  174. fi
  175. if [ $CUDNN_VERSION != $REQUIR_CUDNN_VERSION ] ; then
  176. echo please check the Environment must use CUDNN-V$REQUIR_CUDNN_VERSION
  177. exit -1
  178. fi
  179. if [ $TENSORRT_VERSION != $REQUIR_TENSORRT_VERSION ] ; then
  180. echo please check the Environment must use TENSORRT-$REQUIR_TENSORRT_VERSION
  181. exit -1
  182. fi
  183. if [ $CUBLAS_VERSION != $REQUIR_CUBLAS_VERSION ] ; then
  184. echo please check the Environment must use CUBLAS-$REQUIR_CUBLAS_VERSION
  185. exit -1
  186. fi
  187. fi
  188. if [[ -z ${BUILD_GCC8} ]];then
  189. BUILD_GCC8=OFF
  190. fi
  191. if [ "$BUILD_GCC8" == "ON" ];then
  192. run_cmd="scl enable devtoolset-8 /home/code/scripts/whl/manylinux2014/do_build_common.sh"
  193. else
  194. run_cmd="/home/code/scripts/whl/manylinux2014/do_build_common.sh"
  195. fi
  196. docker run --rm -it $TMPFS_ARGS \
  197. -e UID=${USERID} \
  198. -e PUBLIC_VERSION_POSTFIX=${PUBLIC_VERSION_POSTFIX} \
  199. -e LOCAL_VERSION=${LOCAL_VERSION} \
  200. -e STRIP_SDK_INFO=${STRIP_SDK_INFO} \
  201. -e BUILD_WHL_CPU_ONLY=${BUILD_WHL_CPU_ONLY} \
  202. -e ALL_PYTHON="${ALL_PYTHON}" \
  203. -e EXTRA_CMAKE_FLAG="$EXTRA_CMAKE_FLAG" \
  204. -e CUDA_COPY_LIB_LIST="$CUDA_COPY_LIB_LIST" \
  205. -e SDK_NAME="$SDK_NAME" \
  206. -v ${CUDA_ROOT_DIR}:/usr/local/cuda \
  207. -v ${CUDNN_ROOT_DIR}:/opt/cudnn \
  208. -v ${TENSORRT_ROOT_DIR}:/opt/tensorrt \
  209. -v ${BASEDIR}:/home/code \
  210. -v ${OUTPUTDIR}:/home/output:rw \
  211. env_manylinux2014:latest /bin/bash -c "$run_cmd"

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台