
build_wheel_common.sh

#!/bin/bash
set -ex

CWD=$(dirname $0)
BASEDIR=$(readlink -f ${CWD}/../../..)
OUTPUTDIR=$(readlink -f ${CWD}/output)
USERID=$(id -u)
TMPFS_ARGS="--tmpfs /tmp:exec"
local_path=$(dirname $(readlink -f $0))
CUDNN_LIB_DIR="/opt/cudnn/lib64/"
CUDA_LIB_DIR="/usr/local/cuda/lib64/"

SDK_NAME="unknown"
x86_64_support_version="cu101 cu111 cu112 cpu"
aarch64_support_version="cu111 cpu"
function usage() {
    echo "use '-sdk sdk_version' to specify the SDK toolkit config"
    echo "supported x86_64 sdk_version values: ${x86_64_support_version}"
    echo "supported aarch64 sdk_version values: ${aarch64_support_version}"
}
while [ "$1" != "" ]; do
    case $1 in
        -sdk)
            shift
            SDK_NAME=$1
            shift
            ;;
        *)
            usage
            exit -1
    esac
done
is_valid_sdk="false"
all_sdk=""
machine=$(uname -m)
case ${machine} in
    x86_64) all_sdk=${x86_64_support_version} ;;
    aarch64) all_sdk=${aarch64_support_version} ;;
    *) echo "unsupported environment: ${machine}"; exit -1 ;;
esac
for i_sdk in ${all_sdk}
do
    if [ ${i_sdk} == ${SDK_NAME} ];then
        is_valid_sdk="true"
    fi
done
if [ ${is_valid_sdk} == "false" ];then
    echo "invalid sdk: ${SDK_NAME}"
    usage
    exit -1
fi

echo "Build with ${SDK_NAME}"
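# Per-SDK configuration: which CUDA/cuDNN libraries get copied into the wheel,
# extra cmake flags, and the exact toolkit versions required for this SDK.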
if [ $SDK_NAME == "cu101" ];then
    CUDA_COPY_LIB_LIST="${CUDA_LIB_DIR}/libnvrtc.so.10.1"
    EXTRA_CMAKE_FLAG=" -DMGE_WITH_CUDNN_SHARED=OFF -DMGE_WITH_CUBLAS_SHARED=OFF"
    BUILD_GCC8="ON"
    REQUIR_CUDA_VERSION="10010"
    REQUIR_CUDNN_VERSION="7.6.3"
    REQUIR_TENSORRT_VERSION="6.0.1.5"
    REQUIR_CUBLAS_VERSION="10.2.1.243"
elif [ $SDK_NAME == "cu111" ];then
    if [ ${machine} == "aarch64" ];then
        REQUIR_CUDA_VERSION="11010"
        REQUIR_CUDNN_VERSION="8.0.5"
        REQUIR_TENSORRT_VERSION="7.2.1.6"
        REQUIR_CUBLAS_VERSION="11.3.0.106"
    elif [ ${machine} == "x86_64" ];then
        REQUIR_CUDA_VERSION="11010"
        REQUIR_CUDNN_VERSION="8.0.4"
        REQUIR_TENSORRT_VERSION="7.2.2.3"
        REQUIR_CUBLAS_VERSION="11.2.1.74"
    else
        echo "unsupported machine: ${machine}"
        exit -1
    fi
    CUDA_COPY_LIB_LIST="\
${CUDA_LIB_DIR}/libnvrtc.so.11.1:\
${CUDA_LIB_DIR}/libcublasLt.so.11:\
${CUDA_LIB_DIR}/libcublas.so.11:\
${CUDNN_LIB_DIR}/libcudnn_adv_infer.so.8:\
${CUDNN_LIB_DIR}/libcudnn_adv_train.so.8:\
${CUDNN_LIB_DIR}/libcudnn_cnn_infer.so.8:\
${CUDNN_LIB_DIR}/libcudnn_cnn_train.so.8:\
${CUDNN_LIB_DIR}/libcudnn_ops_infer.so.8:\
${CUDNN_LIB_DIR}/libcudnn_ops_train.so.8:\
${CUDNN_LIB_DIR}/libcudnn.so.8"
    EXTRA_CMAKE_FLAG=" -DMGE_WITH_CUDNN_SHARED=ON -DMGE_WITH_CUBLAS_SHARED=ON \
        -DMGE_CUDA_GENCODE=\"-gencode arch=compute_61,code=sm_61 \
        -gencode arch=compute_70,code=sm_70 \
        -gencode arch=compute_75,code=sm_75 \
        -gencode arch=compute_80,code=sm_80 \
        -gencode arch=compute_86,code=sm_86 \
        -gencode arch=compute_86,code=compute_86\" "
elif [ $SDK_NAME == "cu112" ];then
    CUDA_COPY_LIB_LIST="\
${CUDA_LIB_DIR}/libnvrtc.so.11.2:\
${CUDA_LIB_DIR}/libcublasLt.so.11:\
${CUDA_LIB_DIR}/libcublas.so.11:\
${CUDNN_LIB_DIR}/libcudnn_adv_infer.so.8:\
${CUDNN_LIB_DIR}/libcudnn_adv_train.so.8:\
${CUDNN_LIB_DIR}/libcudnn_cnn_infer.so.8:\
${CUDNN_LIB_DIR}/libcudnn_cnn_train.so.8:\
${CUDNN_LIB_DIR}/libcudnn_ops_infer.so.8:\
${CUDNN_LIB_DIR}/libcudnn_ops_train.so.8:\
${CUDNN_LIB_DIR}/libcudnn.so.8"
    EXTRA_CMAKE_FLAG=" -DMGE_WITH_CUDNN_SHARED=ON -DMGE_WITH_CUBLAS_SHARED=ON \
        -DMGE_CUDA_GENCODE=\"-gencode arch=compute_61,code=sm_61 \
        -gencode arch=compute_70,code=sm_70 \
        -gencode arch=compute_75,code=sm_75 \
        -gencode arch=compute_80,code=sm_80 \
        -gencode arch=compute_86,code=sm_86 \
        -gencode arch=compute_86,code=compute_86\" "
    REQUIR_CUDA_VERSION="11020"
    REQUIR_CUDNN_VERSION="8.0.4"
    REQUIR_TENSORRT_VERSION="7.2.2.3"
    REQUIR_CUBLAS_VERSION="11.3.1.68"
elif [ $SDK_NAME == "cpu" ];then
    echo "building $SDK_NAME wheel without CUDA support"
    BUILD_WHL_CPU_ONLY="ON"
else
    echo "unsupported sdk: ${SDK_NAME}"
    usage
    exit -1
fi

if [[ -z ${BUILD_WHL_CPU_ONLY} ]]
then
    BUILD_WHL_CPU_ONLY="OFF"
fi
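# Sync third-party dependencies, then (for CUDA builds) verify that the local
# CUDA/cuDNN/TensorRT/cuBLAS installs match the versions this SDK requires.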
echo ${BASEDIR}
pushd ${BASEDIR}/third_party >/dev/null
    ./prepare.sh
popd >/dev/null

cd ${CWD}
mkdir -p ${OUTPUTDIR}

if [ ${BUILD_WHL_CPU_ONLY} = "OFF" ]; then
    if [[ -z ${CUDA_ROOT_DIR} ]]; then
        echo "Environment variable CUDA_ROOT_DIR not set."
        exit -1
    fi
    if [[ -z ${CUDNN_ROOT_DIR} ]]; then
        echo "Environment variable CUDNN_ROOT_DIR not set."
        exit -1
    fi
    if [[ -z ${TENSORRT_ROOT_DIR} ]]; then
        echo "Environment variable TENSORRT_ROOT_DIR not set."
        exit -1
    fi
    ## NOTE: update the required CUDA/cuDNN/TensorRT/cuBLAS versions above when upgrading the toolchain
    CUDA_ROOT_DIR_=${CUDA_ROOT_DIR%*/}
    CUDNN_ROOT_DIR_=${CUDNN_ROOT_DIR%*/}
    TENSORRT_ROOT_DIR_=${TENSORRT_ROOT_DIR%*/}

    CUBLAS_VERSION_PATH=${CUDA_ROOT_DIR_}/include/cublas_api.h
    CUDA_VERSION_PATH=${CUDA_ROOT_DIR_}/include/cuda.h
    if [ "$REQUIR_CUDA_VERSION" -ge "11000" ];then
        CUDNN_VERSION_PATH=${CUDNN_ROOT_DIR_}/include/cudnn_version.h
    else
        CUDNN_VERSION_PATH=${CUDNN_ROOT_DIR_}/include/cudnn.h
    fi
    TENSORRT_VERSION_PATH=${TENSORRT_ROOT_DIR_}/include/NvInferVersion.h
    if [ ! -e $CUDA_VERSION_PATH ] ; then
        echo "file $CUDA_VERSION_PATH does not exist"
        echo "please check the environment: CUDA-$REQUIR_CUDA_VERSION is required"
        exit -1
    fi
    if [ ! -e $CUDNN_VERSION_PATH ] ; then
        echo "file $CUDNN_VERSION_PATH does not exist"
        echo "please check the environment: CUDNN-V$REQUIR_CUDNN_VERSION is required"
        exit -1
    fi
    if [ ! -e $TENSORRT_VERSION_PATH ] ; then
        echo "file $TENSORRT_VERSION_PATH does not exist"
        echo "please check the environment: TensorRT-$REQUIR_TENSORRT_VERSION is required"
        exit -1
    fi
    if [ ! -e $CUBLAS_VERSION_PATH ] ; then
        echo "file $CUBLAS_VERSION_PATH does not exist"
        exit -1
    fi
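    # Read the relevant header snippets and parse the installed toolkit versions out of them.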
    CUBLAS_VERSION_CONTEXT=$(head -150 ${CUBLAS_VERSION_PATH})
    CUDA_VERSION_CONTEXT=$(head -300 ${CUDA_VERSION_PATH})
    CUDNN_VERSION_CONTEXT=$(head -62 ${CUDNN_VERSION_PATH})
    TENSORRT_VERSION_CONTEXT=$(tail -12 ${TENSORRT_VERSION_PATH})

    if [ "$REQUIR_CUDA_VERSION" -ge "11000" ];then
        CUDA_API_VERSION=$(echo $CUDA_VERSION_CONTEXT | grep -Eo "define CUDA_VERSION * +([0-9]+)")
    else
        CUDA_API_VERSION=$(echo $CUDA_VERSION_CONTEXT | grep -Eo "define __CUDA_API_VERSION * +([0-9]+)")
    fi
    CUDA_VERSION=${CUDA_API_VERSION:0-5}
    echo CUDA_VERSION:$CUDA_VERSION

    CUDNN_VERSION_MAJOR=$(echo $CUDNN_VERSION_CONTEXT | grep -Eo "define CUDNN_MAJOR * +([0-9]+)")
    CUDNN_VERSION_MINOR=$(echo $CUDNN_VERSION_CONTEXT | grep -Eo "define CUDNN_MINOR * +([0-9]+)")
    CUDNN_VERSION_PATCH=$(echo $CUDNN_VERSION_CONTEXT | grep -Eo "define CUDNN_PATCHLEVEL * +([0-9]+)")
    CUDNN_VERSION=${CUDNN_VERSION_MAJOR:0-1}.${CUDNN_VERSION_MINOR:0-1}.${CUDNN_VERSION_PATCH:0-1}
    echo CUDNN_VERSION:$CUDNN_VERSION

    TENSORRT_VERSION_MAJOR=$(echo $TENSORRT_VERSION_CONTEXT | grep -Eo "NV_TENSORRT_MAJOR * +([0-9]+)")
    TENSORRT_VERSION_MINOR=$(echo $TENSORRT_VERSION_CONTEXT | grep -Eo "NV_TENSORRT_MINOR * +([0-9]+)")
    TENSORRT_VERSION_PATCH=$(echo $TENSORRT_VERSION_CONTEXT | grep -Eo "NV_TENSORRT_PATCH * +([0-9]+)")
    TENSORRT_VERSION_BUILD=$(echo $TENSORRT_VERSION_CONTEXT | grep -Eo "NV_TENSORRT_BUILD * +([0-9]+)")
    TENSORRT_VERSION=${TENSORRT_VERSION_MAJOR:0-1}.${TENSORRT_VERSION_MINOR:0-1}.${TENSORRT_VERSION_PATCH:0-1}.${TENSORRT_VERSION_BUILD:0-1}
    echo TENSORRT_VERSION:$TENSORRT_VERSION

    CUBLAS_VERSION_MAJOR=$(echo $CUBLAS_VERSION_CONTEXT | grep -Eo "define CUBLAS_VER_MAJOR * +([0-9]+)" | grep -Eo "*+([0-9]+)")
    CUBLAS_VERSION_MINOR=$(echo $CUBLAS_VERSION_CONTEXT | grep -Eo "define CUBLAS_VER_MINOR * +([0-9]+)" | grep -Eo "*+([0-9]+)")
    CUBLAS_VERSION_PATCH=$(echo $CUBLAS_VERSION_CONTEXT | grep -Eo "define CUBLAS_VER_PATCH * +([0-9]+)" | grep -Eo "*+([0-9]+)")
    CUBLAS_VERSION_BUILD=$(echo $CUBLAS_VERSION_CONTEXT | grep -Eo "define CUBLAS_VER_BUILD * +([0-9]+)" | grep -Eo "*+([0-9]+)")
    CUBLAS_VERSION=${CUBLAS_VERSION_MAJOR}.${CUBLAS_VERSION_MINOR}.${CUBLAS_VERSION_PATCH}.${CUBLAS_VERSION_BUILD}
    echo CUBLAS_VERSION:$CUBLAS_VERSION
    if [ $CUDA_VERSION != $REQUIR_CUDA_VERSION ] ; then
        echo "please check the environment: CUDA-$REQUIR_CUDA_VERSION is required"
        exit -1
    fi
    if [ $CUDNN_VERSION != $REQUIR_CUDNN_VERSION ] ; then
        echo "please check the environment: CUDNN-V$REQUIR_CUDNN_VERSION is required"
        exit -1
    fi
    if [ $TENSORRT_VERSION != $REQUIR_TENSORRT_VERSION ] ; then
        echo "please check the environment: TENSORRT-$REQUIR_TENSORRT_VERSION is required"
        exit -1
    fi
    if [ $CUBLAS_VERSION != $REQUIR_CUBLAS_VERSION ] ; then
        echo "please check the environment: CUBLAS-$REQUIR_CUBLAS_VERSION is required"
        exit -1
    fi
fi
if [[ -z ${BUILD_GCC8} ]];then
    BUILD_GCC8=OFF
fi

if [ ${machine} == "aarch64" ];then
    # The manylinux gcc9 on aarch64 is (GCC) 9.3.1 20200408 (Red Hat 9.3.1-2),
    # whose 'as' takes an extremely long time on some dnn kernels;
    # the Ubuntu gcc (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0 does not have this issue.
    echo "force use gcc8 on aarch64 linux"
    BUILD_GCC8="ON"
fi

if [ "$BUILD_GCC8" == "ON" ];then
    run_cmd="scl enable devtoolset-8 /home/code/scripts/whl/manylinux2014/do_build_common.sh"
else
    run_cmd="/home/code/scripts/whl/manylinux2014/do_build_common.sh"
fi
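# Run the actual wheel build inside the manylinux2014 container, mounting the
# host toolkits, the source tree, and the output directory.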
docker run --rm -it $TMPFS_ARGS \
    -e UID=${USERID} \
    -e PUBLIC_VERSION_POSTFIX=${PUBLIC_VERSION_POSTFIX} \
    -e LOCAL_VERSION=${LOCAL_VERSION} \
    -e STRIP_SDK_INFO=${STRIP_SDK_INFO} \
    -e BUILD_WHL_CPU_ONLY=${BUILD_WHL_CPU_ONLY} \
    -e ALL_PYTHON="${ALL_PYTHON}" \
    -e EXTRA_CMAKE_FLAG="$EXTRA_CMAKE_FLAG" \
    -e CUDA_COPY_LIB_LIST="$CUDA_COPY_LIB_LIST" \
    -e SDK_NAME="$SDK_NAME" \
    -e CUDA_ROOT_DIR="/usr/local/cuda" \
    -e CUDNN_ROOT_DIR="/opt/cudnn" \
    -e TRT_ROOT_DIR="/opt/tensorrt" \
    -v ${CUDA_ROOT_DIR}:/usr/local/cuda \
    -v ${CUDNN_ROOT_DIR}:/opt/cudnn \
    -v ${TENSORRT_ROOT_DIR}:/opt/tensorrt \
    -v ${BASEDIR}:/home/code \
    -v ${OUTPUTDIR}:/home/output:rw \
    env_manylinux2014:latest /bin/bash -c "$run_cmd"
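For example, a cu111 build on an x86_64 host needs the three toolkit root directories exported before the script is invoked; the paths below are placeholders for wherever the matching toolkit versions are installed:

    # illustrative paths; point these at local installs of CUDA 11.1, cuDNN 8.0.4 and TensorRT 7.2.2.3
    export CUDA_ROOT_DIR=/usr/local/cuda-11.1
    export CUDNN_ROOT_DIR=/path/to/cudnn-8.0.4
    export TENSORRT_ROOT_DIR=/path/to/TensorRT-7.2.2.3
    ./build_wheel_common.sh -sdk cu111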

The MegEngine package bundles the CUDA environment needed to run code on the GPU, so there is no separate CPU and GPU build to choose between. To run GPU programs, make sure the machine has GPU hardware and that the driver is installed. If you would like to try deep-learning development on cloud GPU resources, you are welcome to visit the MegStudio platform.
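As a quick sanity check after installing a wheel, something like the following should report whether the GPU is usable; the wheel index URL and the megengine.is_cuda_available() call follow the public install instructions as I understand them, so verify them against the official documentation:

    # install the prebuilt wheel and check CUDA availability (URL and API assumed from public docs)
    python3 -m pip install megengine -f https://megengine.org.cn/whl/mge.html
    python3 -c "import megengine; print(megengine.is_cuda_available())"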