You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; may include dashes ('-'); and can be up to 35 characters long.

build_wheel_common.sh 14 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375
  1. #!/bin/bash -e
  2. set -x
  3. CWD=$(dirname $0)
  4. BASEDIR=$(readlink -f ${CWD}/../../..)
  5. OUTPUTDIR=$(readlink -f ${CWD}/output)
  6. USERID=$(id -u)
  7. TMPFS_ARGS="--tmpfs /tmp:exec"
  8. local_path=$(dirname $(readlink -f $0))
  9. CUDNN_LIB_DIR="/opt/cudnn/lib64/"
  10. CUDA_LIB_DIR="/usr/local/cuda/lib64/"
  11. TensorRT_LIB_DIR="/opt/tensorrt/lib/"
  12. SDK_NAME="unknown"
  13. x86_64_support_version="cu101 cu111 cu112 cpu cu111_cudnn821_tensorRT825"
  14. aarch64_support_version="cu102_JetsonNano cu111 cpu"
  15. if [[ -z ${IN_CI} ]]
  16. then
  17. IN_CI="false"
  18. fi
  19. function usage() {
  20. echo "use -sdk sdk_version to specify sdk toolkit config!"
  21. echo "now x86_64 sdk_version support ${x86_64_support_version}"
  22. echo "now aarch64 sdk_version support ${aarch64_support_version}"
  23. }
  24. while [ "$1" != "" ]; do
  25. case $1 in
  26. -sdk)
  27. shift
  28. SDK_NAME=$1
  29. shift
  30. ;;
  31. *)
  32. usage
  33. exit -1
  34. esac
  35. done
  36. is_valid_sdk="false"
  37. all_sdk=""
  38. machine=$(uname -m)
  39. case ${machine} in
  40. x86_64) all_sdk=${x86_64_support_version} ;;
  41. aarch64) all_sdk=${aarch64_support_version} ;;
  42. *) echo "nonsupport env!!!";exit -1 ;;
  43. esac
  44. for i_sdk in ${all_sdk}
  45. do
  46. if [ ${i_sdk} == ${SDK_NAME} ];then
  47. is_valid_sdk="true"
  48. fi
  49. done
  50. if [ ${is_valid_sdk} == "false" ];then
  51. echo "invalid sdk: ${SDK_NAME}"
  52. usage
  53. exit -1
  54. fi
  55. echo "Build with ${SDK_NAME}"
  56. if [ $SDK_NAME == "cu101" ];then
  57. CUDA_COPY_LIB_LIST="\
  58. ${CUDA_LIB_DIR}/libnvrtc.so.10.1"
  59. EXTRA_CMAKE_FLAG=" -DMGE_WITH_CUDNN_SHARED=OFF -DMGE_WITH_CUBLAS_SHARED=OFF"
  60. BUILD_GCC8="ON"
  61. REQUIR_CUDA_VERSION="10010"
  62. REQUIR_CUDNN_VERSION="7.6.3"
  63. REQUIR_TENSORRT_VERSION="6.0.1.5"
  64. REQUIR_CUBLAS_VERSION="10.2.1.243"
  65. elif [ $SDK_NAME == "cu102_JetsonNano" ];then
  66. # Jetson Nano B01 version
  67. REQUIR_CUDA_VERSION="10020"
  68. REQUIR_CUDNN_VERSION="8.2.1"
  69. REQUIR_TENSORRT_VERSION="8.0.1.6"
  70. REQUIR_CUBLAS_VERSION="10.2.3.300"
  71. CUDA_COPY_LIB_LIST="\
  72. ${CUDA_LIB_DIR}/libnvrtc.so.10.2:\
  73. ${CUDA_LIB_DIR}/libcublasLt.so.10:\
  74. ${CUDA_LIB_DIR}/libcublas.so.10:\
  75. ${CUDNN_LIB_DIR}/libcudnn_adv_infer.so.8:\
  76. ${CUDNN_LIB_DIR}/libcudnn_adv_train.so.8:\
  77. ${CUDNN_LIB_DIR}/libcudnn_cnn_infer.so.8:\
  78. ${CUDNN_LIB_DIR}/libcudnn_cnn_train.so.8:\
  79. ${CUDNN_LIB_DIR}/libcudnn_ops_infer.so.8:\
  80. ${CUDNN_LIB_DIR}/libcudnn_ops_train.so.8:\
  81. ${CUDNN_LIB_DIR}/libcudnn.so.8:\
  82. ${TensorRT_LIB_DIR}/libnvinfer_plugin.so.8:\
  83. ${TensorRT_LIB_DIR}/libnvinfer.so.8"
  84. EXTRA_CMAKE_FLAG="-DMGE_WITH_CUDNN_SHARED=ON -DMGE_WITH_CUBLAS_SHARED=ON -DMGE_CUDA_GENCODE=\"-gencode arch=compute_53,code=sm_53\" "
  85. elif [ $SDK_NAME == "cu111" ];then
  86. BUILD_GCC8="ON"
  87. if [ ${machine} == "aarch64" ];then
  88. REQUIR_CUDA_VERSION="11010"
  89. REQUIR_CUDNN_VERSION="8.0.5"
  90. REQUIR_TENSORRT_VERSION="7.2.1.6"
  91. REQUIR_CUBLAS_VERSION="11.3.0.106"
  92. elif [ ${machine} == "x86_64" ];then
  93. REQUIR_CUDA_VERSION="11010"
  94. REQUIR_CUDNN_VERSION="8.0.4"
  95. REQUIR_TENSORRT_VERSION="7.2.2.3"
  96. REQUIR_CUBLAS_VERSION="11.2.1.74"
  97. else
  98. echo "no support machine: ${machine}"
  99. exit -1
  100. fi
  101. CUDA_COPY_LIB_LIST="\
  102. ${CUDA_LIB_DIR}/libnvrtc.so.11.1:\
  103. ${CUDA_LIB_DIR}/libcublasLt.so.11:\
  104. ${CUDA_LIB_DIR}/libcublas.so.11:\
  105. ${CUDNN_LIB_DIR}/libcudnn_adv_infer.so.8:\
  106. ${CUDNN_LIB_DIR}/libcudnn_adv_train.so.8:\
  107. ${CUDNN_LIB_DIR}/libcudnn_cnn_infer.so.8:\
  108. ${CUDNN_LIB_DIR}/libcudnn_cnn_train.so.8:\
  109. ${CUDNN_LIB_DIR}/libcudnn_ops_infer.so.8:\
  110. ${CUDNN_LIB_DIR}/libcudnn_ops_train.so.8:\
  111. ${CUDNN_LIB_DIR}/libcudnn.so.8"
  112. if [ ${IN_CI} = "true" ] && [ ${machine} == "aarch64" ]; then
  113. EXTRA_CMAKE_FLAG=" -DMGE_WITH_CUDNN_SHARED=ON -DMGE_WITH_CUBLAS_SHARED=ON -DMGE_CUDA_GENCODE=\"-gencode arch=compute_75,code=sm_75\" "
  114. else
  115. EXTRA_CMAKE_FLAG=" -DMGE_WITH_CUDNN_SHARED=ON -DMGE_WITH_CUBLAS_SHARED=ON \
  116. -DMGE_CUDA_GENCODE=\"-gencode arch=compute_61,code=sm_61 \
  117. -gencode arch=compute_70,code=sm_70 \
  118. -gencode arch=compute_75,code=sm_75 \
  119. -gencode arch=compute_80,code=sm_80 \
  120. -gencode arch=compute_86,code=sm_86 \
  121. -gencode arch=compute_86,code=compute_86\" "
  122. fi
  123. elif [ $SDK_NAME == "cu111_cudnn821_tensorRT825" ];then
  124. BUILD_GCC8="ON"
  125. REQUIR_CUDA_VERSION="11010"
  126. REQUIR_CUDNN_VERSION="8.2.1"
  127. REQUIR_TENSORRT_VERSION="8.2.5.1"
  128. REQUIR_CUBLAS_VERSION="11.2.1.74"
  129. CUDA_COPY_LIB_LIST="\
  130. ${CUDA_LIB_DIR}/libnvrtc.so.11.1:\
  131. ${CUDA_LIB_DIR}/libcublasLt.so.11:\
  132. ${CUDA_LIB_DIR}/libcublas.so.11:\
  133. ${CUDNN_LIB_DIR}/libcudnn_adv_infer.so.8:\
  134. ${CUDNN_LIB_DIR}/libcudnn_adv_train.so.8:\
  135. ${CUDNN_LIB_DIR}/libcudnn_cnn_infer.so.8:\
  136. ${CUDNN_LIB_DIR}/libcudnn_cnn_train.so.8:\
  137. ${CUDNN_LIB_DIR}/libcudnn_ops_infer.so.8:\
  138. ${CUDNN_LIB_DIR}/libcudnn_ops_train.so.8:\
  139. ${CUDNN_LIB_DIR}/libcudnn.so.8:\
  140. ${TensorRT_LIB_DIR}/libnvinfer_plugin.so.8:\
  141. ${TensorRT_LIB_DIR}/libnvinfer.so.8"
  142. EXTRA_CMAKE_FLAG=" -DMGE_WITH_CUDNN_SHARED=ON -DMGE_WITH_CUBLAS_SHARED=ON \
  143. -DMGE_CUDA_GENCODE=\"-gencode arch=compute_61,code=sm_61 \
  144. -gencode arch=compute_70,code=sm_70 \
  145. -gencode arch=compute_75,code=sm_75 \
  146. -gencode arch=compute_80,code=sm_80 \
  147. -gencode arch=compute_86,code=sm_86 \
  148. -gencode arch=compute_86,code=compute_86\" "
  149. elif [ $SDK_NAME == "cu112" ];then
  150. BUILD_GCC8="ON"
  151. CUDA_COPY_LIB_LIST="\
  152. ${CUDA_LIB_DIR}/libnvrtc.so.11.2:\
  153. ${CUDA_LIB_DIR}/libcublasLt.so.11:\
  154. ${CUDA_LIB_DIR}/libcublas.so.11:\
  155. ${CUDNN_LIB_DIR}/libcudnn_adv_infer.so.8:\
  156. ${CUDNN_LIB_DIR}/libcudnn_adv_train.so.8:\
  157. ${CUDNN_LIB_DIR}/libcudnn_cnn_infer.so.8:\
  158. ${CUDNN_LIB_DIR}/libcudnn_cnn_train.so.8:\
  159. ${CUDNN_LIB_DIR}/libcudnn_ops_infer.so.8:\
  160. ${CUDNN_LIB_DIR}/libcudnn_ops_train.so.8:\
  161. ${CUDNN_LIB_DIR}/libcudnn.so.8"
  162. EXTRA_CMAKE_FLAG=" -DMGE_WITH_CUDNN_SHARED=ON -DMGE_WITH_CUBLAS_SHARED=ON \
  163. -DMGE_CUDA_GENCODE=\"-gencode arch=compute_61,code=sm_61 \
  164. -gencode arch=compute_70,code=sm_70 \
  165. -gencode arch=compute_75,code=sm_75 \
  166. -gencode arch=compute_80,code=sm_80 \
  167. -gencode arch=compute_86,code=sm_86 \
  168. -gencode arch=compute_86,code=compute_86\" "
  169. REQUIR_CUDA_VERSION="11020"
  170. REQUIR_CUDNN_VERSION="8.0.4"
  171. REQUIR_TENSORRT_VERSION="7.2.2.3"
  172. REQUIR_CUBLAS_VERSION="11.3.1.68"
  173. elif [ $SDK_NAME == "cpu" ];then
  174. echo "use $SDK_NAME without cuda support"
  175. BUILD_WHL_CPU_ONLY="ON"
  176. else
  177. echo "no support sdk ${SDK_NAME}"
  178. usage
  179. exit -1
  180. fi
  181. if [[ -z ${BUILD_WHL_CPU_ONLY} ]]
  182. then
  183. BUILD_WHL_CPU_ONLY="OFF"
  184. fi
  185. echo ${BASEDIR}
  186. pushd ${BASEDIR}/third_party >/dev/null
  187. ./prepare.sh
  188. popd >/dev/null
  189. cd ${CWD}
  190. mkdir -p ${OUTPUTDIR}
  191. if [ ${BUILD_WHL_CPU_ONLY} = "OFF" ]; then
  192. if [[ -z ${CUDA_ROOT_DIR} ]]; then
  193. echo "Environment variable CUDA_ROOT_DIR not set."
  194. exit -1
  195. fi
  196. if [[ -z ${CUDNN_ROOT_DIR} ]]; then
  197. echo "Environment variable CUDNN_ROOT_DIR not set."
  198. exit -1
  199. fi
  200. if [[ -z ${TENSORRT_ROOT_DIR} ]]; then
  201. echo "Environment variable TENSORRT_ROOT_DIR not set."
  202. if [[ -z ${TRT_ROOT_DIR} ]]; then
  203. echo "Environment variable TRT_ROOT_DIR not set."
  204. exit -1
  205. else
  206. echo "put ${TRT_ROOT_DIR} to TENSORRT_ROOT_DIR env"
  207. TENSORRT_ROOT_DIR=${TRT_ROOT_DIR}
  208. fi
  209. fi
  210. ## YOU SHOULD MODIFY CUDA VERSION AS BELOW WHEN UPGRADE
  211. CUDA_ROOT_DIR_=${CUDA_ROOT_DIR%*/}
  212. CUDNN_ROOT_DIR_=${CUDNN_ROOT_DIR%*/}
  213. TENSORRT_ROOT_DIR_=${TENSORRT_ROOT_DIR%*/}
  214. CUBLAS_VERSION_PATH=${CUDA_ROOT_DIR_}/include/cublas_api.h
  215. CUDA_VERSION_PATH=${CUDA_ROOT_DIR_}/include/cuda.h
  216. if [ -e ${CUDNN_ROOT_DIR_}/include/cudnn_version.h ];then
  217. CUDNN_VERSION_PATH=${CUDNN_ROOT_DIR_}/include/cudnn_version.h
  218. elif [ -e ${CUDNN_ROOT_DIR_}/include/cudnn.h ];then
  219. CUDNN_VERSION_PATH=${CUDNN_ROOT_DIR_}/include/cudnn.h
  220. else
  221. echo "cannot determine CUDNN_VERSION_PATH from CUDNN_ROOT_DIR."
  222. exit -1
  223. fi
  224. TENSORRT_VERSION_PATH=${TENSORRT_ROOT_DIR_}/include/NvInferVersion.h
  225. if [ ! -e $CUDA_VERSION_PATH ] ; then
  226. echo file $CUDA_VERSION_PATH is not exist
  227. echo please check the Environment must use CUDA-$REQUIR_CUDA_VERSION
  228. exit -1
  229. fi
  230. if [ ! -e $CUDNN_VERSION_PATH ] ; then
  231. echo file $CUDNN_VERSION_PATH is not exist
  232. echo please check the Environment must use CUDNN-V$REQUIR_CUDNN_VERSION
  233. exit -1
  234. fi
  235. if [ ! -e $TENSORRT_VERSION_PATH ] ; then
  236. echo file $TENSORRT_VERSION_PATH is not exist
  237. echo please check the Environment must use TensorRT-$REQUIR_TENSORRT_VERSION
  238. exit -1
  239. fi
  240. if [ ! -e $CUBLAS_VERSION_PATH ] ; then
  241. echo file $CUBLAS_VERSION_PATH is not exist
  242. exit -1
  243. fi
  244. CUBLAS_VERSION_CONTEXT=$(head -150 ${CUBLAS_VERSION_PATH})
  245. CUDA_VERSION_CONTEXT=$(head -300 ${CUDA_VERSION_PATH})
  246. CUDNN_VERSION_CONTEXT=$(head -62 ${CUDNN_VERSION_PATH})
  247. TENSORRT_VERSION_CONTEXT=$(tail -12 ${TENSORRT_VERSION_PATH})
  248. if [ "$REQUIR_CUDA_VERSION" -ge "11000" ];then
  249. CUDA_API_VERSION=$(echo $CUDA_VERSION_CONTEXT | grep -Eo "define CUDA_VERSION * +([0-9]+)")
  250. else
  251. CUDA_API_VERSION=$(echo $CUDA_VERSION_CONTEXT | grep -Eo "define __CUDA_API_VERSION * +([0-9]+)")
  252. fi
  253. CUDA_VERSION=${CUDA_API_VERSION:0-5}
  254. echo CUDA_VERSION:$CUDA_VERSION
  255. CUDNN_VERSION_MAJOR=$(echo $CUDNN_VERSION_CONTEXT | grep -Eo "define CUDNN_MAJOR * +([0-9]+)")
  256. CUDNN_VERSION_MINOR=$(echo $CUDNN_VERSION_CONTEXT | grep -Eo "define CUDNN_MINOR * +([0-9]+)")
  257. CUDNN_VERSION_PATCH=$(echo $CUDNN_VERSION_CONTEXT | grep -Eo "define CUDNN_PATCHLEVEL * +([0-9]+)")
  258. CUDNN_VERSION=${CUDNN_VERSION_MAJOR:0-1}.${CUDNN_VERSION_MINOR:0-1}.${CUDNN_VERSION_PATCH:0-1}
  259. echo CUDNN_VERSION:$CUDNN_VERSION
  260. TENSORRT_VERSION_MAJOR=$(echo $TENSORRT_VERSION_CONTEXT | grep -Eo "NV_TENSORRT_MAJOR * +([0-9]+)")
  261. TENSORRT_VERSION_MINOR=$(echo $TENSORRT_VERSION_CONTEXT | grep -Eo "NV_TENSORRT_MINOR * +([0-9]+)")
  262. TENSORRT_VERSION_PATCH=$(echo $TENSORRT_VERSION_CONTEXT | grep -Eo "NV_TENSORRT_PATCH * +([0-9]+)")
  263. TENSORRT_VERSION_BUILD=$(echo $TENSORRT_VERSION_CONTEXT | grep -Eo "NV_TENSORRT_BUILD * +([0-9]+)")
  264. TENSORRT_VERSION=${TENSORRT_VERSION_MAJOR:0-1}.${TENSORRT_VERSION_MINOR:0-1}.${TENSORRT_VERSION_PATCH:0-1}.${TENSORRT_VERSION_BUILD:0-1}
  265. echo TENSORRT_VERSION:$TENSORRT_VERSION
  266. CUBLAS_VERSION_MAJOR=$(echo $CUBLAS_VERSION_CONTEXT | grep -Eo "define CUBLAS_VER_MAJOR * +([0-9]+)" | grep -Eo "*+([0-9]+)")
  267. CUBLAS_VERSION_MINOR=$(echo $CUBLAS_VERSION_CONTEXT | grep -Eo "define CUBLAS_VER_MINOR * +([0-9]+)" | grep -Eo "*+([0-9]+)")
  268. CUBLAS_VERSION_PATCH=$(echo $CUBLAS_VERSION_CONTEXT | grep -Eo "define CUBLAS_VER_PATCH * +([0-9]+)" | grep -Eo "*+([0-9]+)")
  269. CUBLAS_VERSION_BUILD=$(echo $CUBLAS_VERSION_CONTEXT | grep -Eo "define CUBLAS_VER_BUILD * +([0-9]+)" | grep -Eo "*+([0-9]+)")
  270. CUBLAS_VERSION=${CUBLAS_VERSION_MAJOR}.${CUBLAS_VERSION_MINOR}.${CUBLAS_VERSION_PATCH}.${CUBLAS_VERSION_BUILD}
  271. echo CUBLAS_VERSION:$CUBLAS_VERSION
  272. if [ $CUDA_VERSION != $REQUIR_CUDA_VERSION ] ; then
  273. echo please check the Environment must use CUDA NO.$REQUIR_CUDA_VERSION
  274. exit -1
  275. fi
  276. if [ $CUDNN_VERSION != $REQUIR_CUDNN_VERSION ] ; then
  277. echo please check the Environment must use CUDNN-V$REQUIR_CUDNN_VERSION
  278. exit -1
  279. fi
  280. if [ $TENSORRT_VERSION != $REQUIR_TENSORRT_VERSION ] ; then
  281. echo please check the Environment must use TENSORRT-$REQUIR_TENSORRT_VERSION
  282. exit -1
  283. fi
  284. if [ $CUBLAS_VERSION != $REQUIR_CUBLAS_VERSION ] ; then
  285. echo please check the Environment must use CUBLAS-$REQUIR_CUBLAS_VERSION
  286. exit -1
  287. fi
  288. fi
  289. if [[ -z ${BUILD_GCC8} ]];then
  290. BUILD_GCC8=OFF
  291. fi
  292. if [ ${machine} == "aarch64" ];then
  293. # manylinux on aarch64 gcc9 is: (GCC) 9.3.1 20200408 (Red Hat 9.3.1-2)
  294. # which version has issue: 'as' take a long long long time for some dnn kernel!
  295. # infact ubuntu gcc version: gcc (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0 is OK
  296. echo "force use gcc8 on aarch64 linux"
  297. BUILD_GCC8="ON"
  298. fi
  299. if [ "$BUILD_GCC8" == "ON" ];then
  300. run_cmd="scl enable devtoolset-8 /home/code/scripts/whl/manylinux2014/do_build_common.sh"
  301. else
  302. run_cmd="/home/code/scripts/whl/manylinux2014/do_build_common.sh"
  303. fi
  304. set +x
  305. docker_args="-it"
  306. if [ -z "${CI_SERVER_NAME}" ]; then
  307. CI_SERVER_NAME="null"
  308. fi
  309. if [ ${CI_SERVER_NAME} = "GitLab" ];then
  310. docker_args="-i"
  311. fi
  312. if [ ${IN_CI} = "true" ];then
  313. EXTRA_CMAKE_FLAG=" ${EXTRA_CMAKE_FLAG} -DMGE_WITH_TEST=ON"
  314. fi
  315. docker run --rm ${docker_args} $TMPFS_ARGS \
  316. -e UID=${USERID} \
  317. -e PUBLIC_VERSION_POSTFIX=${PUBLIC_VERSION_POSTFIX} \
  318. -e LOCAL_VERSION=${LOCAL_VERSION} \
  319. -e STRIP_SDK_INFO=${STRIP_SDK_INFO} \
  320. -e BUILD_WHL_CPU_ONLY=${BUILD_WHL_CPU_ONLY} \
  321. -e ALL_PYTHON="${ALL_PYTHON}" \
  322. -e EXTRA_CMAKE_FLAG="$EXTRA_CMAKE_FLAG" \
  323. -e CUDA_COPY_LIB_LIST="$CUDA_COPY_LIB_LIST" \
  324. -e SDK_NAME="$SDK_NAME" \
  325. -e CUDA_ROOT_DIR="/usr/local/cuda" \
  326. -e CUDNN_ROOT_DIR="/opt/cudnn" \
  327. -e TRT_ROOT_DIR="/opt/tensorrt" \
  328. -v ${CUDA_ROOT_DIR}:/usr/local/cuda \
  329. -v ${CUDNN_ROOT_DIR}:/opt/cudnn \
  330. -v ${TENSORRT_ROOT_DIR}:/opt/tensorrt \
  331. -v ${BASEDIR}:/home/code \
  332. -v ${OUTPUTDIR}:/home/output:rw \
  333. env_manylinux2014:latest /bin/bash -c "$run_cmd"