You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

http_server.py 3.1 kB

4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182
  1. """
  2. Copyright 2020 Tianshu AI Platform. All Rights Reserved.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. """
  13. import config as configs
  14. from fastapi import FastAPI, File, UploadFile
  15. from utils import file_utils
  16. import uvicorn
  17. import threading
  18. from logger import Logger
  19. from typing import List
  20. from service.inference_service_manager import InferenceServiceManager
  21. from response import Response
# Application object and module-level initialization.
# NOTE: statement order matters here -- args must be parsed before the
# InferenceServiceManager is built, and `inference_service.init()` must run
# before any request handler uses it.
app = FastAPI(version='1.0', title='Zhejiang Lab TS_Serving inference Automation',
              description="<b>API for performing oneflow、tensorflow、pytorch inference</b></br></br>")
# For standalone deployment, CORS can be enabled here; otherwise handle
# cross-origin concerns in nginx or at the API gateway.
# app.add_middleware(
#     CORSMiddleware,
#     allow_origins=["*"],      # allowed request origins
#     allow_credentials=True,
#     allow_methods=["*"],      # allowed HTTP methods (GET, POST, PUT, ...)
#     allow_headers=["*"])      # allowed request headers (e.g. for auth)

# Parse CLI configuration (host, port, model_name, ... -- defined in config.py)
# and echo it to the console for operator visibility.
parser = configs.get_parser()
args = parser.parse_args()
configs.print_args(args)

# Build and initialize the inference backend for the configured model(s).
inference_service = InferenceServiceManager(args)
inference_service.init()

# Module-level logger shared by all request handlers.
log = Logger().logger
  37. @app.get("/")
  38. def read_root():
  39. return {"message": "ok"}
  40. @app.post("/image_inference")
  41. async def inference(images_path: List[str] = None):
  42. threading.Thread(target=file_utils.download_image(images_path)) # 开启异步线程下载图片到本地
  43. images = list()
  44. for image in images_path:
  45. data = {"data_name": image.split("/")[-1], "data_path": image}
  46. images.append(data)
  47. try:
  48. data = inference_service.inference(args.model_name, images)
  49. return Response(success=True, data=data)
  50. except Exception as e:
  51. return Response(success=False, data=str(e), error="inference fail")
  52. @app.post("/inference")
  53. async def inference(files: List[UploadFile] = File(...)):
  54. """
  55. 上传本地文件推理
  56. """
  57. log.info("===============> http inference start <===============")
  58. try:
  59. data_list = file_utils.upload_data(files) # 上传图片到本地
  60. except Exception as e:
  61. log.error("upload data failed", e)
  62. return Response(success=False, data=str(e), error="upload data failed")
  63. try:
  64. result = inference_service.inference(args.model_name, data_list)
  65. log.info("===============> http inference success <===============")
  66. return Response(success=True, data=result)
  67. except Exception as e:
  68. log.error("inference fail", e)
  69. return Response(success=False, data=str(e), error="inference fail")
  70. if __name__ == '__main__':
  71. uvicorn.run(app, host=args.host, port=args.port)

一站式算法开发平台、高性能分布式深度学习框架、先进算法模型库、视觉模型炼知平台、数据可视化分析平台等一系列平台及工具,在模型高效分布式训练、数据处理和可视分析、模型炼知和轻量化等技术上形成独特优势,目前已在产学研等各领域近千家单位及个人提供AI应用赋能