You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

server.py 9.2 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317
  1. # -*- coding: utf-8 -*-
  2. import multiprocessing as mp
  3. import threading
  4. import time
  5. from collections import defaultdict
  6. from functools import partial
  7. from socketserver import ThreadingMixIn
  8. from xmlrpc.client import ServerProxy
  9. from xmlrpc.server import SimpleXMLRPCServer
  10. from ..core._imperative_rt.utils import create_mm_server
  11. from ..utils.future import Future
class Methods:
    r"""Distributed Server Method.

    Used for exchanging information between distributed nodes; an instance is
    registered on an XML-RPC server, so every public method here is a remote
    endpoint called by :class:`Client`.

    Args:
        mm_server_port: multiple machine rpc server port.
    """

    def __init__(self, mm_server_port):
        # One lock guards all dictionaries below. Blocking Future.get() calls
        # are deliberately made OUTSIDE the lock so a waiter never stalls
        # other RPC requests.
        self.lock = threading.Lock()
        self.mm_server_port = mm_server_port
        # key -> Future carrying the is_grad flag of a matched send/recv pair.
        # NOTE(review): the bool passed to Future's ctor presumably toggles
        # ack/multi-reader behavior — confirm in ..utils.future.
        self.dict_is_grad = defaultdict(partial(Future, True))
        # key -> Future carrying the tracer set of a matched send/recv pair.
        self.dict_remote_tracer = defaultdict(partial(Future, True))
        # NOTE(review): dict_pack_list is never referenced in this file —
        # confirm whether it is used elsewhere before removing.
        self.dict_pack_list = defaultdict(partial(Future, False))
        # Per-key arrival counter and wake-up event backing group_barrier().
        self.dict_barrier_counter = defaultdict(int)
        self.dict_barrier_event = defaultdict(threading.Event)
        # User-defined cross-process key-value store (user_set/user_get).
        self.user_dict = defaultdict(partial(Future, False))
        # key -> [Future holding the value, remaining participant count],
        # used by bcast_val(); entry is deleted when the count reaches zero.
        self.bcast_dict = {}

    def connect(self):
        r"""Method for checking connection success; always returns True."""
        return True

    def get_mm_server_port(self):
        r"""Get multiple machine rpc server port."""
        return self.mm_server_port

    def set_is_grad(self, key, is_grad):
        r"""Mark whether send/recv needs gradients by key.

        Args:
            key: key to match send/recv op.
            is_grad: whether this op needs grad.
        """
        with self.lock:
            future = self.dict_is_grad[key]
        # set() outside the lock: the matching check_is_grad() may already be
        # blocked in get().
        future.set(is_grad)
        return True

    def check_is_grad(self, key):
        r"""Check whether send/recv needs gradients.

        Blocks until the peer has called set_is_grad() with the same key,
        then removes the entry.

        Args:
            key: key to match send/recv op.
        """
        with self.lock:
            future = self.dict_is_grad[key]
        # Blocking wait happens without holding the lock.
        ret = future.get()
        with self.lock:
            del self.dict_is_grad[key]
        return ret

    def set_remote_tracer(self, key, tracer_set):
        r"""Set tracer dict for tracing send/recv op.

        Args:
            key: key to match send/recv op.
            tracer_set: valid tracer set.
        """
        with self.lock:
            future = self.dict_remote_tracer[key]
        future.set(tracer_set)
        return True

    def check_remote_tracer(self, key):
        r"""Get tracer dict for send/recv op.

        Blocks until the peer has called set_remote_tracer() with the same
        key, then removes the entry.

        Args:
            key: key to match send/recv op.
        """
        with self.lock:
            future = self.dict_remote_tracer[key]
        ret = future.get()
        with self.lock:
            del self.dict_remote_tracer[key]
        return ret

    def group_barrier(self, key, size):
        r"""A barrier waiting for all group members.

        Args:
            key: group key to match each other.
            size: group size.
        """
        with self.lock:
            self.dict_barrier_counter[key] += 1
            counter = self.dict_barrier_counter[key]
            event = self.dict_barrier_event[key]
        if counter == size:
            # Last arriver: clear the per-key state, then release everyone.
            del self.dict_barrier_counter[key]
            del self.dict_barrier_event[key]
            event.set()
        else:
            # Wait (lock released) until the last member sets the event.
            event.wait()
        return True

    def user_set(self, key, val):
        r"""Set user defined key-value pairs across processes."""
        with self.lock:
            future = self.user_dict[key]
        future.set(val)
        return True

    def user_get(self, key):
        r"""Get user defined key-value pairs across processes; blocks until set."""
        with self.lock:
            future = self.user_dict[key]
        return future.get()

    def bcast_val(self, val, key, size):
        # Broadcast: exactly one caller passes a non-None val (the root);
        # the other size-1 callers pass None and block until it is set.
        with self.lock:
            if key not in self.bcast_dict:
                self.bcast_dict[key] = [Future(False), size]
            arr = self.bcast_dict[key]
        if val is not None:
            arr[0].set(val)
            val = None
        else:
            val = arr[0].get()
        with self.lock:
            # Reference-count down; the last participant deletes the entry.
            cnt = arr[1] - 1
            arr[1] = cnt
            if cnt == 0:
                del self.bcast_dict[key]
        return val

    def _del(self, key):
        with self.lock:
            del self.user_dict[key]

    # thread safe function
    def user_pop(self, key):
        ret = self.user_get(key)
        self._del(key)
        return ret
class ThreadXMLRPCServer(ThreadingMixIn, SimpleXMLRPCServer):
    r"""XML-RPC server that handles each request in its own thread, so one
    blocked RPC (e.g. a waiting barrier) does not stall other clients."""

    pass
  130. def _start_server(py_server_port, queue):
  131. r"""Start python distributed server and multiple machine server.
  132. Args:
  133. py_server_port: python server port.
  134. mm_server_port: multiple machine server port.
  135. queue: server port will put in this queue, puts exception when process fails.
  136. """
  137. try:
  138. mm_server_port = create_mm_server("0.0.0.0", 0)
  139. server = ThreadXMLRPCServer(
  140. ("0.0.0.0", py_server_port), logRequests=False, allow_none=True
  141. )
  142. server.register_instance(Methods(mm_server_port))
  143. _, py_server_port = server.server_address
  144. queue.put((py_server_port, mm_server_port))
  145. server.serve_forever()
  146. except Exception as e:
  147. queue.put(e)
class Server:
    r"""Distributed Server for distributed training.

    Should be running at master node. Spawns a daemon child process that
    hosts the XML-RPC server and the multiple machine server.

    Args:
        port: python server port (0 picks a free port).

    Raises:
        Exception: re-raises whatever exception the child process hit while
            starting its servers.
    """

    def __init__(self, port=0):
        q = mp.Queue()
        # daemon=True: the server process dies with the parent.
        self.proc = mp.Process(target=_start_server, args=(port, q), daemon=True)
        self.proc.start()
        # Child sends either the bound (py_port, mm_port) pair or an exception.
        ret = q.get()
        if isinstance(ret, Exception):
            raise ret
        else:
            self.py_server_port, self.mm_server_port = ret

    def __del__(self):
        # Best-effort shutdown of the server process.
        self.proc.terminate()
  165. class Client:
  166. r"""Distributed Client for distributed training.
  167. Args:
  168. master_ip: ip address of master node.
  169. port: port of server at master node.
  170. """
  171. def __init__(self, master_ip, port):
  172. self.master_ip = master_ip
  173. self.port = port
  174. self.connect()
  175. self.bcast_dict = defaultdict(lambda: 0)
  176. def connect(self):
  177. r"""Check connection success."""
  178. while True:
  179. try:
  180. self.proxy = ServerProxy(
  181. "http://{}:{}".format(self.master_ip, self.port), allow_none=True
  182. )
  183. if self.proxy.connect():
  184. break
  185. except:
  186. time.sleep(1)
  187. def get_mm_server_port(self):
  188. r"""Get multiple machine server port."""
  189. while True:
  190. try:
  191. return self.proxy.get_mm_server_port()
  192. except:
  193. time.sleep(0.5)
  194. def set_is_grad(self, key, is_grad):
  195. r"""Mark send/recv need gradiants by key.
  196. Args:
  197. key: key to match send/recv op.
  198. is_grad: whether this op need grad.
  199. """
  200. self.proxy.set_is_grad(key, is_grad)
  201. def check_is_grad(self, key):
  202. r"""Check whether send/recv need gradiants.
  203. Args:
  204. key: key to match send/recv op.
  205. """
  206. return self.proxy.check_is_grad(key)
  207. def set_remote_tracer(self, key, tracer_set):
  208. r"""Set tracer dict for tracing send/recv op.
  209. Args:
  210. key: key to match send/recv op.
  211. tracer_set: valid tracer set.
  212. """
  213. self.proxy.set_remote_tracer(key, tracer_set)
  214. def check_remote_tracer(self, key):
  215. r"""Get tracer dict for send/recv op.
  216. Args:
  217. key: key to match send/recv op.
  218. """
  219. return self.proxy.check_remote_tracer(key)
  220. def group_barrier(self, key, size):
  221. r"""A barrier wait for all group member.
  222. Args:
  223. key: group key to match each other.
  224. size: group size.
  225. """
  226. # FIXME: group_barrier is not idempotent
  227. while True:
  228. try:
  229. self.proxy.group_barrier(key, size)
  230. return
  231. except:
  232. time.sleep(0.5)
  233. def user_set(self, key, val):
  234. r"""Set user defined key-value pairs across processes."""
  235. return self.proxy.user_set(key, val)
  236. def user_get(self, key):
  237. r"""Get user defined key-value pairs across processes."""
  238. return self.proxy.user_get(key)
  239. def user_pop(self, key):
  240. r"""Get user defined key-value pairs and delete the resources when the get is done"""
  241. return self.proxy.user_pop(key)
  242. def bcast_val(self, val, key, size):
  243. idx = self.bcast_dict[key] + 1
  244. self.bcast_dict[key] = idx
  245. key = key + "_bcast_" + str(idx)
  246. return self.proxy.bcast_val(val, key, size)
  247. def main(port=0, verbose=True):
  248. mm_server_port = create_mm_server("0.0.0.0", 0)
  249. server = ThreadXMLRPCServer(("0.0.0.0", port), logRequests=verbose)
  250. server.register_instance(Methods(mm_server_port))
  251. _, port = server.server_address
  252. print("serving on port", port)
  253. server.serve_forever()
  254. if __name__ == "__main__":
  255. import argparse
  256. ap = argparse.ArgumentParser()
  257. ap.add_argument("-p", "--port", type=int, default=0)
  258. ap.add_argument("-v", "--verbose", type=bool, default=True)
  259. args = ap.parse_args()
  260. main(port=args.port, verbose=args.verbose)