|
|
@@ -6,7 +6,6 @@ |
|
|
|
# Unless required by applicable law or agreed to in writing, |
|
|
|
# software distributed under the License is distributed on an |
|
|
|
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
|
|
import multiprocessing as mp |
|
|
|
import platform |
|
|
|
|
|
|
|
import numpy as np |
|
|
@@ -16,6 +15,7 @@ import megengine as mge |
|
|
|
import megengine.distributed as dist |
|
|
|
from megengine import Parameter, Tensor, tensor |
|
|
|
from megengine.device import get_default_device, set_default_device |
|
|
|
from megengine.distributed.helper import get_device_count_by_fork |
|
|
|
from megengine.functional.distributed import ( |
|
|
|
all_gather, |
|
|
|
all_reduce_max, |
|
|
@@ -38,20 +38,16 @@ from megengine.functional.distributed import ( |
|
|
|
@pytest.mark.skipif( |
|
|
|
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM" |
|
|
|
) |
|
|
|
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device") |
|
|
|
@pytest.mark.isolated_distributed |
|
|
|
def test_reduce_sum(): |
|
|
|
world_size = 2 |
|
|
|
server = dist.Server() |
|
|
|
port = server.py_server_port |
|
|
|
|
|
|
|
def worker(rank, data, expect, port): |
|
|
|
if mge.get_device_count("gpu") < world_size: |
|
|
|
return |
|
|
|
dist.init_process_group("localhost", port, world_size, rank, rank) |
|
|
|
inp = tensor(data) |
|
|
|
@dist.launcher(n_gpus=2) |
|
|
|
def worker(data, expect): |
|
|
|
rank = dist.get_rank() |
|
|
|
inp = tensor(data[rank]) |
|
|
|
output = reduce_sum(inp) |
|
|
|
if rank == 0: |
|
|
|
assert np.allclose(output.numpy(), expect) |
|
|
|
assert np.allclose(output.numpy(), expect[rank]) |
|
|
|
else: |
|
|
|
assert np.allclose(output.numpy(), 0) |
|
|
|
|
|
|
@@ -59,16 +55,9 @@ def test_reduce_sum(): |
|
|
|
x = np.random.rand(*shape).astype("float32") |
|
|
|
y = np.random.rand(*shape).astype("float32") |
|
|
|
z = x + y |
|
|
|
p0 = mp.Process(target=worker, args=(0, x, z, port)) |
|
|
|
p1 = mp.Process(target=worker, args=(1, y, None, port)) |
|
|
|
|
|
|
|
p0.start() |
|
|
|
p1.start() |
|
|
|
|
|
|
|
p0.join(10) |
|
|
|
p1.join(10) |
|
|
|
|
|
|
|
assert p0.exitcode == 0 and p1.exitcode == 0 |
|
|
|
data = (x, y) |
|
|
|
expect = (z, None) |
|
|
|
worker(data, expect) |
|
|
|
|
|
|
|
for shape in [(2, 3), (8, 10), (99, 77)]: |
|
|
|
check(shape) |
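# Informal note on the pattern above (names taken from this diff, not an exact API
# reference): dist.launcher(n_gpus=2) replaces the manual mp.Process/Server driver by
# spawning one process per GPU and running the decorated worker in each, with
# dist.get_rank() identifying the process. reduce_sum is rooted at rank 0, so with
# inputs x and y the expected results are (x + y, None): only rank 0 receives the sum.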
|
|
@@ -80,33 +69,22 @@ def test_reduce_sum(): |
|
|
|
@pytest.mark.skipif( |
|
|
|
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM" |
|
|
|
) |
|
|
|
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device") |
|
|
|
@pytest.mark.isolated_distributed |
|
|
|
def test_broadcast(): |
|
|
|
world_size = 2 |
|
|
|
server = dist.Server() |
|
|
|
port = server.py_server_port |
|
|
|
|
|
|
|
def worker(rank, data, expect, port): |
|
|
|
if mge.get_device_count("gpu") < world_size: |
|
|
|
return |
|
|
|
dist.init_process_group("localhost", port, world_size, rank, rank) |
|
|
|
inp = tensor(data) |
|
|
|
@dist.launcher(n_gpus=2) |
|
|
|
def worker(data, expect): |
|
|
|
rank = dist.get_rank() |
|
|
|
inp = tensor(data[rank]) |
|
|
|
output = broadcast(inp) |
|
|
|
assert np.allclose(output.numpy(), expect) |
|
|
|
assert np.allclose(output.numpy(), expect[rank]) |
|
|
|
|
|
|
|
def check(shape): |
|
|
|
x = np.random.rand(*shape).astype("float32") |
|
|
|
y = x + 1 |
|
|
|
p0 = mp.Process(target=worker, args=(0, x, x, port)) |
|
|
|
p1 = mp.Process(target=worker, args=(1, y, x, port)) |
|
|
|
|
|
|
|
p0.start() |
|
|
|
p1.start() |
|
|
|
|
|
|
|
p0.join(10) |
|
|
|
p1.join(10) |
|
|
|
|
|
|
|
assert p0.exitcode == 0 and p1.exitcode == 0 |
|
|
|
data = (x, y) |
|
|
|
expect = (x, x) |
|
|
|
worker(data, expect) |
|
|
|
|
|
|
|
for shape in [(2, 3), (8, 10), (99, 77)]: |
|
|
|
check(shape) |
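# broadcast semantics checked above: rank 0's tensor x is replicated to every rank,
# so expect = (x, x) regardless of what rank 1 fed in (here y = x + 1).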
|
|
@@ -118,34 +96,23 @@ def test_broadcast(): |
|
|
|
@pytest.mark.skipif( |
|
|
|
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM" |
|
|
|
) |
|
|
|
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device") |
|
|
|
@pytest.mark.isolated_distributed |
|
|
|
def test_all_gather(): |
|
|
|
world_size = 2 |
|
|
|
server = dist.Server() |
|
|
|
port = server.py_server_port |
|
|
|
|
|
|
|
def worker(rank, data, expect, port): |
|
|
|
if mge.get_device_count("gpu") < world_size: |
|
|
|
return |
|
|
|
dist.init_process_group("localhost", port, world_size, rank, rank) |
|
|
|
inp = tensor(data) |
|
|
|
@dist.launcher(n_gpus=2) |
|
|
|
def worker(data, expect): |
|
|
|
rank = dist.get_rank() |
|
|
|
inp = tensor(data[rank]) |
|
|
|
output = all_gather(inp) |
|
|
|
assert np.allclose(output.numpy(), expect) |
|
|
|
assert np.allclose(output.numpy(), expect[rank]) |
|
|
|
|
|
|
|
def check(shape): |
|
|
|
x = np.random.rand(*shape).astype("float32") |
|
|
|
y = np.random.rand(*shape).astype("float32") |
|
|
|
z = np.concatenate((x, y)) |
|
|
|
p0 = mp.Process(target=worker, args=(0, x, z, port)) |
|
|
|
p1 = mp.Process(target=worker, args=(1, y, z, port)) |
|
|
|
|
|
|
|
p0.start() |
|
|
|
p1.start() |
|
|
|
|
|
|
|
p0.join(10) |
|
|
|
p1.join(10) |
|
|
|
|
|
|
|
assert p0.exitcode == 0 and p1.exitcode == 0 |
|
|
|
data = (x, y) |
|
|
|
expect = (z, z) |
|
|
|
worker(data, expect) |
|
|
|
|
|
|
|
for shape in [(2, 3), (8, 10), (99, 77)]: |
|
|
|
check(shape) |
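# all_gather semantics checked above: every rank receives the inputs of all ranks
# concatenated along axis 0, i.e. z = np.concatenate((x, y)) on both ranks.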
|
|
@@ -157,34 +124,23 @@ def test_all_gather(): |
|
|
|
@pytest.mark.skipif( |
|
|
|
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM" |
|
|
|
) |
|
|
|
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device") |
|
|
|
@pytest.mark.isolated_distributed |
|
|
|
def test_reduce_scatter_sum(): |
|
|
|
world_size = 2 |
|
|
|
server = dist.Server() |
|
|
|
port = server.py_server_port |
|
|
|
|
|
|
|
def worker(rank, data, expect, port): |
|
|
|
if mge.get_device_count("gpu") < world_size: |
|
|
|
return |
|
|
|
dist.init_process_group("localhost", port, world_size, rank, rank) |
|
|
|
inp = tensor(data) |
|
|
|
@dist.launcher(n_gpus=2) |
|
|
|
def worker(data, expect): |
|
|
|
rank = dist.get_rank() |
|
|
|
inp = tensor(data[rank]) |
|
|
|
output = reduce_scatter_sum(inp) |
|
|
|
assert np.allclose(output.numpy(), expect) |
|
|
|
assert np.allclose(output.numpy(), expect[rank]) |
|
|
|
|
|
|
|
def check(shape): |
|
|
|
x = np.random.rand(*shape).astype("float32") |
|
|
|
y = np.random.rand(*shape).astype("float32") |
|
|
|
z = x + y |
|
|
|
p0 = mp.Process(target=worker, args=(0, x, z[: shape[0] // 2], port)) |
|
|
|
p1 = mp.Process(target=worker, args=(1, y, z[shape[0] // 2 :], port)) |
|
|
|
|
|
|
|
p0.start() |
|
|
|
p1.start() |
|
|
|
|
|
|
|
p0.join(10) |
|
|
|
p1.join(10) |
|
|
|
|
|
|
|
assert p0.exitcode == 0 and p1.exitcode == 0 |
|
|
|
data = (x, y) |
|
|
|
expect = (z[: shape[0] // 2], z[shape[0] // 2 :]) |
|
|
|
worker(data, expect) |
|
|
|
|
|
|
|
for shape in [(2, 4), (8, 10), (88, 44)]: |
|
|
|
check(shape) |
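# reduce_scatter_sum semantics checked above: the inputs are first summed across
# ranks (z = x + y) and the result is then split along axis 0, rank 0 receiving
# z[: shape[0] // 2] and rank 1 receiving z[shape[0] // 2 :]; hence the even first
# dimensions in the shapes tested.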
|
|
@@ -196,34 +152,23 @@ def test_reduce_scatter_sum(): |
|
|
|
@pytest.mark.skipif( |
|
|
|
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM" |
|
|
|
) |
|
|
|
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device") |
|
|
|
@pytest.mark.isolated_distributed |
|
|
|
def test_all_reduce_sum(): |
|
|
|
world_size = 2 |
|
|
|
server = dist.Server() |
|
|
|
port = server.py_server_port |
|
|
|
|
|
|
|
def worker(rank, data, expect, port): |
|
|
|
if mge.get_device_count("gpu") < world_size: |
|
|
|
return |
|
|
|
dist.init_process_group("localhost", port, world_size, rank, rank) |
|
|
|
inp = tensor(data) |
|
|
|
@dist.launcher(n_gpus=2) |
|
|
|
def worker(data, expect): |
|
|
|
rank = dist.get_rank() |
|
|
|
inp = tensor(data[rank]) |
|
|
|
output = all_reduce_sum(inp) |
|
|
|
assert np.allclose(output.numpy(), expect) |
|
|
|
assert np.allclose(output.numpy(), expect[rank]) |
|
|
|
|
|
|
|
def check(shape): |
|
|
|
x = np.random.rand(*shape).astype("float32") |
|
|
|
y = np.random.rand(*shape).astype("float32") |
|
|
|
z = x + y |
|
|
|
p0 = mp.Process(target=worker, args=(0, x, z, port)) |
|
|
|
p1 = mp.Process(target=worker, args=(1, y, z, port)) |
|
|
|
|
|
|
|
p0.start() |
|
|
|
p1.start() |
|
|
|
|
|
|
|
p0.join(10) |
|
|
|
p1.join(10) |
|
|
|
|
|
|
|
assert p0.exitcode == 0 and p1.exitcode == 0 |
|
|
|
data = (x, y) |
|
|
|
expect = (z, z) |
|
|
|
worker(data, expect) |
|
|
|
|
|
|
|
for shape in [(2, 3), (8, 10), (99, 77)]: |
|
|
|
check(shape) |
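# all_reduce_sum semantics checked above: unlike the rooted reduce_sum, every rank
# ends up with the full sum z = x + y, so expect = (z, z).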
|
|
@@ -235,34 +180,23 @@ def test_all_reduce_sum(): |
|
|
|
@pytest.mark.skipif( |
|
|
|
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM" |
|
|
|
) |
|
|
|
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device") |
|
|
|
@pytest.mark.isolated_distributed |
|
|
|
def test_all_reduce_max(): |
|
|
|
world_size = 2 |
|
|
|
server = dist.Server() |
|
|
|
port = server.py_server_port |
|
|
|
|
|
|
|
def worker(rank, data, expect, port): |
|
|
|
if mge.get_device_count("gpu") < world_size: |
|
|
|
return |
|
|
|
dist.init_process_group("localhost", port, world_size, rank, rank) |
|
|
|
inp = tensor(data) |
|
|
|
@dist.launcher(n_gpus=2) |
|
|
|
def worker(data, expect): |
|
|
|
rank = dist.get_rank() |
|
|
|
inp = tensor(data[rank]) |
|
|
|
output = all_reduce_max(inp) |
|
|
|
assert np.allclose(output.numpy(), expect) |
|
|
|
assert np.allclose(output.numpy(), expect[rank]) |
|
|
|
|
|
|
|
def check(shape): |
|
|
|
x = np.random.rand(*shape).astype("float32") |
|
|
|
y = np.random.rand(*shape).astype("float32") |
|
|
|
z = np.maximum(x, y) |
|
|
|
p0 = mp.Process(target=worker, args=(0, x, z, port)) |
|
|
|
p1 = mp.Process(target=worker, args=(1, y, z, port)) |
|
|
|
|
|
|
|
p0.start() |
|
|
|
p1.start() |
|
|
|
|
|
|
|
p0.join(10) |
|
|
|
p1.join(10) |
|
|
|
|
|
|
|
assert p0.exitcode == 0 and p1.exitcode == 0 |
|
|
|
data = (x, y) |
|
|
|
expect = (z, z) |
|
|
|
worker(data, expect) |
|
|
|
|
|
|
|
for shape in [(2, 3), (8, 10), (99, 77)]: |
|
|
|
check(shape) |
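# all_reduce_max semantics checked above: every rank receives the elementwise
# maximum z = np.maximum(x, y).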
|
|
@@ -274,34 +208,23 @@ def test_all_reduce_max(): |
|
|
|
@pytest.mark.skipif( |
|
|
|
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM" |
|
|
|
) |
|
|
|
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device") |
|
|
|
@pytest.mark.isolated_distributed |
|
|
|
def test_all_reduce_min(): |
|
|
|
world_size = 2 |
|
|
|
server = dist.Server() |
|
|
|
port = server.py_server_port |
|
|
|
|
|
|
|
def worker(rank, data, expect, port): |
|
|
|
if mge.get_device_count("gpu") < world_size: |
|
|
|
return |
|
|
|
dist.init_process_group("localhost", port, world_size, rank, rank) |
|
|
|
inp = tensor(data) |
|
|
|
@dist.launcher(n_gpus=2) |
|
|
|
def worker(data, expect): |
|
|
|
rank = dist.get_rank() |
|
|
|
inp = tensor(data[rank]) |
|
|
|
output = all_reduce_min(inp) |
|
|
|
assert np.allclose(output.numpy(), expect) |
|
|
|
assert np.allclose(output.numpy(), expect[rank]) |
|
|
|
|
|
|
|
def check(shape): |
|
|
|
x = np.random.rand(*shape).astype("float32") |
|
|
|
y = np.random.rand(*shape).astype("float32") |
|
|
|
z = np.minimum(x, y) |
|
|
|
p0 = mp.Process(target=worker, args=(0, x, z, port)) |
|
|
|
p1 = mp.Process(target=worker, args=(1, y, z, port)) |
|
|
|
|
|
|
|
p0.start() |
|
|
|
p1.start() |
|
|
|
|
|
|
|
p0.join(10) |
|
|
|
p1.join(10) |
|
|
|
|
|
|
|
assert p0.exitcode == 0 and p1.exitcode == 0 |
|
|
|
data = (x, y) |
|
|
|
expect = (z, z) |
|
|
|
worker(data, expect) |
|
|
|
|
|
|
|
for shape in [(2, 3), (8, 10), (99, 77)]: |
|
|
|
check(shape) |
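# all_reduce_min semantics checked above: every rank receives the elementwise
# minimum z = np.minimum(x, y).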
|
|
@@ -313,20 +236,16 @@ def test_all_reduce_min(): |
|
|
|
@pytest.mark.skipif( |
|
|
|
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM" |
|
|
|
) |
|
|
|
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device") |
|
|
|
@pytest.mark.isolated_distributed |
|
|
|
def test_gather(): |
|
|
|
world_size = 2 |
|
|
|
server = dist.Server() |
|
|
|
port = server.py_server_port |
|
|
|
|
|
|
|
def worker(rank, data, expect, port): |
|
|
|
if mge.get_device_count("gpu") < world_size: |
|
|
|
return |
|
|
|
dist.init_process_group("localhost", port, world_size, rank, rank) |
|
|
|
inp = tensor(data) |
|
|
|
@dist.launcher(n_gpus=2) |
|
|
|
def worker(data, expect): |
|
|
|
rank = dist.get_rank() |
|
|
|
inp = tensor(data[rank]) |
|
|
|
output = gather(inp) |
|
|
|
if rank == 0: |
|
|
|
assert np.allclose(output.numpy(), expect) |
|
|
|
assert np.allclose(output.numpy(), expect[rank]) |
|
|
|
else: |
|
|
|
assert np.allclose(output.numpy(), 0) |
|
|
|
|
|
|
@@ -334,16 +253,9 @@ def test_gather(): |
|
|
|
x = np.random.rand(*shape).astype("float32") |
|
|
|
y = np.random.rand(*shape).astype("float32") |
|
|
|
z = np.concatenate((x, y)) |
|
|
|
p0 = mp.Process(target=worker, args=(0, x, z, port)) |
|
|
|
p1 = mp.Process(target=worker, args=(1, y, None, port)) |
|
|
|
|
|
|
|
p0.start() |
|
|
|
p1.start() |
|
|
|
|
|
|
|
p0.join(10) |
|
|
|
p1.join(10) |
|
|
|
|
|
|
|
assert p0.exitcode == 0 and p1.exitcode == 0 |
|
|
|
data = (x, y) |
|
|
|
expect = (z, None) |
|
|
|
worker(data, expect) |
|
|
|
|
|
|
|
for shape in [(2, 3), (8, 10), (99, 77)]: |
|
|
|
check(shape) |
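# gather semantics checked above: like all_gather but rooted, so only rank 0
# receives np.concatenate((x, y)); the non-root expectation is None.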
|
|
@@ -355,33 +267,22 @@ def test_gather(): |
|
|
|
@pytest.mark.skipif( |
|
|
|
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM" |
|
|
|
) |
|
|
|
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device") |
|
|
|
@pytest.mark.isolated_distributed |
|
|
|
def test_scatter(): |
|
|
|
world_size = 2 |
|
|
|
server = dist.Server() |
|
|
|
port = server.py_server_port |
|
|
|
|
|
|
|
def worker(rank, data, expect, port): |
|
|
|
if mge.get_device_count("gpu") < world_size: |
|
|
|
return |
|
|
|
dist.init_process_group("localhost", port, world_size, rank, rank) |
|
|
|
inp = tensor(data) |
|
|
|
@dist.launcher(n_gpus=2) |
|
|
|
def worker(data, expect): |
|
|
|
rank = dist.get_rank() |
|
|
|
inp = tensor(data[rank]) |
|
|
|
output = scatter(inp) |
|
|
|
assert np.allclose(output.numpy(), expect) |
|
|
|
assert np.allclose(output.numpy(), expect[rank]) |
|
|
|
|
|
|
|
def check(shape): |
|
|
|
x = np.random.rand(*shape).astype("float32") |
|
|
|
y = x + 1 |
|
|
|
p0 = mp.Process(target=worker, args=(0, x, x[: shape[0] // 2], port)) |
|
|
|
p1 = mp.Process(target=worker, args=(1, y, x[shape[0] // 2 :], port)) |
|
|
|
|
|
|
|
p0.start() |
|
|
|
p1.start() |
|
|
|
|
|
|
|
p0.join(10) |
|
|
|
p1.join(10) |
|
|
|
|
|
|
|
assert p0.exitcode == 0 and p1.exitcode == 0 |
|
|
|
data = (x, y) |
|
|
|
expect = (x[: shape[0] // 2], x[shape[0] // 2 :]) |
|
|
|
worker(data, expect) |
|
|
|
|
|
|
|
for shape in [(2, 3), (8, 10), (100, 77)]: |
|
|
|
check(shape) |
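# scatter semantics checked above: rank 0's tensor x is split along axis 0 and
# distributed, rank 0 keeping x[: shape[0] // 2] and rank 1 receiving
# x[shape[0] // 2 :]; rank 1's own input (y = x + 1) is ignored.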
|
|
@@ -393,35 +294,24 @@ def test_scatter(): |
|
|
|
@pytest.mark.skipif( |
|
|
|
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM" |
|
|
|
) |
|
|
|
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device") |
|
|
|
@pytest.mark.isolated_distributed |
|
|
|
def test_all_to_all(): |
|
|
|
world_size = 2 |
|
|
|
server = dist.Server() |
|
|
|
port = server.py_server_port |
|
|
|
|
|
|
|
def worker(rank, data, expect, port): |
|
|
|
if mge.get_device_count("gpu") < world_size: |
|
|
|
return |
|
|
|
dist.init_process_group("localhost", port, world_size, rank, rank) |
|
|
|
inp = tensor(data) |
|
|
|
@dist.launcher(n_gpus=2) |
|
|
|
def worker(data, expect): |
|
|
|
rank = dist.get_rank() |
|
|
|
inp = tensor(data[rank]) |
|
|
|
output = all_to_all(inp) |
|
|
|
assert np.allclose(output.numpy(), expect) |
|
|
|
assert np.allclose(output.numpy(), expect[rank]) |
|
|
|
|
|
|
|
def check(shape): |
|
|
|
x = np.random.rand(*shape).astype("float32") |
|
|
|
y = np.random.rand(*shape).astype("float32") |
|
|
|
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2])) |
|
|
|
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :])) |
|
|
|
p0 = mp.Process(target=worker, args=(0, x, a, port)) |
|
|
|
p1 = mp.Process(target=worker, args=(1, y, b, port)) |
|
|
|
|
|
|
|
p0.start() |
|
|
|
p1.start() |
|
|
|
|
|
|
|
p0.join(10) |
|
|
|
p1.join(10) |
|
|
|
|
|
|
|
assert p0.exitcode == 0 and p1.exitcode == 0 |
|
|
|
data = (x, y) |
|
|
|
expect = (a, b) |
|
|
|
worker(data, expect) |
|
|
|
|
|
|
|
for shape in [(2, 3), (8, 10), (100, 77)]: |
|
|
|
check(shape) |
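# all_to_all semantics checked above: each rank splits its input along axis 0 and
# exchanges chunks, so rank 0 ends with np.concatenate((x[:h], y[:h])) and rank 1
# with np.concatenate((x[h:], y[h:])), where h = shape[0] // 2.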
|
|
@@ -433,33 +323,21 @@ def test_all_to_all(): |
|
|
|
@pytest.mark.skipif( |
|
|
|
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM" |
|
|
|
) |
|
|
|
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device") |
|
|
|
@pytest.mark.isolated_distributed |
|
|
|
def test_io_remote(): |
|
|
|
world_size = 2 |
|
|
|
server = dist.Server() |
|
|
|
port = server.py_server_port |
|
|
|
val = np.random.rand(4, 5).astype(np.float32) |
|
|
|
|
|
|
|
def worker(rank): |
|
|
|
if mge.get_device_count("gpu") < world_size: |
|
|
|
return |
|
|
|
@dist.launcher(n_gpus=2) |
|
|
|
def worker(): |
|
|
|
rank = dist.get_rank() |
|
|
|
if rank == 0: # remote send |
|
|
|
dist.init_process_group("localhost", port, world_size, rank, rank) |
|
|
|
x = Tensor(val, device="gpu0") |
|
|
|
y = remote_send(x, 1) |
|
|
|
assert y.numpy()[0] == 0 |
|
|
|
else: # remote recv |
|
|
|
dist.init_process_group("localhost", port, world_size, rank, rank) |
|
|
|
y = remote_recv(0, val.shape, val.dtype) |
|
|
|
assert y.device == "gpu1" |
|
|
|
np.testing.assert_almost_equal(val, y.numpy()) |
|
|
|
|
|
|
|
procs = [] |
|
|
|
for rank in range(world_size): |
|
|
|
p = mp.Process(target=worker, args=(rank,)) |
|
|
|
p.start() |
|
|
|
procs.append(p) |
|
|
|
|
|
|
|
for p in procs: |
|
|
|
p.join(10) |
|
|
|
assert p.exitcode == 0 |
|
|
|
worker() |
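# remote_send/remote_recv exercised above are point-to-point rather than collective:
# rank 0 sends val to rank 1, which receives it on gpu1 and checks it elementwise.
#
# A minimal end-to-end sketch of the launcher pattern this diff introduces, kept in a
# comment because it is illustrative only; it reuses only names that appear above
# (dist.launcher, dist.get_rank, all_reduce_sum, tensor) and assumes two visible GPUs:
#
#     import numpy as np
#     import megengine.distributed as dist
#     from megengine import tensor
#     from megengine.functional.distributed import all_reduce_sum
#
#     @dist.launcher(n_gpus=2)
#     def worker(data, expect):
#         rank = dist.get_rank()
#         out = all_reduce_sum(tensor(data[rank]))
#         assert np.allclose(out.numpy(), expect[rank])
#
#     x = np.random.rand(2, 3).astype("float32")
#     y = np.random.rand(2, 3).astype("float32")
#     worker((x, y), (x + y, x + y))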