Unverified commit c33ba9d4 authored by zhenhailiu, committed by GitHub

Fix some tests for issue 52842 (#53795)

* polish

* polish
Parent: 69161a96
......@@ -78,8 +78,8 @@ void BindTCPStore(py::module *m) {
[](phi::distributed::Store &self,
const std::string &key) -> py::bytes {
auto data = self.get(key);
- return py::bytes(reinterpret_cast<char *>(data.data()),
-                  data.size());
+ std::string s(data.begin(), data.end());
+ return py::bytes(s);
},
py::arg("key"),
py::call_guard<py::gil_scoped_release>())
......
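The binding change above replaces the pointer-cast construction of py::bytes with one that first copies the byte vector into a std::string. Viewed from Python, the behavior being bound is that Store.get returns a bytes object covering the whole payload. A minimal pure-Python model of that behavior (store_get_as_bytes and raw_values are illustrative names, not Paddle APIs):

```python
def store_get_as_bytes(raw_values):
    # raw_values stands in for the std::vector<uint8_t> returned by
    # self.get(key) on the C++ side; the whole vector becomes the payload.
    return bytes(bytearray(raw_values))


# Byte values, including zero bytes and values >= 128, map one-to-one.
assert store_get_as_bytes([0, 255, 10, 0]) == b"\x00\xff\n\x00"
```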
......@@ -81,6 +81,7 @@ def _reduce_scatter_in_static_mode(tensor, tensor_or_tensor_list, group):
'int8',
'uint8',
'bool',
+ 'uint16',
],
op_type,
)
......
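The remaining Python hunks in this commit make the same one-line change: 'uint16' is appended to each collective helper's dtype whitelist, because Paddle stores bfloat16 tensors with a uint16 storage dtype. The check call itself (most likely check_variable_and_dtype, judging by the trailing op_type argument) is truncated in this diff; a toy, self-contained model of the pattern being extended looks like this (ALLOWED_DTYPES and check_dtype are illustrative names, not Paddle code):

```python
# Toy model of the dtype whitelist check that each hunk below extends.
ALLOWED_DTYPES = [
    # ... other supported dtypes elided ...
    'int8',
    'uint8',
    'bool',
    'uint16',  # added by this commit: the storage dtype of bfloat16 tensors
]


def check_dtype(dtype, op_type):
    if dtype not in ALLOWED_DTYPES:
        raise TypeError(f"{op_type} does not support '{dtype}' inputs")


check_dtype('uint16', 'all_reduce')  # passes once 'uint16' is whitelisted
```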
......@@ -43,6 +43,7 @@ def all_gather_new(tensor_list, tensor, group=None):
'bool',
'int8',
'uint8',
+ 'uint16',
],
op_type,
)
......@@ -58,6 +59,7 @@ def all_gather_new(tensor_list, tensor, group=None):
'bool',
'int8',
'uint8',
+ 'uint16',
],
op_type,
)
......
......@@ -36,6 +36,7 @@ def all_reduce_new(tensor, reduce_type=str(dist.ReduceOp.SUM), group=None):
'int8',
'uint8',
'bool',
+ 'uint16',
],
op_type,
)
......
......@@ -35,6 +35,7 @@ def broadcast_new(tensor, src, group=None, sync_op=True):
'int8',
'uint8',
'bool',
+ 'uint16',
],
op_type,
)
......
......@@ -36,6 +36,7 @@ def reduce_new(tensor, dst, reduce_type=str(dist.ReduceOp.SUM), group=None):
'int8',
'uint8',
'bool',
+ 'uint16',
],
op_type,
)
......
......@@ -35,6 +35,7 @@ def send_new(tensor, dst, group=None, sync_op=True):
'int8',
'uint8',
'bool',
+ 'uint16',
],
op_type,
)
......@@ -67,6 +68,7 @@ def recv_new(tensor, src, group=None, sync_op=True, dtype='float32'):
'int8',
'uint8',
'bool',
+ 'uint16',
],
op_type,
)
......
......@@ -22,7 +22,7 @@ import unittest
from contextlib import closing
import numpy as np
- from paddle_bfloat import bfloat16
+ from eager_op_test import convert_float_to_uint16, convert_uint16_to_float
import paddle
import paddle.distributed as dist
......@@ -45,6 +45,14 @@ def create_float_test_data(shape=None, dtype=None, seed=None):
return data
+ def create_bfloat16_test_data(shape=None, seed=None):
+     if seed:
+         np.random.seed(seed)
+     data = np.random.uniform(-100.0, 100.0, shape).astype("float32")
+     data = convert_float_to_uint16(data)
+     return data
def create_int_test_data(shape=None, dtype=None, seed=None):
if seed:
np.random.seed(seed)
......@@ -76,8 +84,9 @@ def create_test_data(shape=None, dtype=None, seed=None):
if dtype == "float32" or dtype == "float16" or dtype == "float64":
return create_float_test_data(shape=shape, dtype=dtype, seed=seed)
elif dtype == "bfloat16":
+ return create_bfloat16_test_data(shape=shape, seed=seed)
# since numpy does not support bfloat16 yet, use `paddle_bfloat` to replace
- return create_float_test_data(shape=shape, dtype=bfloat16, seed=seed)
+ # return create_float_test_data(shape=shape, dtype=bfloat16, seed=seed)
elif dtype == "bool":
return create_bool_test_data(shape=shape, seed=seed)
elif (
......@@ -344,8 +353,18 @@ class TestDistBase(unittest.TestCase):
input2 = create_test_data(shape=(10, 1000), dtype=dtype, seed=pid1)
# cast bfloat16 to float32 for numeric comparison
if dtype == "bfloat16":
- input1 = input1.astype("float32")
- input2 = input2.astype("float32")
+ def convertbf16(origin):
+     if origin.dtype == np.uint16:
+         return convert_uint16_to_float(origin)
+     else:
+         return origin.astype("float32")
+
+ input1 = convertbf16(input1)
+ input2 = convertbf16(input2)
+ tr0_out = [convertbf16(e) for e in tr0_out]
+ tr1_out = [convertbf16(e) for e in tr1_out]
if col_type == "allgather":
need_result = np.vstack((input1, input2))
tr_out0 = np.vstack((tr0_out[0], tr0_out[1]))
......
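The test changes above hinge on the two helpers now imported from the in-repo eager_op_test module: convert_float_to_uint16 packs float32 test data into the uint16 bit pattern Paddle uses for bfloat16 (this is what create_bfloat16_test_data produces), and convert_uint16_to_float unpacks it again so convertbf16 can compare results as float32. A rough numpy sketch of both directions, assuming plain truncation (the real helpers may round differently):

```python
import numpy as np


def to_bfloat16_bits(x):
    # Keep the upper 16 bits of each float32 value; the result has dtype
    # uint16, matching what create_bfloat16_test_data returns.
    x = np.asarray(x, dtype=np.float32)
    return (x.view(np.uint32) >> np.uint32(16)).astype(np.uint16)


def from_bfloat16_bits(u):
    # Re-expand the stored bfloat16 bit pattern to float32, as convertbf16
    # does via convert_uint16_to_float before comparing outputs.
    u = np.asarray(u, dtype=np.uint16)
    return (u.astype(np.uint32) << np.uint32(16)).view(np.float32)


x = np.random.uniform(-100.0, 100.0, (10, 1000)).astype("float32")
roundtrip = from_bfloat16_bits(to_bfloat16_bits(x))
# bfloat16 keeps only ~8 bits of mantissa, so comparisons against float32
# references need a loose tolerance.
assert np.allclose(roundtrip, x, rtol=1e-2)
```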