Unverified · Commit 5c85f1a7 authored by Ghost Screaming, committed by GitHub

Support BF16 training for sharding (#46846) (#47246)

* Fix a bug in the reduce_sum op: when input.numel() > INT32_MAX, the result
was wrong.

* Support pure bfloat16.

* Support bf16 linear.

* Update PR to pass CI.

* Tiny fix in where_grad_kernel.cu.

* Support bfloat16 type for reducer and sharding.

* Fix some bugs.

* Polish code.

* Polish code.

* Add bfloat16 datatype to fill_grad kernels.
Co-authored-by: sneaxiy <sneaxiy@126.com>
Parent: 82f1e1b7
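Taken together, the hunks below thread bfloat16 through three layers: the C++ gradient reducer (concat/split for fused allreduce), the fill/fill_grad kernel registrations, and the Python group-sharded storage helpers. A minimal sketch of the training setup this enables, assuming Paddle 2.4-era APIs (group_sharded_parallel, Layer.to) and an initialized collective environment; the script is illustrative, not part of the commit:

import paddle
from paddle.distributed import fleet
from paddle.distributed.sharding import group_sharded_parallel

fleet.init(is_collective=True)

model = paddle.nn.Linear(1024, 1024)
model.to(dtype="bfloat16")  # pure-bf16 parameters, so gradients are bf16 too
opt = paddle.optimizer.AdamW(parameters=model.parameters())

# level="os_g" shards optimizer state and gradients (stage 2)
model, opt, _ = group_sharded_parallel(model, opt, level="os_g")

x = paddle.randn([8, 1024]).cast("bfloat16")
loss = model(x).mean()
loss.backward()  # bf16 gradients pass through the reducer patched below
opt.step()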
@@ -254,6 +254,10 @@ static void ConcatTensorsWithType(
       ConcatTensorsForAllReduce<DeviceContext, double>()(
           context, dense_tensors_, p_dense_contents);
       break;
+    case phi::DataType::BFLOAT16:
+      ConcatTensorsForAllReduce<DeviceContext, platform::bfloat16>()(
+          context, dense_tensors_, p_dense_contents);
+      break;
     default:
       PADDLE_THROW(platform::errors::Unimplemented(
           "Data type (%s) is not supported when it concats tensors for "
@@ -281,6 +285,10 @@ static void SplitTensorsWithType(const DeviceContext &context,
       SplitTensorsForAllReduce<DeviceContext, double>()(
           context, p_dense_contents, p_dense_tensors);
       break;
+    case phi::DataType::BFLOAT16:
+      SplitTensorsForAllReduce<DeviceContext, platform::bfloat16>()(
+          context, p_dense_contents, p_dense_tensors);
+      break;
     default:
       PADDLE_THROW(platform::errors::Unimplemented(
           "Data type (%s) is not supported when it splits tensors for "
@@ -26,4 +26,5 @@ PD_REGISTER_KERNEL(fill_grad,
                    int64_t,
                    int,
                    paddle::platform::float16,
+                   paddle::platform::bfloat16,
                    bool) {}
@@ -26,4 +26,5 @@ PD_REGISTER_KERNEL(fill,
                    int64_t,
                    int,
                    paddle::platform::float16,
+                   paddle::platform::bfloat16,
                    bool) {}
@@ -27,4 +27,5 @@ PD_REGISTER_KERNEL(fill_grad,
                    int64_t,
                    int,
                    paddle::platform::float16,
+                   paddle::platform::bfloat16,
                    bool) {}
@@ -27,4 +27,5 @@ PD_REGISTER_KERNEL(fill,
                    int64_t,
                    int,
                    paddle::platform::float16,
+                   paddle::platform::bfloat16,
                    bool) {}
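The four registrations above (CPU and GPU variants of fill and fill_grad) make in-place fills work on bf16 tensors. A quick way to exercise them, assuming Tensor.fill_ and Tensor.zero_ are the public entry points that dispatch to the fill kernel (that mapping is an assumption here):

import paddle

x = paddle.ones([2, 3], dtype="bfloat16")
x.fill_(0.5)  # in-place fill on a bf16 tensor
x.zero_()     # zeroing grad storage is the common case in sharding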
@@ -43,6 +43,7 @@ from .group_sharded_utils import Type, device_guard, GroupShardedClipGrad
 alignment = {"gpu": 256, "cpu": 4096}
 align = {
     Type.fp16.value: 2,
+    Type.bf16.value: 2,
     Type.fp32.value: 4,
 }
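bf16, like fp16, occupies 2 bytes per element, so a 256-byte GPU allocation boundary corresponds to 128 bf16 elements. A worked example of the padding the storage code derives from these tables (names as defined above; the rounding formula is illustrative):

size = 1000                                         # elements in a parameter slice
elems = alignment["gpu"] // align[Type.bf16.value]  # 256 // 2 == 128
aligned = ((size + elems - 1) // elems) * elems     # rounds 1000 up to 1024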
@@ -532,6 +532,12 @@ class GroupShardedStage2(nn.Layer):
                 "====== FP16 GradStorage size: {:.2f}M parameters, Model size {:.2f}M parameters ======"
                 .format(rank_buffer_size[Type.fp16.value] / 2**19,
                         model_size / 2**19))
+            if Type.bf16.value in rank_buffer_size.keys():
+                # BF16 GradStorage and model size
+                logger_.info(
+                    "====== BF16 GradStorage size: {:.2f}M parameters, Model size {:.2f}M parameters ======"
+                    .format(rank_buffer_size[Type.bf16.value] / 2**19,
+                            model_size / 2**19))
             if Type.fp32.value in rank_buffer_size.keys():
                 # FP32 GradStorage and model size
                 logger_.info(
@@ -53,6 +53,8 @@ class InternalStorage:
                 dtype=np.float16) if Type.fp16.value == dtype else np.zeros(
                     size, dtype=np.float32)
             self.buffer = core.eager.Tensor(value=value, place=core.CPUPlace())
+            if dtype == Type.bf16.value:
+                self.buffer = paddle.cast(self.buffer, dtype=paddle.bfloat16)
         else:
             self.buffer = paddle.zeros(size, dtype=dtype)
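The cast exists because numpy has no native bfloat16 dtype: the CPU-placed buffer is first materialized as float32 zeros, then converted. The same pattern in isolation (illustrative):

import numpy as np
import paddle

value = np.zeros(1024, dtype=np.float32)  # numpy cannot allocate bf16 directly
buf = paddle.to_tensor(value, place=paddle.CPUPlace())
buf = paddle.cast(buf, paddle.bfloat16)   # now a CPU-resident bf16 buffer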
@@ -41,6 +41,7 @@ class Type(Enum):
     Type of trainable parameters
     """
     fp16 = paddle.float16
+    bf16 = paddle.bfloat16
     fp32 = paddle.float32
@@ -45,6 +45,7 @@ class Type(Enum):
     Type of trainable parameters
     """
     fp16 = paddle.float16
+    bf16 = paddle.bfloat16
     fp32 = paddle.float32
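The enum maps each storage key to a framework dtype, so sharding code can compare a parameter's dtype against Type.bf16.value directly. A self-contained check mirroring the class above:

from enum import Enum

import paddle

class Type(Enum):
    fp16 = paddle.float16
    bf16 = paddle.bfloat16
    fp32 = paddle.float32

p = paddle.ones([4], dtype="bfloat16")
assert p.dtype == Type.bf16.value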