Unverified commit 0b39b244, authored by Ghost Screaming and committed by GitHub

Support BF16 training for sharding (#46846)

* Fix a bug in the reduce_sum op: when input.numel() > INT32_MAX, its result is wrong.

* support pure bfloat16

* support bf16 linear

* update PR to pass CI

* tiny fix where_grad_kernel.cu

* Support bfloat16 type for reducer and sharding.

* Fix some bugs.

* Polish code.

* Polish code.

* Add bfloat16 data type to fill_grad kernels.
Co-authored-by: sneaxiy <sneaxiy@126.com>
Parent 7c6835ca
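Before the per-file hunks, here is a rough end-to-end sketch of how BF16 sharded training can be driven from Python once this support lands. The `dtype='bfloat16'` argument to `paddle.amp.decorate` and the `'os_g'` sharding level are assumptions based on the public Paddle API of the same era, not code from this commit.

```python
import paddle
from paddle.distributed import fleet
from paddle.distributed.sharding import group_sharded_parallel

fleet.init(is_collective=True)

model = paddle.nn.Linear(1024, 1024)
optimizer = paddle.optimizer.AdamW(parameters=model.parameters())

# Cast parameters to bfloat16 (assumed: amp.decorate accepts dtype='bfloat16').
model, optimizer = paddle.amp.decorate(
    models=model, optimizers=optimizer, level='O2', dtype='bfloat16')

# Stage-2 sharding: optimizer state and gradients are sharded across ranks.
model, optimizer, _ = group_sharded_parallel(model, optimizer, level='os_g')

x = paddle.randn([8, 1024]).astype('bfloat16')
loss = model(x).mean()
loss.backward()          # bf16 gradients flow through the sharded reducer
optimizer.step()
optimizer.clear_grad()
```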
@@ -254,6 +254,10 @@ static void ConcatTensorsWithType(
       ConcatTensorsForAllReduce<DeviceContext, double>()(
           context, dense_tensors_, p_dense_contents);
       break;
+    case phi::DataType::BFLOAT16:
+      ConcatTensorsForAllReduce<DeviceContext, platform::bfloat16>()(
+          context, dense_tensors_, p_dense_contents);
+      break;
     default:
       PADDLE_THROW(platform::errors::Unimplemented(
           "Data type (%s) is not supported when it concats tensors for "
@@ -281,6 +285,10 @@ static void SplitTensorsWithType(const DeviceContext &context,
       SplitTensorsForAllReduce<DeviceContext, double>()(
           context, p_dense_contents, p_dense_tensors);
       break;
+    case phi::DataType::BFLOAT16:
+      SplitTensorsForAllReduce<DeviceContext, platform::bfloat16>()(
+          context, p_dense_contents, p_dense_tensors);
+      break;
     default:
       PADDLE_THROW(platform::errors::Unimplemented(
           "Data type (%s) is not supported when it splits tensors for "
......
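The reducer fuses all gradients of a bucket into one contiguous buffer before the collective and splits the result back afterwards; the two new BFLOAT16 cases let this happen for bfloat16 gradients. A minimal Python sketch of the same concat/split pattern (illustrative only, assuming bfloat16 casts are available; this is not the C++ code above):

```python
import math

import paddle

# Per-parameter gradients, which may now be bfloat16.
grads = [paddle.randn([4, 4]).astype('bfloat16'),
         paddle.randn([16]).astype('bfloat16')]

# Concat step: flatten every tensor into one dense buffer
# (what ConcatTensorsForAllReduce does before the all_reduce).
dense = paddle.concat([g.flatten() for g in grads])

# ... a collective such as paddle.distributed.all_reduce(dense) runs here ...

# Split step: slice the buffer back into the original shapes
# (what SplitTensorsForAllReduce does afterwards).
restored, offset = [], 0
for g in grads:
    n = math.prod(g.shape)
    restored.append(dense[offset:offset + n].reshape(g.shape))
    offset += n
```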
@@ -26,4 +26,5 @@ PD_REGISTER_KERNEL(fill_grad,
                    int64_t,
                    int,
                    paddle::platform::float16,
+                   paddle::platform::bfloat16,
                    bool) {}
@@ -26,4 +26,5 @@ PD_REGISTER_KERNEL(fill,
                    int64_t,
                    int,
                    paddle::platform::float16,
+                   paddle::platform::bfloat16,
                    bool) {}
@@ -27,4 +27,5 @@ PD_REGISTER_KERNEL(fill_grad,
                    int64_t,
                    int,
                    paddle::platform::float16,
+                   paddle::platform::bfloat16,
                    bool) {}
@@ -27,4 +27,5 @@ PD_REGISTER_KERNEL(fill,
                    int64_t,
                    int,
                    paddle::platform::float16,
+                   paddle::platform::bfloat16,
                    bool) {}
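The four registrations above (CPU and GPU variants of `fill` and `fill_grad`) add bfloat16 to the supported kernel types; `fill_grad` is the backward kernel used when an in-place fill participates in autograd. A small hedged check, assuming `paddle.full` and `Tensor.fill_` accept bfloat16 as in recent Paddle releases:

```python
import paddle

# Allocate and fill a bfloat16 tensor (dispatched to the full/fill kernels).
x = paddle.full([2, 3], 3.0, dtype='bfloat16')
x.fill_(1.0)  # in-place fill; its autograd companion is fill_grad
print(x)      # shape [2, 3], dtype bfloat16, all ones
```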
@@ -40,6 +40,7 @@ from .group_sharded_utils import Type, device_guard, GroupShardedClipGrad
 alignment = {"gpu": 256, "cpu": 4096}
 align = {
     Type.fp16.value: 2,
+    Type.bf16.value: 2,
     Type.fp32.value: 4,
 }
......
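Here `align` holds the element size in bytes for each dtype (2 for fp16/bf16, 4 for fp32) and `alignment` the byte boundary each parameter's slice must start at in the fused buffer. A short illustrative sketch of the resulting padding computation (the helper name is made up, not taken from this file):

```python
alignment = {"gpu": 256, "cpu": 4096}
align = {"bf16": 2, "fp16": 2, "fp32": 4}

def padded_numel(numel, dtype="bf16", device="gpu"):
    """Round an element count up so its byte size is a multiple of the
    device alignment (illustrative helper, not part of the diff)."""
    boundary = alignment[device] // align[dtype]   # elements per aligned block
    remainder = numel % boundary
    return numel if remainder == 0 else numel + (boundary - remainder)

# A bf16 parameter with 1000 elements is padded to 1024 (2048 bytes, a
# multiple of the 256-byte GPU alignment).
print(padded_numel(1000))  # -> 1024
```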
@@ -555,6 +555,12 @@ class GroupShardedStage2(nn.Layer):
                     "====== FP16 GradStorage size: {:.2f}M parameters, Model size {:.2f}M parameters ======"
                     .format(rank_buffer_size[Type.fp16.value] / 2**19,
                             model_size / 2**19))
+            if Type.bf16.value in rank_buffer_size.keys():
+                # BF16 GradStorage and model size
+                logger_.info(
+                    "====== BF16 GradStorage size: {:.2f}M parameters, Model size {:.2f}M parameters ======"
+                    .format(rank_buffer_size[Type.bf16.value] / 2**19,
+                            model_size / 2**19))
             if Type.fp32.value in rank_buffer_size.keys():
                 # FP32 GradStorage and model size
                 logger_.info(
......
@@ -51,6 +51,8 @@ class InternalStorage:
                 dtype=np.float16) if Type.fp16.value == dtype else np.zeros(
                     size, dtype=np.float32)
             self.buffer = core.eager.Tensor(value=value, place=core.CPUPlace())
+            if dtype == Type.bf16.value:
+                self.buffer = paddle.cast(self.buffer, dtype=paddle.bfloat16)
         else:
             self.buffer = paddle.zeros(size, dtype=dtype)
......
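NumPy has no native bfloat16, so when the storage is staged on the CPU the buffer is first created as a float16/float32 ndarray and only then cast to bfloat16 with `paddle.cast`; the non-CPU branch can allocate bfloat16 directly. A standalone sketch of that pattern, reusing the same `core.eager.Tensor` constructor as the diff (the import path is an assumption):

```python
import numpy as np
import paddle
from paddle.fluid import core  # assumed import path for core

size = [1024]

# CPU staging: numpy cannot allocate bfloat16, so build fp32 and cast.
value = np.zeros(size, dtype=np.float32)
buffer = core.eager.Tensor(value=value, place=core.CPUPlace())
buffer = paddle.cast(buffer, dtype=paddle.bfloat16)

# Direct allocation (non-CPU staging): bfloat16 is supported natively.
direct = paddle.zeros(size, dtype=paddle.bfloat16)
```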
@@ -40,6 +40,7 @@ class Type(Enum):
     Type of trainable parameters
     """
     fp16 = paddle.float16
+    bf16 = paddle.bfloat16
     fp32 = paddle.float32
......
@@ -41,6 +41,7 @@ class Type(Enum):
     Type of trainable parameters
     """
     fp16 = paddle.float16
+    bf16 = paddle.bfloat16
     fp32 = paddle.float32
......
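Both copies of the `Type` enum now expose `bf16 = paddle.bfloat16`, so the sharding code can bucket parameters and grad storages by bfloat16 exactly as it does for fp16 and fp32. A tiny usage sketch of the enum as defined above:

```python
from enum import Enum

import paddle

class Type(Enum):
    """Type of trainable parameters (mirrors the enum in this diff)."""
    fp16 = paddle.float16
    bf16 = paddle.bfloat16
    fp32 = paddle.float32

param = paddle.zeros([8], dtype=paddle.bfloat16)
# Buffers are keyed by the parameter dtype, e.g. rank_buffer_size[Type.bf16.value].
assert param.dtype == Type.bf16.value
```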