提交 497395d3 编写于 作者: N niuliling123

modified for SD Layer

上级 7849d58d
@@ -171,6 +171,11 @@ class Linear(Layer):
        self.name = name

    def forward(self, input):
with paddle.amp.auto_cast(custom_white_list={'elementwise_add','fused_gemm_epilogue'}, dtype='bfloat16'):
out = paddle.incubate.nn.functional.fused_linear(
x=input, weight=self.weight, bias=self.bias, name=self.name
)
return out
        out = F.linear(
            x=input, weight=self.weight, bias=self.bias, name=self.name
        )
......
@@ -15,7 +15,7 @@
# TODO: define classes of convolutional neural network
import numpy as np
import paddle
from paddle import get_flags
from ...device import (
@@ -704,6 +704,7 @@ class Conv2D(_ConvNd):
        )

    def forward(self, x):
with paddle.amp.auto_cast(custom_white_list={'elementwise_add'}, level='O1', dtype='bfloat16'):
        if self._padding_mode != 'zeros':
            x = F.pad(
x, x,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册