Unverified commit 724d49da, authored by 0x45f, committed by GitHub

[Dy2St] Close enable_inplace PASS for PE and open test_mnist_pure_fp16.py for Windows (#38752)

* Close the enable_inplace PASS for PE and test dy2st pure fp16 training stability

* Add some comments

* Enlarge atol
Parent f81569e3
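The gist of the change, sketched below on a hypothetical toy layer (`SimpleNet` is invented here purely for illustration; the real tests convert the MNIST and ResNet models): build a `paddle.static.BuildStrategy`, switch off its `enable_inplace` pass, and hand it to `paddle.jit.to_static`, exactly as the diffs below do.

```python
import paddle

# Hypothetical toy layer, used only to illustrate the pattern applied in the
# diffs below; the actual tests convert the MNIST and ResNet models.
class SimpleNet(paddle.nn.Layer):
    def __init__(self):
        super(SimpleNet, self).__init__()
        self.fc = paddle.nn.Linear(10, 10)

    def forward(self, x):
        return self.fc(x)

net = SimpleNet()

# Disable the enable_inplace PASS of ParallelExecutor (PE), which was found to
# make dy2st pure fp16 training loss unstable.
build_strategy = paddle.static.BuildStrategy()
build_strategy.enable_inplace = False

# Pass the strategy to to_static so the converted program is built without
# that pass.
static_net = paddle.jit.to_static(net, build_strategy=build_strategy)
```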
@@ -33,7 +33,7 @@ class TestPureFP16(TestMNIST):
         return self.train(to_static=False)
 
     def test_mnist_to_static(self):
-        if paddle.fluid.is_compiled_with_cuda() and os.name != 'nt':
+        if paddle.fluid.is_compiled_with_cuda():
            dygraph_loss = self.train_dygraph()
            static_loss = self.train_static()
            # NOTE: In pure fp16 training, loss is not stable, so we enlarge atol here.
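As a side note to the NOTE in the hunk above about enlarging atol: a minimal sketch of the kind of comparison involved, with made-up loss values and an illustrative tolerance (the exact assertion and atol value live in the test file, not here).

```python
import numpy as np

# Made-up losses standing in for self.train_dygraph() / self.train_static().
dygraph_loss = np.array([2.303, 1.874, 1.562])
static_loss = np.array([2.303, 1.875, 1.561])

# Pure fp16 training loss is not bit-wise reproducible, so the dygraph and
# static losses are compared with a looser absolute tolerance than the default.
assert np.allclose(dygraph_loss, static_loss, atol=1e-2)
```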
@@ -52,7 +52,11 @@ class TestPureFP16(TestMNIST):
         if to_static:
             print("Successfully to apply @to_static.")
-            mnist = paddle.jit.to_static(mnist)
+            build_strategy = paddle.static.BuildStrategy()
+            # Why set `build_strategy.enable_inplace = False` here?
+            # Because we find that this PASS strategy of PE makes dy2st training loss unstable.
+            build_strategy.enable_inplace = False
+            mnist = paddle.jit.to_static(mnist, build_strategy=build_strategy)
         optimizer = paddle.optimizer.Adam(
             learning_rate=0.001, parameters=mnist.parameters())
......
@@ -106,7 +106,11 @@ def train(to_static, build_strategy=None):
 class TestResnet(unittest.TestCase):
     def train(self, to_static):
         program_translator.enable(to_static)
-        return train(to_static)
+        build_strategy = paddle.static.BuildStrategy()
+        # Why set `build_strategy.enable_inplace = False` here?
+        # Because we find that this PASS strategy of PE makes dy2st training loss unstable.
+        build_strategy.enable_inplace = False
+        return train(to_static, build_strategy)
 
     def test_resnet(self):
         if fluid.is_compiled_with_cuda():
......