diff --git a/python/paddle/optimizer/optimizer.py b/python/paddle/optimizer/optimizer.py
index 2c2f6f1ce7e14ff5960a4bf492e9cf9158ac727c..cb602ff0b3754f82fb2ef9d8b78de18e46d56778 100644
--- a/python/paddle/optimizer/optimizer.py
+++ b/python/paddle/optimizer/optimizer.py
@@ -21,6 +21,7 @@ from collections import defaultdict
 
 from paddle.fluid.distribute_lookup_table import find_distributed_lookup_table
 from paddle.fluid.framework import Program, Variable, name_scope, default_main_program, default_startup_program, device_guard
+import paddle
 
 from ..fluid import framework
 from ..fluid import layers
@@ -308,7 +309,8 @@ class Optimizer(object):
                         name=unique_name.generate("learning_rate"),
                         shape=[1],
                         value=float(self._learning_rate),
-                        dtype='float32' if self._dtype is None else self._dtype,
+                        dtype=paddle.get_default_dtype()
+                        if self._dtype is None else self._dtype,
                         persistable=True)
             # get learning rate Tensor from LearningRateDecay
         elif isinstance(self._learning_rate, LearningRateDecay):
@@ -336,7 +338,8 @@ class Optimizer(object):
                 name=unique_name.generate("learning_rate"),
                 shape=[1],
                 value=float(self._learning_rate),
-                dtype='float32' if self._dtype is None else self._dtype,
+                dtype=paddle.get_default_dtype()
+                if self._dtype is None else self._dtype,
                 persistable=True)
 
     @framework.dygraph_only
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 4c7eef5fa65108b35cf9792280eeb6c1e7ddf3fc..9dfb31a5ac25b2afc9fe52bfc8bab5ad277d80b8 100755
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -2090,6 +2090,7 @@ def tanh(x, name=None):
         return core.ops.tanh(x)
 
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'tanh')
+    check_type(x, 'x', (Variable), 'tanh')
     helper = LayerHelper('tanh', **locals())
     out = helper.create_variable_for_type_inference(x.dtype)
     helper.append_op(type='tanh', inputs={'X': x}, outputs={'Out': out})
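
For reviewers, a minimal sketch of the behavior the optimizer change targets; this snippet is not part of the patch, and the Linear/Adam pairing is only illustrative. Before the patch, the learning-rate variable was always created as 'float32' when self._dtype was None, ignoring the global default dtype; after it, the variable follows paddle.get_default_dtype():

    import paddle

    # Assumed setup: dygraph (imperative) mode, any layer/optimizer pair.
    paddle.set_default_dtype('float64')

    linear = paddle.nn.Linear(2, 2)  # parameters created as float64
    adam = paddle.optimizer.Adam(
        learning_rate=0.001, parameters=linear.parameters())

    # Pre-patch, the optimizer created its learning-rate variable as
    # float32 regardless of the setting above; with the patch, the
    # variable is created with paddle.get_default_dtype(), i.e. float64.
    print(paddle.get_default_dtype())  # float64

This keeps the learning rate's dtype consistent with parameters built under a non-default global dtype, avoiding a mixed float32/float64 update step.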