diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.cu b/paddle/fluid/operators/softmax_with_cross_entropy_op.cu
index d00349e943b31327f515e409493137a9bcf20716..89aaac4cbe6399af08b3d340896df7a07e1be543 100644
--- a/paddle/fluid/operators/softmax_with_cross_entropy_op.cu
+++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.cu
@@ -404,7 +404,7 @@ class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel<T> {
       int batch_size = logits->dims()[0];
       int feature_size = logits->dims()[1];
       auto* logits_data = logits->data<T>();
-      auto* labels_data = labels->data();
+      auto* labels_data = labels->data();
       SoftmaxWithCrossEntropyFusedKernel(
           logits_data, labels_data, softmax_data, loss_data, batch_size,
           feature_size, context.cuda_device_context().stream());
diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py
index e1996e4fcef2765c4db4b170b996692d9fa33418..178e6cd48613cf25ea3026c6bf097b9b886e11bf 100644
--- a/python/paddle/fluid/dygraph/nn.py
+++ b/python/paddle/fluid/dygraph/nn.py
@@ -563,7 +563,7 @@ class LayerNorm(layers.Layer):
           >>> x = fluid.layers.layer_norm(input=data, begin_norm_axis=1)
         """

-        super(LayerNorm, self).__init__(name_scope, dtype)
+        super(LayerNorm, self).__init__(name_scope)
         self._scale = scale
         self._shift = shift
         self._begin_norm_axis = begin_norm_axis
@@ -840,7 +840,7 @@ class NCE(layers.Layer):
                  custom_dist=None,
                  seed=0,
                  is_sparse=False):
-        super(NCE, self).__init__(name_scope, dtype)
+        super(NCE, self).__init__(name_scope)
         self._param_attr = param_attr
         self._bias_attr = bias_attr
         self._num_total_classes = num_total_classes
@@ -1013,7 +1013,7 @@ class PRelu(layers.Layer):

     def __init__(self, name_scope, mode, param_attr=None):
-        super(PRelu, self).__init__(name_scope, dtype)
+        super(PRelu, self).__init__(name_scope)
         self._mode = mode
         self._param_attr = param_attr
         if self._mode not in ['all', 'channel', 'element']:
@@ -1090,7 +1090,7 @@ class BilinearTensorProduct(layers.Layer):
                  act=None,
                  param_attr=None,
                  bias_attr=None):
-        super(BilinearTensorProduct, self).__init__(name_scope, dtype)
+        super(BilinearTensorProduct, self).__init__(name_scope)
         self._param_attr = param_attr
         self._bias_attr = bias_attr
         self._act = act
@@ -1260,7 +1260,7 @@ class Conv2DTranspose(layers.Layer):
                  bias_attr=None,
                  use_cudnn=True,
                  act=None):
-        super(Conv2DTranspose, self).__init__(name_scope, dtype)
+        super(Conv2DTranspose, self).__init__(name_scope)
         assert param_attr is not False, "param_attr should not be False in conv2d_transpose."
         self._param_attr = param_attr
         self._bias_attr = bias_attr
@@ -1388,7 +1388,7 @@ class SequenceConv(layers.Layer):
                  bias_attr=None,
                  param_attr=None,
                  act=None):
-        super(SequenceConv, self).__init__(name_scope, dtype)
+        super(SequenceConv, self).__init__(name_scope)
         self._num_filters = num_filters
         self._filter_size = filter_size
         self._filter_stride = filter_stride
diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py
index 954e822e6e2c0d6766be181d3ce3ce3a44fef2cd..25fe2171a05c3ab173e9845a5ceaff834e0c8dce 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -672,9 +672,8 @@ class TestBook(LayerTest):
     def make_sampled_softmax_with_cross_entropy(self):
         with program_guard(fluid.default_main_program(),
                            fluid.default_startup_program()):
-            logits = self._get_data(name='Logits', shape=[256], dtype='float64')
-            print(logits.dtype)
-            label = self._get_data(name='Label', shape=[1], dtype='int64')
+            logits = self._get_data(name='Logits', shape=[256], dtype='float32')
+            label = self._get_data(name='Label', shape=[1], dtype='int32')
             num_samples = 25
             output = layers.sampled_softmax_with_cross_entropy(logits, label,
                                                                num_samples)
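
Note on the dygraph changes above (not part of the patch): the old calls forwarded a name `dtype` that is not among the constructors' parameters. For PRelu, whose full signature `__init__(self, name_scope, mode, param_attr=None)` is visible in the diff, simply instantiating the layer would therefore fail with a NameError unless `dtype` happened to be defined at module scope. A minimal sketch, assuming the fluid.dygraph API of this branch:

    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        # With super(PRelu, self).__init__(name_scope) the layer constructs
        # cleanly; before this change the same line would likely raise
        # "NameError: name 'dtype' is not defined" inside PRelu.__init__.
        prelu = fluid.dygraph.nn.PRelu("prelu", mode="all")
        print(type(prelu))

The same reasoning applies to LayerNorm, NCE, BilinearTensorProduct, Conv2DTranspose, and SequenceConv, whose `super().__init__` calls receive the identical fix.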