Commit 61fe139f, authored by minqiyang

Polish code

Parent e377d759
@@ -404,7 +404,7 @@ class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel<T> {
     int batch_size = logits->dims()[0];
     int feature_size = logits->dims()[1];
     auto* logits_data = logits->data<T>();
-    auto* labels_data = labels->data<int64_t>();
+    auto* labels_data = labels->data<T>();
     SoftmaxWithCrossEntropyFusedKernel(
         logits_data, labels_data, softmax_data, loss_data, batch_size,
         feature_size, context.cuda_device_context().stream());
...
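The fused kernel now reads the label tensor with element type T, matching the logits, instead of int64_t. That is consistent with a soft-label path, where labels are a per-class probability distribution in the logits' dtype rather than integer class indices; read this as an inference from the diff, not something the commit message states. A minimal Python sketch of the two label conventions, assuming the fluid.layers.softmax_with_cross_entropy API of this era:

import paddle.fluid as fluid

logits = fluid.layers.data(name='logits', shape=[10], dtype='float32')

# Hard labels: int64 class indices, one per example -- the int64_t case.
hard_label = fluid.layers.data(name='hard_label', shape=[1], dtype='int64')
hard_loss = fluid.layers.softmax_with_cross_entropy(logits, hard_label)

# Soft labels: a probability distribution sharing the logits' dtype --
# the case where reading labels->data<T>() is the correct choice.
soft_label = fluid.layers.data(name='soft_label', shape=[10], dtype='float32')
soft_loss = fluid.layers.softmax_with_cross_entropy(
    logits, soft_label, soft_label=True)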
@@ -563,7 +563,7 @@ class LayerNorm(layers.Layer):
             >>> x = fluid.layers.layer_norm(input=data, begin_norm_axis=1)
         """
-        super(LayerNorm, self).__init__(name_scope, dtype)
+        super(LayerNorm, self).__init__(name_scope)
         self._scale = scale
         self._shift = shift
         self._begin_norm_axis = begin_norm_axis
@@ -840,7 +840,7 @@ class NCE(layers.Layer):
                  custom_dist=None,
                  seed=0,
                  is_sparse=False):
-        super(NCE, self).__init__(name_scope, dtype)
+        super(NCE, self).__init__(name_scope)
         self._param_attr = param_attr
         self._bias_attr = bias_attr
         self._num_total_classes = num_total_classes
@@ -1013,7 +1013,7 @@ class PRelu(layers.Layer):
     def __init__(self, name_scope, mode, param_attr=None):
-        super(PRelu, self).__init__(name_scope, dtype)
+        super(PRelu, self).__init__(name_scope)
         self._mode = mode
         self._param_attr = param_attr
         if self._mode not in ['all', 'channel', 'element']:
@@ -1090,7 +1090,7 @@ class BilinearTensorProduct(layers.Layer):
                  act=None,
                  param_attr=None,
                  bias_attr=None):
-        super(BilinearTensorProduct, self).__init__(name_scope, dtype)
+        super(BilinearTensorProduct, self).__init__(name_scope)
         self._param_attr = param_attr
         self._bias_attr = bias_attr
         self._act = act
@@ -1260,7 +1260,7 @@ class Conv2DTranspose(layers.Layer):
                  bias_attr=None,
                  use_cudnn=True,
                  act=None):
-        super(Conv2DTranspose, self).__init__(name_scope, dtype)
+        super(Conv2DTranspose, self).__init__(name_scope)
         assert param_attr is not False, "param_attr should not be False in conv2d_transpose."
         self._param_attr = param_attr
         self._bias_attr = bias_attr
@@ -1388,7 +1388,7 @@ class SequenceConv(layers.Layer):
                  bias_attr=None,
                  param_attr=None,
                  act=None):
-        super(SequenceConv, self).__init__(name_scope, dtype)
+        super(SequenceConv, self).__init__(name_scope)
         self._num_filters = num_filters
         self._filter_size = filter_size
         self._filter_stride = filter_stride
...
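Each of the six imperative layers above stops forwarding dtype to layers.Layer.__init__. At least in PRelu, whose full signature is visible in its hunk, dtype is not an __init__ parameter, so the old call would raise NameError: name 'dtype' is not defined the moment the layer was constructed; the other five show the same pattern. A minimal sketch of the corrected pattern follows; the paddle.fluid.imperative import path is an assumption about this vintage of the codebase:

from paddle.fluid.imperative import layers

class MyLayer(layers.Layer):
    def __init__(self, name_scope, size):
        # Only name_scope is forwarded: `dtype` is not a name in this
        # scope, so forwarding it (as the old code did) would raise
        # NameError before the layer could be used.
        super(MyLayer, self).__init__(name_scope)
        self._size = size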
@@ -672,9 +672,8 @@ class TestBook(LayerTest):
     def make_sampled_softmax_with_cross_entropy(self):
         with program_guard(fluid.default_main_program(),
                            fluid.default_startup_program()):
-            logits = self._get_data(name='Logits', shape=[256], dtype='float64')
-            print(logits.dtype)
-            label = self._get_data(name='Label', shape=[1], dtype='int64')
+            logits = self._get_data(name='Logits', shape=[256], dtype='float32')
+            label = self._get_data(name='Label', shape=[1], dtype='int32')
             num_samples = 25
             output = layers.sampled_softmax_with_cross_entropy(logits, label,
                                                                num_samples)
...
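The test now builds float32 logits and int32 labels and drops a leftover debug print. For reference, a standalone sketch of the same graph construction, with fluid.layers.data standing in for the harness's self._get_data; that int32 labels are accepted here is taken from the updated test itself, not verified independently:

import paddle.fluid as fluid

# 256-way logits, one label per example, 25 sampled classes -- the same
# shapes and dtypes the updated test uses.
logits = fluid.layers.data(name='Logits', shape=[256], dtype='float32')
label = fluid.layers.data(name='Label', shape=[1], dtype='int32')
loss = fluid.layers.sampled_softmax_with_cross_entropy(logits, label, 25)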