Unverified commit 0404e7a9 · authored by liu zhengxi · committed by GitHub

Update the precision of the pad, pad2d, and pad_constant_like unit tests from fp32 to fp64 (#22394)

* update the unit test precision of pad, pad2d, and pad_constant_like from fp32 to fp64, test=develop
Parent 371f377b
@@ -345,7 +345,7 @@ class Pad2dCPUKernel : public framework::OpKernel<T> {
     GetPaddings(pads, context);
     auto mode = context.Attr<std::string>("mode");
     auto data_format = context.Attr<std::string>("data_format");
-    T value = context.Attr<T>("pad_value");
+    T value = static_cast<T>(context.Attr<float>("pad_value"));

     auto* x = context.Input<Tensor>("X");
     auto in_dims = x->dims();
...
@@ -314,7 +314,7 @@ class Pad2dCUDAKernel : public framework::OpKernel<T> {
     GetPaddings(pads, context);
     auto mode = context.Attr<std::string>("mode");
     auto data_format = context.Attr<std::string>("data_format");
-    T value = context.Attr<T>("pad_value");
+    T value = static_cast<T>(context.Attr<float>("pad_value"));

     auto* x = context.Input<Tensor>("X");
     auto in_dims = x->dims();
...
@@ -38,7 +38,7 @@ class PadConstantLikeKernel : public framework::OpKernel<T> {
       return;
     }

-    T pad_value = context.Attr<T>("pad_value");
+    T pad_value = static_cast<T>(context.Attr<float>("pad_value"));
    out->mutable_data<T>(context.GetPlace());

    int rank = context.Input<framework::Tensor>("X")->dims().size();
...
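The three C++ hunks above follow one pattern: the op's pad_value attribute is declared as a 32-bit float, so a kernel instantiated for double can no longer read it with Attr<T>; the float attribute is read as-is and static_cast to the kernel's element type T. The snippet below is only a NumPy sketch of that cast together with pad_constant_like's behaviour (pad Y up to the shape of X with a constant); the function name is made up for this note and it is not the Paddle kernel itself.

import numpy as np

def pad_constant_like_ref(x, y, pad_value):
    # The attribute arrives as a plain float scalar; filling with x.dtype
    # mirrors the static_cast<T>(...) added in the kernels above.
    out = np.full(x.shape, pad_value, dtype=x.dtype)
    # Copy Y into the leading corner; every other element keeps pad_value.
    out[tuple(slice(0, d) for d in y.shape)] = y
    return out

x = np.zeros((3, 4), dtype="float64")
y = np.ones((2, 3), dtype="float64")
print(pad_constant_like_ref(x, y, pad_value=1.5).dtype)  # float64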
@@ -23,14 +23,15 @@ class TestPad2dOp(OpTest):
         self.variable_paddings = False
         self.initTestCase()
         self.op_type = "pad2d"
-        self.inputs = {'X': np.random.random(self.shape).astype("float32"), }
+        self.inputs = {'X': np.random.random(self.shape).astype("float64")}
         self.attrs = {}
         if self.variable_paddings:
             self.attrs['paddings'] = []
             self.inputs['Paddings'] = np.array(self.paddings).flatten().astype(
                 "int32")
         else:
-            self.attrs['paddings'] = np.array(self.paddings).flatten()
+            self.attrs['paddings'] = np.array(self.paddings).flatten().astype(
+                "int32")
         self.attrs['pad_value'] = self.pad_value
         self.attrs['mode'] = self.mode
         self.attrs['data_format'] = self.data_format
...
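For reference, a minimal sketch of what a pad2d test case looks like after this change, assuming the OpTest helper is importable as in Paddle's unittests; the class name, shape, and paddings are illustrative and not taken from the diff. The input tensor is generated as float64 while the paddings attribute stays int32.

import numpy as np
from op_test import OpTest  # Paddle's operator test helper (assumed import path)

class TestPad2dFp64Example(OpTest):
    def setUp(self):
        self.op_type = "pad2d"
        x = np.random.random((2, 3, 4, 4)).astype("float64")   # data in fp64
        paddings = [0, 1, 2, 3]                                 # top, bottom, left, right
        self.inputs = {'X': x}
        self.attrs = {'paddings': np.array(paddings).flatten().astype("int32"),
                      'pad_value': 0.0, 'mode': 'constant',
                      'data_format': 'NCHW'}
        self.outputs = {'Out': np.pad(x, ((0, 0), (0, 0), (0, 1), (2, 3)),
                                      mode='constant', constant_values=0.0)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')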
@@ -24,8 +24,8 @@ class TestPadOp(OpTest):
         self.initTestCase()
         self.op_type = "pad_constant_like"
         self.inputs = {
-            'X': np.random.random(self.x_shape).astype("float32"),
-            'Y': np.random.random(self.y_shape).astype("float32")
+            'X': np.random.random(self.x_shape).astype("float64"),
+            'Y': np.random.random(self.y_shape).astype("float64")
         }
         self.attrs = {}
         self.attrs['pad_value'] = self.pad_value
...
@@ -37,7 +37,7 @@ class TestPadOp(OpTest):
         }

     def get_dtype(self):
-        return np.float32
+        return np.float64

     def test_check_output(self):
         self.check_output()
...
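The get_dtype() hook keeps the precision switch a one-line change; a hypothetical subclass (not part of this commit) could still pin a case back to single precision by overriding it:

class TestPadFp32Variant(TestPadOp):
    # Illustrative only: reuse the same test body at float32.
    def get_dtype(self):
        return np.float32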
@@ -50,9 +50,6 @@ NO_FP64_CHECK_GRAD_OP_LIST = [
     'modified_huber_loss', \
     'mul', \
     'nce', \
-    'pad', \
-    'pad2d', \
-    'pad_constant_like', \
     'pool2d', \
     'pool3d', \
     'prroi_pool', \
...
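NO_FP64_CHECK_GRAD_OP_LIST is, as its name suggests, an exemption list: operators on it are allowed to skip the float64 gradient check. With pad, pad2d, and pad_constant_like removed, their tests are expected to run check_grad on double-precision inputs, roughly as sketched below; the tolerance value is illustrative and not taken from this commit.

class TestPadFp64GradExample(TestPadOp):
    def test_check_grad_normal(self):
        # Numeric and analytic gradients are now compared with float64 data.
        self.check_grad(['X'], 'Out', max_relative_error=0.006)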