未验证 提交 31ea3231 编写于 作者: Q QingshuChen 提交者: GitHub

add fill/fill_any for kunlun (#49645)

上级 a5bf156b
......@@ -214,6 +214,18 @@ XPUOpMap& get_kl2_ops() {
phi::DataType::BOOL,
phi::DataType::FLOAT16,
phi::DataType::FLOAT32})},
{"fill",
XPUKernelSet({phi::DataType::INT64,
phi::DataType::INT32,
phi::DataType::FLOAT16,
phi::DataType::FLOAT64,
phi::DataType::FLOAT32})},
{"fill_any",
XPUKernelSet({phi::DataType::INT64,
phi::DataType::INT32,
phi::DataType::FLOAT16,
phi::DataType::FLOAT64,
phi::DataType::FLOAT32})},
{"fill_any_like",
XPUKernelSet({phi::DataType::INT64,
phi::DataType::INT32,
......
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/impl/fill_kernel_impl.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
// Register the phi "fill" kernel for the XPU (Kunlun) backend, all layouts.
// NOTE(review): `bool` is registered here, but the kl2 op-map entry for
// "fill" in this change lists only INT64/INT32/FP16/FP64/FP32 — confirm
// whether BOOL should be added to the op map as well.
PD_REGISTER_KERNEL(fill,
                   XPU,
                   ALL_LAYOUT,
                   phi::FillKernel,
                   float,
                   double,
                   int64_t,
                   int,
                   phi::dtype::float16,
                   bool) {}
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append("..")
import unittest
import numpy as np
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
create_test_class,
get_xpu_op_support_types,
)
import paddle
paddle.enable_static()
class XPUTestFillAnyOp(XPUOpTestWrapper):
    """Test wrapper for the XPU ``fill_any`` op.

    Concrete per-dtype test classes are generated from the nested classes
    by ``create_test_class`` at module level.
    """

    def __init__(self):
        self.op_name = 'fill_any'
        self.use_dynamic_create_class = False

    class TestFillAnyOp(XPUOpTest):
        # Base case: fill a (20, 30) tensor of `self.dtype` with `self.value`.
        def setUp(self):
            self.op_type = "fill_any"
            self.dtype = 'float64'
            self.value = 0.0
            self.init()  # subclasses override init() to vary dtype/value
            self.inputs = {'X': np.random.random((20, 30)).astype(self.dtype)}
            # Both a float and an int form of the fill value are passed;
            # presumably the kernel selects one based on the output dtype —
            # TODO(review): confirm against the op definition.
            self.attrs = {
                'value_float': float(self.value),
                'value_int': int(self.value),
            }
            # Expected output: every element equals the fill value.
            self.outputs = {
                'Out': self.value
                * np.ones_like(self.inputs["X"]).astype(self.dtype)
            }

        def init(self):
            # Hook for subclasses; the base case keeps the defaults above.
            pass

        def test_check_output(self):
            self.check_output_with_place(paddle.XPUPlace(0))

        def test_check_grad(self):
            self.check_grad_with_place(paddle.XPUPlace(0), ['X'], 'Out')

    class TestFillAnyOpFloat32(TestFillAnyOp):
        def init(self):
            self.dtype = np.float32
            self.value = 0.0

    class TestFillAnyOpFloat16(TestFillAnyOp):
        def init(self):
            self.dtype = np.float16

    class TestFillAnyOpvalue1(TestFillAnyOp):
        # Large integer fill value.
        def init(self):
            self.dtype = np.float32
            self.value = 111111555

    class TestFillAnyOpvalue2(TestFillAnyOp):
        # Non-integral fill value (value_int truncates to 11111).
        def init(self):
            self.dtype = np.float32
            self.value = 11111.1111
class TestFillAnyInplace(unittest.TestCase):
    """Dygraph tests for the in-place ``Tensor.fill_`` API."""

    def test_fill_any_version(self):
        # Every in-place fill_ must bump the tensor's inplace_version by one.
        with paddle.fluid.dygraph.guard():
            var = paddle.to_tensor(np.ones((4, 2, 3)).astype(np.float32))
            self.assertEqual(var.inplace_version, 0)
            for expected_version in (1, 2, 3):
                var.fill_(0)
                self.assertEqual(var.inplace_version, expected_version)

    def test_fill_any_eqaul(self):
        # fill_(1) must produce the same result as numpy's broadcast assign.
        with paddle.fluid.dygraph.guard():
            tensor = paddle.to_tensor(
                np.random.random((20, 30)).astype(np.float32)
            )
            expected = tensor.numpy()
            expected[...] = 1
            tensor.fill_(1)
            self.assertEqual((tensor.numpy() == expected).all().item(), True)

    def test_backward(self):
        # Filling y in place overwrites the value produced by 2 * x, so the
        # gradient reaching x must be all zeros.
        with paddle.fluid.dygraph.guard():
            x = paddle.full([10, 10], -1.0, dtype='float32')
            x.stop_gradient = False
            y = 2 * x
            y.fill_(1)
            y.backward()
            np.testing.assert_array_equal(x.grad.numpy(), np.zeros([10, 10]))
# Generate one concrete test class per dtype the XPU build reports as
# supported for 'fill_any'.
support_types = get_xpu_op_support_types('fill_any')
for stype in support_types:
    create_test_class(globals(), XPUTestFillAnyOp, stype)

if __name__ == "__main__":
    unittest.main()
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append("..")
import unittest
import numpy as np
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
create_test_class,
get_xpu_op_support_types,
)
import paddle
import paddle.fluid.core as core
from paddle.fluid.op import Operator
paddle.enable_static()
class XPUTestFillOp(XPUOpTestWrapper):
    """Test wrapper for the XPU ``fill`` op.

    Concrete per-dtype test classes are generated from the nested classes
    by ``create_test_class`` at module level.
    """

    def __init__(self):
        self.op_name = 'fill'
        self.use_dynamic_create_class = False

    class TestFillOp1(XPUOpTest):
        # force_cpu=False: output is checked on the XPU place.
        def setUp(self):
            self.op_type = "fill"
            val = np.random.random(size=[100, 200])
            self.inputs = {}
            # 'fill' takes no inputs; the value/shape/dtype are attributes.
            self.attrs = {
                'value': val.flatten().tolist(),
                'shape': [100, 200],
                'dtype': int(core.VarDesc.VarType.FP64),
                'force_cpu': False,
            }
            self.outputs = {'Out': val.astype('float64')}

        def test_check_output(self):
            self.check_output_with_place(paddle.XPUPlace(0))

    class TestFillOp2(XPUOpTest):
        # force_cpu=True: the op materializes on CPU, so the check uses the
        # default place rather than the XPU place.
        def setUp(self):
            self.op_type = "fill"
            val = np.random.random(size=[100, 200])
            self.inputs = {}
            self.attrs = {
                'value': val.flatten().tolist(),
                'shape': [100, 200],
                'dtype': int(core.VarDesc.VarType.FP64),
                'force_cpu': True,
            }
            self.outputs = {'Out': val.astype('float64')}

        def test_check_output(self):
            self.check_output()
class TestFillOp3(unittest.TestCase):
    """Run the raw ``fill`` operator through the low-level Operator API on
    every available place, with and without ``force_cpu``."""

    def check_with_place(self, place, f_cpu):
        scope = core.Scope()
        # Output variable the operator writes into.
        out_tensor = scope.var('Out').get_tensor()
        # Build and run the fill operator with random FP32 data.
        expected = np.random.random(size=[300, 200])
        fill_op = Operator(
            "fill",
            value=expected.flatten(),
            shape=[300, 200],
            dtype=int(core.VarDesc.VarType.FP32),
            force_cpu=f_cpu,
            Out='Out',
        )
        fill_op.run(scope, place)
        # The tensor in scope must match the source values as float32.
        np.testing.assert_array_equal(
            np.array(out_tensor), np.array(expected, 'float32')
        )

    def test_fill_op(self):
        places = [core.CPUPlace()]
        if core.is_compiled_with_xpu():
            places.append(core.XPUPlace(0))
        for place in places:
            for f_cpu in (True, False):
                self.check_with_place(place, f_cpu)
# Generate one concrete test class per dtype the XPU build reports as
# supported for 'fill'.
support_types = get_xpu_op_support_types('fill')
for stype in support_types:
    create_test_class(globals(), XPUTestFillOp, stype)

if __name__ == '__main__':
    unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册