diff --git a/cmake/external/xpu.cmake b/cmake/external/xpu.cmake
index 3342d8bbd8fac72614b8bdd41dbf53ca99501675..2fc5bf7954ce0a3a02a3146ee5fdf702fb49b047 100644
--- a/cmake/external/xpu.cmake
+++ b/cmake/external/xpu.cmake
@@ -35,7 +35,7 @@ ELSE ()
 ENDIF()
 
 SET(XPU_BASE_URL_WITHOUT_DATE "https://baidu-kunlun-product.cdn.bcebos.com/KL-SDK/klsdk-dev")
-SET(XPU_BASE_URL "${XPU_BASE_URL_WITHOUT_DATE}/20210818")
+SET(XPU_BASE_URL "${XPU_BASE_URL_WITHOUT_DATE}/20210826")
 SET(XPU_XRE_URL "${XPU_BASE_URL}/${XPU_XRE_DIR_NAME}.tar.gz" CACHE STRING "" FORCE)
 SET(XPU_XDNN_URL "${XPU_BASE_URL}/${XPU_XDNN_DIR_NAME}.tar.gz" CACHE STRING "" FORCE)
 SET(XPU_XCCL_URL "${XPU_BASE_URL_WITHOUT_DATE}/20210623/${XPU_XCCL_DIR_NAME}.tar.gz" CACHE STRING "" FORCE)
diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index 6a9f5577705335d8185a158b15169d87bf2314d2..57f9d094ac80d788555f5fa47c0b7e98b0bdbac0 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -1254,10 +1254,10 @@ void OperatorWithKernel::ChooseKernel(const RuntimeContext& ctx,
   }
 #endif
 #ifdef PADDLE_WITH_XPU
-  if ((kernel_iter == kernels.end() &&
-       is_xpu_place(expected_kernel_key.place_) &&
-       !paddle::platform::is_xpu_support_op(type_, expected_kernel_key)) ||
-      paddle::platform::is_in_xpu_black_list(type_)) {
+  if (is_xpu_place(expected_kernel_key.place_) &&
+      (kernel_iter == kernels.end() ||
+       !paddle::platform::is_xpu_support_op(type_, expected_kernel_key) ||
+       paddle::platform::is_in_xpu_black_list(type_))) {
     VLOG(3) << "missing XPU kernel: " << type_
             << ", expected_kernel_key:" << expected_kernel_key
             << ", fallbacking to CPU one!";
diff --git a/paddle/fluid/imperative/prepared_operator.cc b/paddle/fluid/imperative/prepared_operator.cc
index 93f2fd38a7306417324f097761707a8e7ef2195a..8f45cd0fa6ea148633b3de4dcaed8d01849beb91 100644
--- a/paddle/fluid/imperative/prepared_operator.cc
+++ b/paddle/fluid/imperative/prepared_operator.cc
@@ -131,10 +131,10 @@ PreparedOp PrepareImpl(const NameVarMap<VarType>& ins,
   auto& kernels = kernels_iter->second;
   auto kernel_iter = kernels.find(expected_kernel_key);
 #ifdef PADDLE_WITH_XPU
-  if ((kernel_iter == kernels.end() &&
-       is_xpu_place(expected_kernel_key.place_) &&
-       !paddle::platform::is_xpu_support_op(op.Type(), expected_kernel_key)) ||
-      paddle::platform::is_in_xpu_black_list(op.Type())) {
+  if (is_xpu_place(expected_kernel_key.place_) &&
+      (kernel_iter == kernels.end() ||
+       !paddle::platform::is_xpu_support_op(op.Type(), expected_kernel_key) ||
+       paddle::platform::is_in_xpu_black_list(op.Type()))) {
     VLOG(3) << "missing XPU kernel: " << op.Type()
             << ", expected_kernel_key:" << expected_kernel_key
             << ", fallbacking to CPU one!";
diff --git a/paddle/fluid/operators/label_smooth_op_xpu.cc b/paddle/fluid/operators/label_smooth_op_xpu.cc
new file mode 100644
index 0000000000000000000000000000000000000000..6b6350753909f0dc319d07904b4d81327262684e
--- /dev/null
+++ b/paddle/fluid/operators/label_smooth_op_xpu.cc
@@ -0,0 +1,57 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#ifdef PADDLE_WITH_XPU
+#include "paddle/fluid/operators/label_smooth_op.h"
+#include "paddle/fluid/framework/op_registry.h"
+
+namespace paddle {
+namespace operators {
+
+template <typename DeviceContext, typename T>
+class LabelSmoothXPUKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const {
+    auto* out_t = ctx.Output<framework::LoDTensor>("Out");
+    auto* in_t = ctx.Input<framework::LoDTensor>("X");
+    auto* dist_t = ctx.Input<framework::Tensor>("PriorDist");
+    auto label_dim = in_t->dims()[in_t->dims().size() - 1];
+    auto ptr = out_t->mutable_data<T>(ctx.GetPlace());
+
+    auto epsilon = ctx.Attr<float>("epsilon");
+    auto& dev_ctx = ctx.template device_context<DeviceContext>();
+    if (dist_t) {
+      PADDLE_THROW(
+          platform::errors::External("XPU doesn't support dist label smooth"));
+    } else {
+      int r = xpu::label_smooth<T>(dev_ctx.x_context(), in_t->data<T>(), ptr,
+                                   in_t->numel(), epsilon, label_dim);
+      PADDLE_ENFORCE_EQ(
+          r, XPU_SUCCESS,
+          platform::errors::External("XPU API(label_smooth) return wrong "
+                                     "value[%d %s]",
+                                     r, XPUAPIErrorMsg[r]));
+    }
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+
+REGISTER_OP_XPU_KERNEL(
+    label_smooth,
+    ops::LabelSmoothXPUKernel<paddle::platform::XPUDeviceContext, float>);
+#endif
diff --git a/paddle/fluid/platform/xpu/xpu2_op_list.h b/paddle/fluid/platform/xpu/xpu2_op_list.h
index a8b2962d4acafd605480147711cf82f9e629ab69..ab2db1ff3831b1b82d052e456fdb5b1b9d168e72 100644
--- a/paddle/fluid/platform/xpu/xpu2_op_list.h
+++ b/paddle/fluid/platform/xpu/xpu2_op_list.h
@@ -29,6 +29,8 @@ using XPUOpMap = std::unordered_map<std::string, XPUKernelSet>;
 XPUOpMap& get_kl2_ops() {
   // Ops supported by KL2, indexed by op_name, data_type and place
   static XPUOpMap s_xpu2_kernels{
+      {"label_smooth",
+       XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
       {"mul", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace()),
                             pOpKernelType(vartype::FP16, XPUPlace())})},
       {"elementwise_sub",
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_label_smooth_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_label_smooth_op_xpu.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a827c1beb2911c0ae4cd81a9fb2e0dad597e04c
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/xpu/test_label_smooth_op_xpu.py
@@ -0,0 +1,64 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+import paddle
+import numpy as np
+import sys
+sys.path.append("..")
+from op_test_xpu import XPUOpTest
+
+paddle.enable_static()
+
+
+class TestLabelSmoothOp(XPUOpTest):
+    def config(self):
+        self.op_type = "label_smooth"
+        self.epsilon = 0.1
+        self.use_xpu = True
+        batch_size, self.label_dim = 10, 12
+        self.label = np.zeros((batch_size, self.label_dim)).astype("float32")
+        nonzero_index = np.random.randint(self.label_dim, size=(batch_size))
+        self.label[np.arange(batch_size), nonzero_index] = 1
+
+    def setUp(self):
+        self.config()
+        smoothed_label = (1 - self.epsilon
+                          ) * self.label + self.epsilon / self.label_dim
+        self.inputs = {'X': self.label}
+        self.attrs = {'epsilon': self.epsilon}
+        self.outputs = {'Out': smoothed_label}
+
+    def test_check_output(self):
+        if not paddle.is_compiled_with_xpu():
+            return
+        self.check_output_with_place(paddle.XPUPlace(0), atol=1e-6)
+
+    def test_check_grad(self):
+        return
+
+
+class TestLabelSmoothOp3D(TestLabelSmoothOp):
+    def setUp(self):
+        super(TestLabelSmoothOp3D, self).setUp()
+        self.inputs['X'] = self.inputs['X'].reshape(
+            [2, -1, self.inputs['X'].shape[-1]])
+        self.outputs['Out'] = self.outputs['Out'].reshape(self.inputs['X']
+                                                          .shape)
+
+
+if __name__ == '__main__':
+    unittest.main()
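
For context (not part of the patch): the kernel added above implements the standard label-smoothing formula out = (1 - epsilon) * label + epsilon / label_dim, which is exactly what the unit test checks. A minimal sketch of that computation, assuming paddle.nn.functional.label_smooth as the public entry point that dispatches to this op:

    import numpy as np
    import paddle

    epsilon, label_dim = 0.1, 12

    # One-hot labels, built the same way as in the test above.
    label = np.zeros((10, label_dim), dtype="float32")
    label[np.arange(10), np.random.randint(label_dim, size=10)] = 1

    # Reference result: out = (1 - epsilon) * label + epsilon / label_dim.
    expected = (1 - epsilon) * label + epsilon / label_dim

    # Same result through the public API; on an XPU build, selecting the
    # device first (e.g. paddle.set_device("xpu")) would exercise the new
    # FP32 kernel registered in this patch.
    out = paddle.nn.functional.label_smooth(
        paddle.to_tensor(label), epsilon=epsilon)
    np.testing.assert_allclose(out.numpy(), expected, atol=1e-6)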