diff --git a/paddle/fluid/operators/load_op.cc b/paddle/fluid/operators/load_op.cc
index 4eebda7d53a3c0f4f1d02f01813a6325bca5c0d9..d39beb9266a7e84d9a8f0139b101a94e612b6942 100644
--- a/paddle/fluid/operators/load_op.cc
+++ b/paddle/fluid/operators/load_op.cc
@@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include "paddle/fluid/operators/load_op.h"
-
 #include <string>
 
+#include "paddle/fluid/framework/op_registry.h"
+
 namespace paddle {
 namespace operators {
 
@@ -65,12 +65,3 @@ class LoadOpProtoMaker : public framework::OpProtoAndCheckerMaker {
 namespace ops = paddle::operators;
 
 REGISTER_OPERATOR(load, ops::LoadOp, ops::LoadOpProtoMaker);
-
-REGISTER_OP_CPU_KERNEL(
-    load,
-    ops::LoadOpKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::LoadOpKernel<paddle::platform::CPUDeviceContext, double>,
-    ops::LoadOpKernel<paddle::platform::CPUDeviceContext, int>,
-    ops::LoadOpKernel<paddle::platform::CPUDeviceContext, int8_t>,
-    ops::LoadOpKernel<paddle::platform::CPUDeviceContext, uint8_t>,
-    ops::LoadOpKernel<paddle::platform::CPUDeviceContext, int64_t>);
diff --git a/paddle/fluid/operators/load_op.cu b/paddle/fluid/operators/load_op.cu
deleted file mode 100644
index 04c456ac60306d3b3fbf06aaf9e5f05828ddcb51..0000000000000000000000000000000000000000
--- a/paddle/fluid/operators/load_op.cu
+++ /dev/null
@@ -1,24 +0,0 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include "paddle/fluid/operators/load_op.h"
-
-namespace ops = paddle::operators;
-
-REGISTER_OP_CUDA_KERNEL(load,
-                        ops::LoadOpKernel<paddle::platform::CUDADeviceContext, float>,
-                        ops::LoadOpKernel<paddle::platform::CUDADeviceContext, double>,
-                        ops::LoadOpKernel<paddle::platform::CUDADeviceContext, int>,
-                        ops::LoadOpKernel<paddle::platform::CUDADeviceContext, int8_t>,
-                        ops::LoadOpKernel<paddle::platform::CUDADeviceContext, int64_t>);
diff --git a/paddle/fluid/operators/load_op.h b/paddle/fluid/operators/load_op.h
deleted file mode 100644
index 26c62948783f40c86fb2e2aad59e5ae42585da49..0000000000000000000000000000000000000000
--- a/paddle/fluid/operators/load_op.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#pragma once
-
-#include <fstream>
-#include <string>
-#include <vector>
-
-#include "paddle/fluid/framework/convert_utils.h"
-#include "paddle/fluid/framework/data_type_transform.h"
-#include "paddle/fluid/framework/op_registry.h"
-#include "paddle/fluid/platform/device_context.h"
-#include "paddle/fluid/platform/profiler/event_tracing.h"
-
-namespace paddle {
-namespace operators {
-template <typename DeviceContext, typename T>
-class LoadOpKernel : public framework::OpKernel<T> {
- public:
-  void Compute(const framework::ExecutionContext &ctx) const override {
-    auto place = ctx.GetPlace();
-    // FIXME(yuyang18): We save variable to local file now, but we should change
-    // it to save an output stream.
-    auto filename = ctx.Attr<std::string>("file_path");
-    std::ifstream fin(filename, std::ios::binary);
-    PADDLE_ENFORCE_EQ(static_cast<bool>(fin),
-                      true,
-                      platform::errors::Unavailable(
-                          "Load operator fail to open file %s, please check "
-                          "whether the model file is complete or damaged.",
-                          filename));
-
-    auto out_var_name = ctx.OutputNames("Out").data();
-    auto *out_var = ctx.OutputVar("Out");
-
-    PADDLE_ENFORCE_NOT_NULL(
-        out_var,
-        platform::errors::InvalidArgument(
-            "The variable %s to be loaded cannot be found.", out_var_name));
-
-    if (out_var->IsType<framework::LoDTensor>()) {
-      LoadLodTensor(fin, place, out_var, ctx);
-    } else if (out_var->IsType<phi::SelectedRows>()) {
-      LoadSelectedRows(fin, place, out_var);
-    } else {
-      PADDLE_THROW(platform::errors::InvalidArgument(
-          "Load operator only supports loading LoDTensor and SelectedRows "
-          "variable, %s has wrong type",
-          out_var_name));
-    }
-  }
-
-  void LoadLodTensor(std::istream &fin,
-                     const platform::Place &place,
-                     framework::Variable *var,
-                     const framework::ExecutionContext &ctx) const {
-    // get device context from pool
-    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
-    auto &dev_ctx = *pool.Get(place);
-    auto *tensor = var->GetMutable<framework::LoDTensor>();
-
-    auto seek = ctx.Attr<int64_t>("seek");
-
-    if (seek != -1) {
-      PADDLE_ENFORCE_GE(seek,
-                        0,
-                        platform::errors::InvalidArgument(
-                            "seek witn tensor must great than or equal to 0"));
-      auto shape = ctx.Attr<std::vector<int64_t>>("shape");
-      paddle::framework::DeserializeFromStream(
-          fin, tensor, dev_ctx, seek, shape);
-    } else {
-      paddle::framework::DeserializeFromStream(fin, tensor, dev_ctx);
-    }
-
-    auto load_as_fp16 = ctx.Attr<bool>("load_as_fp16");
-    auto in_dtype = framework::TransToProtoVarType(tensor->dtype());
-    auto out_dtype = load_as_fp16 ? framework::proto::VarType::FP16 : in_dtype;
-
-    if (in_dtype != out_dtype) {
-      // convert to float16 tensor
-      auto in_kernel_type = framework::OpKernelType(in_dtype, place);
-      auto out_kernel_type = framework::OpKernelType(out_dtype, place);
-      framework::LoDTensor fp16_tensor;
-      // copy LoD info to the new tensor
-      fp16_tensor.set_lod(tensor->lod());
-      framework::TransDataType(
-          in_kernel_type, out_kernel_type, *tensor, &fp16_tensor);
-
-      // reset output tensor
-      var->Clear();
-      tensor = var->GetMutable<framework::LoDTensor>();
-      tensor->set_lod(fp16_tensor.lod());
-      tensor->ShareDataWith(fp16_tensor);
-    }
-  }
-
-  void LoadSelectedRows(std::istream &fin,
-                        const platform::Place &place,
-                        framework::Variable *var) const {
-    auto *selectedRows = var->GetMutable<phi::SelectedRows>();
-    // get device context from pool
-    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
-    auto &dev_ctx = *pool.Get(place);
-    framework::DeserializeFromStream(fin, selectedRows, dev_ctx);
-    selectedRows->SyncIndex();
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
diff --git a/paddle/fluid/operators/load_op_npu.cc b/paddle/fluid/operators/load_op_npu.cc
index 5b6b3e2c14fb25c3da4731278bfbf91ecd4cfcf6..c3cd20ffceeb3d8bdde95c126cc31aaee4bd9b92 100644
--- a/paddle/fluid/operators/load_op_npu.cc
+++ b/paddle/fluid/operators/load_op_npu.cc
@@ -12,7 +12,113 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include "paddle/fluid/operators/load_op.h"
+#include <fstream>
+#include <string>
+#include <vector>
+
+#include "paddle/fluid/framework/convert_utils.h"
+#include "paddle/fluid/framework/data_type_transform.h"
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/platform/device_context.h"
+#include "paddle/fluid/platform/profiler/event_tracing.h"
+
+namespace paddle {
+namespace operators {
+template <typename DeviceContext, typename T>
+class LoadOpKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext &ctx) const override {
+    auto place = ctx.GetPlace();
+    // FIXME(yuyang18): We save variable to local file now, but we should change
+    // it to save an output stream.
+    auto filename = ctx.Attr<std::string>("file_path");
+    std::ifstream fin(filename, std::ios::binary);
+    PADDLE_ENFORCE_EQ(static_cast<bool>(fin),
+                      true,
+                      platform::errors::Unavailable(
+                          "Load operator fails to open file %s, please check "
+                          "whether the model file is complete or damaged.",
+                          filename));
+
+    auto out_var_name = ctx.OutputNames("Out").data();
+    auto *out_var = ctx.OutputVar("Out");
+
+    PADDLE_ENFORCE_NOT_NULL(
+        out_var,
+        platform::errors::InvalidArgument(
+            "The variable %s to be loaded cannot be found.", out_var_name));
+
+    if (out_var->IsType<framework::LoDTensor>()) {
+      LoadLodTensor(fin, place, out_var, ctx);
+    } else if (out_var->IsType<phi::SelectedRows>()) {
+      LoadSelectedRows(fin, place, out_var);
+    } else {
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "Load operator only supports loading LoDTensor and SelectedRows "
+          "variable, %s has wrong type",
+          out_var_name));
+    }
+  }
+
+  void LoadLodTensor(std::istream &fin,
+                     const platform::Place &place,
+                     framework::Variable *var,
+                     const framework::ExecutionContext &ctx) const {
+    // get device context from pool
+    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
+    auto &dev_ctx = *pool.Get(place);
+    auto *tensor = var->GetMutable<framework::LoDTensor>();
+
+    auto seek = ctx.Attr<int64_t>("seek");
+
+    if (seek != -1) {
+      PADDLE_ENFORCE_GE(seek,
+                        0,
+                        platform::errors::InvalidArgument(
+                            "seek with tensor must be greater than or equal to 0"));
+      auto shape = ctx.Attr<std::vector<int64_t>>("shape");
+      paddle::framework::DeserializeFromStream(
+          fin, tensor, dev_ctx, seek, shape);
+    } else {
+      paddle::framework::DeserializeFromStream(fin, tensor, dev_ctx);
+    }
+
+    auto load_as_fp16 = ctx.Attr<bool>("load_as_fp16");
+    auto in_dtype = framework::TransToProtoVarType(tensor->dtype());
+    auto out_dtype = load_as_fp16 ? framework::proto::VarType::FP16 : in_dtype;
+
+    if (in_dtype != out_dtype) {
+      // convert to float16 tensor
+      auto in_kernel_type = framework::OpKernelType(in_dtype, place);
+      auto out_kernel_type = framework::OpKernelType(out_dtype, place);
+      framework::LoDTensor fp16_tensor;
+      // copy LoD info to the new tensor
+      fp16_tensor.set_lod(tensor->lod());
+      framework::TransDataType(
+          in_kernel_type, out_kernel_type, *tensor, &fp16_tensor);
+
+      // reset output tensor
+      var->Clear();
+      tensor = var->GetMutable<framework::LoDTensor>();
+      tensor->set_lod(fp16_tensor.lod());
+      tensor->ShareDataWith(fp16_tensor);
+    }
+  }
+
+  void LoadSelectedRows(std::istream &fin,
+                        const platform::Place &place,
+                        framework::Variable *var) const {
+    auto *selectedRows = var->GetMutable<phi::SelectedRows>();
+    // get device context from pool
+    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
+    auto &dev_ctx = *pool.Get(place);
+    framework::DeserializeFromStream(fin, selectedRows, dev_ctx);
+    selectedRows->SyncIndex();
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
 
 namespace ops = paddle::operators;
 
diff --git a/paddle/fluid/operators/load_op_xpu.cc b/paddle/fluid/operators/load_op_xpu.cc
deleted file mode 100644
index ef7624e3397826aca4837c159aa9fe657e02fa22..0000000000000000000000000000000000000000
--- a/paddle/fluid/operators/load_op_xpu.cc
+++ /dev/null
@@ -1,29 +0,0 @@
-/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#ifdef PADDLE_WITH_XPU
-
-#include "paddle/fluid/operators/load_op.h"
-
-namespace ops = paddle::operators;
-
-REGISTER_OP_XPU_KERNEL(
-    load,
-    ops::LoadOpKernel<paddle::platform::XPUDeviceContext, float>,
-    ops::LoadOpKernel<paddle::platform::XPUDeviceContext, double>,
-    ops::LoadOpKernel<paddle::platform::XPUDeviceContext, int>,
-    ops::LoadOpKernel<paddle::platform::XPUDeviceContext, int8_t>,
-    ops::LoadOpKernel<paddle::platform::XPUDeviceContext, int64_t>);
-
-#endif  // PADDLE_WITH_XPU
diff --git a/paddle/fluid/operators/save_load_op_test.cc b/paddle/fluid/operators/save_load_op_test.cc
index 94ec912432f4f12669c7775e88286d79b2e0dfc5..e6bcf7a981401095e0165f8df2bbda4e460feb92 100644
--- a/paddle/fluid/operators/save_load_op_test.cc
+++ b/paddle/fluid/operators/save_load_op_test.cc
@@ -21,7 +21,9 @@ USE_OP_ITSELF(save);
 PD_DECLARE_KERNEL(save, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(save_sr, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(cast, CPU, ALL_LAYOUT);
-USE_CPU_ONLY_OP(load);
+USE_OP_ITSELF(load);
+PD_DECLARE_KERNEL(load, CPU, ALL_LAYOUT);
+PD_DECLARE_KERNEL(load_sr, CPU, ALL_LAYOUT);
 
 TEST(SaveLoadOp, CPU) {
   paddle::framework::Scope scope;
diff --git a/paddle/phi/kernels/load_kernel.cc b/paddle/phi/kernels/load_kernel.cc
new file mode 100644
index 0000000000000000000000000000000000000000..ec1c526517e677b704372b6ed5009f2d443ddf13
--- /dev/null
+++ b/paddle/phi/kernels/load_kernel.cc
@@ -0,0 +1,70 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/phi/kernels/load_kernel.h"
+
+#include <fstream>
+
+#include "paddle/phi/core/kernel_registry.h"
+#include "paddle/phi/core/serialization.h"
+#include "paddle/phi/kernels/cast_kernel.h"
+
+namespace phi {
+
+template <typename T, typename Context>
+void LoadKernel(const Context& dev_ctx,
+                const std::string& file_path,
+                int64_t seek,
+                const std::vector<int64_t>& shape,
+                bool load_as_fp16,
+                DenseTensor* out) {
+  // FIXME(yuyang18): We save variable to local file now, but we should change
+  // it to save an output stream.
+  std::ifstream fin(file_path, std::ios::binary);
+  PADDLE_ENFORCE_EQ(static_cast<bool>(fin),
+                    true,
+                    phi::errors::Unavailable(
+                        "Load operator fails to open file %s, please check "
+                        "whether the model file is complete or damaged.",
+                        file_path));
+  PADDLE_ENFORCE_NOT_NULL(out,
+                          phi::errors::InvalidArgument(
+                              "The variable to be loaded cannot be found."));
+
+  if (seek != -1) {
+    PADDLE_ENFORCE_GE(seek,
+                      0,
+                      phi::errors::InvalidArgument(
+                          "seek with tensor must be greater than or equal to 0"));
+    DeserializeFromStream(fin, out, dev_ctx, seek, shape);
+  } else {
+    DeserializeFromStream(fin, out, dev_ctx);
+  }
+
+  auto in_dtype = out->dtype();
+  auto out_dtype = load_as_fp16 ? DataType::FLOAT16 : in_dtype;
+  if (in_dtype != out_dtype) {
+    CastKernel<T>(dev_ctx, *out, out_dtype, out);
+  }
+}
+
+}  // namespace phi
+
+PD_REGISTER_KERNEL(load, CPU, ALL_LAYOUT, phi::LoadKernel, float) {}
+#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
+PD_REGISTER_KERNEL(load, GPU, ALL_LAYOUT, phi::LoadKernel, float) {}
+#endif
+#ifdef PADDLE_WITH_XPU
+PD_REGISTER_KERNEL(load, XPU, ALL_LAYOUT, phi::LoadKernel, float) {}
+#endif
diff --git a/paddle/phi/kernels/load_kernel.h b/paddle/phi/kernels/load_kernel.h
new file mode 100644
index 0000000000000000000000000000000000000000..e468ccd1a1dbcfeffb9c3d600d8c1e67582becff
--- /dev/null
+++ b/paddle/phi/kernels/load_kernel.h
@@ -0,0 +1,31 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <string>
+
+#include "paddle/phi/core/dense_tensor.h"
+
+namespace phi {
+
+template <typename T, typename Context>
+void LoadKernel(const Context& dev_ctx,
+                const std::string& file_path,
+                int64_t seek,
+                const std::vector<int64_t>& shape,
+                bool load_as_fp16,
+                DenseTensor* out);
+
+}  // namespace phi
diff --git a/paddle/phi/kernels/selected_rows/load_kernel.cc b/paddle/phi/kernels/selected_rows/load_kernel.cc
new file mode 100644
index 0000000000000000000000000000000000000000..f239c664e360acbfe242dbede7749c307013c369
--- /dev/null
+++ b/paddle/phi/kernels/selected_rows/load_kernel.cc
@@ -0,0 +1,54 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/phi/kernels/selected_rows/load_kernel.h"
+
+#include <fstream>
+
+#include "paddle/phi/core/kernel_registry.h"
+#include "paddle/phi/core/serialization.h"
+
+namespace phi {
+namespace sr {
+
+template <typename T, typename Context>
+void LoadKernel(const Context& dev_ctx,
+                const std::string& file_path,
+                int64_t seek,
+                const std::vector<int64_t>& shape,
+                bool load_as_fp16,
+                SelectedRows* out) {
+  // FIXME(yuyang18): We save variable to local file now, but we should change
+  // it to save an output stream.
+  std::ifstream fin(file_path, std::ios::binary);
+  PADDLE_ENFORCE_EQ(static_cast<bool>(fin),
+                    true,
+                    phi::errors::Unavailable(
+                        "Load operator fails to open file %s, please check "
+                        "whether the model file is complete or damaged.",
+                        file_path));
+  PADDLE_ENFORCE_NOT_NULL(out,
+                          phi::errors::InvalidArgument(
+                              "The variable to be loaded cannot be found."));
+
+  DeserializeFromStream(fin, out, dev_ctx);
+}
+
+}  // namespace sr
+}  // namespace phi
+
+PD_REGISTER_KERNEL(load_sr, CPU, ALL_LAYOUT, phi::sr::LoadKernel, float) {}
+#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
+PD_REGISTER_KERNEL(load_sr, GPU, ALL_LAYOUT, phi::sr::LoadKernel, float) {}
+#endif
diff --git a/paddle/phi/kernels/selected_rows/load_kernel.h b/paddle/phi/kernels/selected_rows/load_kernel.h
new file mode 100644
index 0000000000000000000000000000000000000000..f50884a9e87938eb44058bf926ff736da4fba1d7
--- /dev/null
+++ b/paddle/phi/kernels/selected_rows/load_kernel.h
@@ -0,0 +1,33 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <string>
+
+#include "paddle/phi/core/selected_rows.h"
+
+namespace phi {
+namespace sr {
+
+template <typename T, typename Context>
+void LoadKernel(const Context& dev_ctx,
+                const std::string& file_path,
+                int64_t seek,
+                const std::vector<int64_t>& shape,
+                bool load_as_fp16,
+                SelectedRows* out);
+
+}  // namespace sr
+}  // namespace phi
diff --git a/paddle/phi/ops/compat/load_sig.cc b/paddle/phi/ops/compat/load_sig.cc
new file mode 100644
index 0000000000000000000000000000000000000000..a697076a982bd0625529e0efd7ec8bd4e882c9e5
--- /dev/null
+++ b/paddle/phi/ops/compat/load_sig.cc
@@ -0,0 +1,33 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/phi/core/compat/op_utils.h"
+
+namespace phi {
+
+KernelSignature LoadOpArgumentMapping(const ArgumentMappingContext& ctx) {
+  if (ctx.IsDenseTensorOutput("Out")) {
+    return KernelSignature(
+        "load", {}, {"file_path", "seek", "shape", "load_as_fp16"}, {"Out"});
+  } else if (ctx.IsSelectedRowsOutput("Out")) {
+    return KernelSignature(
+        "load_sr", {}, {"file_path", "seek", "shape", "load_as_fp16"}, {"Out"});
+  } else {
+    return KernelSignature("unregistered", {}, {}, {});
+  }
+}
+
+}  // namespace phi
+
+PD_REGISTER_ARG_MAPPING_FN(load, phi::LoadOpArgumentMapping);