diff --git a/paddle/operators/assign_value_op.cc b/paddle/operators/assign_value_op.cc
new file mode 100644
index 0000000000000000000000000000000000000000..a0bce99ff3df4f9d921a0d40e3abd900b2f6f663
--- /dev/null
+++ b/paddle/operators/assign_value_op.cc
@@ -0,0 +1,82 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/operators/assign_value_op.h"
+
+namespace paddle {
+namespace operators {
+
+class AssignValueOp : public framework::OperatorWithKernel {
+ public:
+  AssignValueOp(const std::string &type,
+                const framework::VariableNameMap &inputs,
+                const framework::VariableNameMap &outputs,
+                const framework::AttributeMap &attrs)
+      : OperatorWithKernel(type, inputs, outputs, attrs) {}
+
+  void InferShape(framework::InferShapeContext *ctx) const override {
+    PADDLE_ENFORCE(ctx->HasOutput("Out"),
+                   "Output(Out) of AssignValueOp should not be null.");
+    auto shape = ctx->Attrs().Get<std::vector<int>>("shape");
+    ctx->SetOutputDim("Out", framework::make_ddim(shape));
+  }
+
+ protected:
+  framework::OpKernelType GetActualKernelType(
+      const framework::ExecutionContext &ctx) const override {
+    return framework::OpKernelType(
+        framework::proto::DataType(ctx.Attr<int>("dtype")), ctx.GetPlace());
+  }
+};
+
+class AssignValueOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  AssignValueOpMaker(OpProto *proto, OpAttrChecker *op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
+    AddOutput("Out", "(Tensor) Output tensor of assign_value operator.");
+    AddAttr<std::vector<int>>("shape",
+                              "(vector<int>) "
+                              "Shape of values.");
+    AddAttr<int>("dtype", "data type of values")
+        .InEnum({framework::proto::DataType::INT32,
+                 framework::proto::DataType::FP32});
+    AddAttr<std::vector<float>>("fp32_values", "store the float values")
+        .SetDefault({});
+    AddAttr<std::vector<int>>("int32_values", "store the int values")
+        .SetDefault({});
+    AddComment(R"DOC(
+AssignValue operator
+
+$$Out = values$$
+)DOC");
+  }
+};
+
+template <typename T>
+class AssignValueCPUKernel : public AssignValueKernel<T> {
+ protected:
+  virtual void Copy(void *dst, const void *src, size_t size,
+                    const framework::ExecutionContext &ctx) const {
+    std::memcpy(dst, src, size);
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+
+REGISTER_OPERATOR(assign_value, ops::AssignValueOp, ops::AssignValueOpMaker);
+REGISTER_OP_CPU_KERNEL(assign_value, ops::AssignValueCPUKernel<int>,
+                       ops::AssignValueCPUKernel<float>)
diff --git a/paddle/operators/assign_value_op.cu.cc b/paddle/operators/assign_value_op.cu.cc
new file mode 100644
index 0000000000000000000000000000000000000000..8afb032037fe415a128919939fd14b5ac46faa4b
--- /dev/null
+++ b/paddle/operators/assign_value_op.cu.cc
@@ -0,0 +1,36 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/operators/assign_value_op.h"
+
+namespace paddle {
+namespace operators {
+
+template <typename T>
+class AssignValueGPUKernel : public AssignValueKernel<T> {
+ protected:
+  virtual void Copy(void* dst, const void* src, size_t size,
+                    const framework::ExecutionContext& ctx) const {
+    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
+    paddle::platform::GpuMemcpyAsync(dst, src, size, cudaMemcpyHostToDevice,
+                                     dev_ctx.stream());
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+REGISTER_OP_CUDA_KERNEL(assign_value, ops::AssignValueGPUKernel<int>,
+                        ops::AssignValueGPUKernel<float>);
diff --git a/paddle/operators/assign_value_op.h b/paddle/operators/assign_value_op.h
new file mode 100644
index 0000000000000000000000000000000000000000..bdb5bce272fb997e5fb1fa82540f56c42117b9e4
--- /dev/null
+++ b/paddle/operators/assign_value_op.h
@@ -0,0 +1,55 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include "paddle/framework/eigen.h"
+#include "paddle/framework/op_registry.h"
+#include "paddle/platform/enforce.h"
+
+namespace paddle {
+namespace operators {
+
+template <typename T>
+class AssignValueKernel : public framework::OpKernel<T> {
+ public:
+  virtual void Compute(const framework::ExecutionContext& ctx) const {
+    auto shape = ctx.Attr<std::vector<int>>("shape");
+    auto* out = ctx.Output<framework::Tensor>("Out");
+    out->Resize(framework::make_ddim(shape));
+    auto* dst = out->mutable_data<T>(ctx.GetPlace());
+    int dtype = ctx.Attr<int>("dtype");
+    const char* value_name = nullptr;
+    switch (dtype) {
+      case framework::proto::DataType::INT32:
+        value_name = "int32_values";
+        break;
+      case framework::proto::DataType::FP32:
+        value_name = "fp32_values";
+        break;
+      default:
+        PADDLE_THROW("Unsupported dtype for assign_value_op: %d", dtype);
+        break;
+    }
+    auto values = ctx.Attr<std::vector<T>>(value_name);
+    Copy(dst, values.data(), sizeof(T) * values.size(), ctx);
+  }
+
+ protected:
+  virtual void Copy(void* dst, const void* src, size_t size,
+                    const framework::ExecutionContext& ctx) const = 0;
+};
+
+}  // namespace operators
+}  // namespace paddle
diff --git a/python/paddle/v2/fluid/layers/tensor.py b/python/paddle/v2/fluid/layers/tensor.py
index 5f12ecfc14f7521948acdf27f1d6249e8052abc5..639f8b03ede49eebee446d2e0a25ebd5beeb4135 100644
--- a/python/paddle/v2/fluid/layers/tensor.py
+++ b/python/paddle/v2/fluid/layers/tensor.py
@@ -1,5 +1,9 @@
 from ..layer_helper import LayerHelper
 from ..param_attr import ParamAttr
+from ..framework import convert_np_dtype_to_dtype_
+from ..framework import Variable
+from ..core import DataType
+import numpy
 
 __all__ = [
     'create_tensor', 'create_parameter', 'cast', 'concat', 'sums', 'assign',
@@ -121,7 +125,7 @@ def assign(input, output):
     This function copies the *input* Variable to the *output* Variable.
 
     Args:
-        input(Variable): The source variable
+        input(Variable|numpy.ndarray): The source variable
         output(Variable): The destination variable
 
     Returns:
@@ -134,11 +138,32 @@
           fluid.layers.assign(hidden, out)
     """
     helper = LayerHelper('assign', **locals())
-    helper.append_op(
-        type='scale',
-        inputs={'X': [input]},
-        outputs={'Out': [output]},
-        attrs={'scale': 1.0})
+    if isinstance(input, Variable):
+        helper.append_op(
+            type='scale',
+            inputs={'X': [input]},
+            outputs={'Out': [output]},
+            attrs={'scale': 1.0})
+    elif isinstance(input, numpy.ndarray):
+        dtype = convert_np_dtype_to_dtype_(input.dtype)
+        if dtype == DataType.FP32:
+            value_name = "fp32_values"
+        elif dtype == DataType.INT32:
+            value_name = "int32_values"
+        else:
+            raise ValueError("Unsupported dtype %s" % input.dtype)
+
+        helper.append_op(
+            type='assign_value',
+            outputs={'Out': [output]},
+            attrs={
+                'dtype': dtype,
+                'shape': list(input.shape),
+                value_name: [float(v) for v in input.flat]
+            })
+    else:
+        raise ValueError("Wrong type for assign input: %s" % type(input))
+
+    return output
diff --git a/python/paddle/v2/fluid/tests/test_assign_value_op.py b/python/paddle/v2/fluid/tests/test_assign_value_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3f3f87839a8278f859f742b3918a517ac46c72e
--- /dev/null
+++ b/python/paddle/v2/fluid/tests/test_assign_value_op.py
@@ -0,0 +1,38 @@
+import paddle.v2.fluid as fluid
+import paddle.v2.fluid.layers as layers
+import op_test
+import numpy
+import unittest
+import paddle.v2.fluid.framework as framework
+
+
+class TestAssignValueOp(op_test.OpTest):
+    def setUp(self):
+        self.op_type = "assign_value"
+        x = numpy.random.random(size=(2, 5)).astype(numpy.float32)
+        self.inputs = {}
+        self.outputs = {'Out': x}
+        self.attrs = {
+            'shape': x.shape,
+            'dtype': framework.convert_np_dtype_to_dtype_(x.dtype),
+            'fp32_values': [float(v) for v in x.flat]
+        }
+
+    def test_forward(self):
+        self.check_output()
+
+    def test_assign(self):
+        val = numpy.random.random(size=(2, 5)).astype(numpy.float32)
+        x = layers.create_tensor(dtype="float32")
+        layers.assign(input=val, output=x)
+        exe = fluid.Executor(fluid.CPUPlace())
+        fetched_x = exe.run(fluid.default_main_program(),
+                            feed={},
+                            fetch_list=[x])
+        self.assertTrue(
+            numpy.allclose(fetched_x, val),
+            "fetch_x=%s val=%s" % (fetched_x, val))
+
+
+if __name__ == '__main__':
+    unittest.main()
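
Note: a minimal usage sketch of the numpy path that this change adds to layers.assign, assuming only the paddle.v2.fluid APIs already exercised in test_assign_value_op.py above; the variable names (val, out, fetched) are illustrative, not part of the change.

    import numpy
    import paddle.v2.fluid as fluid
    import paddle.v2.fluid.layers as layers

    # Passing an ndarray makes assign() emit an assign_value op
    # (constant baked into the program) instead of a scale op.
    val = numpy.array([[1.0, 2.0], [3.0, 4.0]], dtype=numpy.float32)
    out = layers.create_tensor(dtype="float32")
    layers.assign(input=val, output=out)

    exe = fluid.Executor(fluid.CPUPlace())
    fetched, = exe.run(fluid.default_main_program(), feed={}, fetch_list=[out])
    assert numpy.allclose(fetched, val)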