diff --git a/paddle/framework/tensor_util.h b/paddle/framework/tensor_util.h
index f541d2ba693a169d074c070dd794a2dd4e52aabf..091b63bf0f907a5449f08f0e36abb6577fa5e43e 100644
--- a/paddle/framework/tensor_util.h
+++ b/paddle/framework/tensor_util.h
@@ -116,8 +116,8 @@ inline void Copy(const Tensor& src, const platform::Place& dst_place,
  * @param[in] src  The external tensor.
  * @param[in] ctx  The device context contains device resources.
  *
- * * @note CopyFromVector assumes that the tensor has been resized
- *        before invoking.
+ * * @note CopyFromVector will resize dst to a 1D tensor with the same
+ *        size as src.
  */
 template <typename T>
 inline void CopyFromVector(const std::vector<T>& src,
diff --git a/paddle/operators/assign_value_op.cc b/paddle/operators/assign_value_op.cc
new file mode 100644
index 0000000000000000000000000000000000000000..d5671c1183a0f58d2aedb0723bd462684ac5636e
--- /dev/null
+++ b/paddle/operators/assign_value_op.cc
@@ -0,0 +1,73 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/operators/assign_value_op.h"
+
+namespace paddle {
+namespace operators {
+
+class AssignValueOp : public framework::OperatorWithKernel {
+ public:
+  AssignValueOp(const std::string &type,
+                const framework::VariableNameMap &inputs,
+                const framework::VariableNameMap &outputs,
+                const framework::AttributeMap &attrs)
+      : OperatorWithKernel(type, inputs, outputs, attrs) {}
+
+  void InferShape(framework::InferShapeContext *ctx) const override {
+    PADDLE_ENFORCE(ctx->HasOutput("Out"),
+                   "Output(Out) of AssignValueOp should not be null.");
+    auto shape = ctx->Attrs().Get<std::vector<int>>("shape");
+    ctx->SetOutputDim("Out", framework::make_ddim(shape));
+  }
+
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext &ctx) const override {
+    return framework::OpKernelType(
+        framework::proto::DataType(ctx.Attr<int>("dtype")), ctx.GetPlace());
+  }
+};
+
+class AssignValueOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  AssignValueOpMaker(OpProto *proto, OpAttrChecker *op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
+    AddOutput("Out", "(Tensor) Output tensor of assign_value operator.");
+    AddAttr<std::vector<int>>("shape",
+                              "(vector<int>) "
+                              "Shape of values.");
+    AddAttr<int>("dtype", "data type of values")
+        .InEnum({framework::proto::DataType::INT32,
+                 framework::proto::DataType::FP32});
+    AddAttr<std::vector<float>>("fp32_values", "store the float values")
+        .SetDefault({});
+    AddAttr<std::vector<int>>("int32_values", "store the int values")
+        .SetDefault({});
+    AddComment(R"DOC(
+AssignValue operator
+
+$$Out = values$$
+)DOC");
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+
+REGISTER_OPERATOR(assign_value, ops::AssignValueOp, ops::AssignValueOpMaker);
+REGISTER_OP_CPU_KERNEL(assign_value, ops::AssignValueKernel<int>,
+                       ops::AssignValueKernel<float>);
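For orientation, AssignValueOpMaker above fixes the operator's attribute contract: a dtype enum restricted to INT32/FP32, a target shape, and the flattened values carried in either fp32_values or int32_values. The following is a minimal Python sketch, not part of the patch, showing how that contract could be exercised directly through the Block.create_var/Block.append_op APIs of this fluid version; the variable names and the use of the raw block API are illustrative assumptions, and the supported entry point is fluid.layers.assign, changed further down in this diff.

import numpy
import paddle.v2.fluid as fluid
import paddle.v2.fluid.framework as framework

# Illustrative data; any array small enough to be embedded as an attribute.
data = numpy.arange(6, dtype=numpy.float32).reshape(2, 3)

block = fluid.default_main_program().global_block()
out = block.create_var(name="assign_value_out", dtype="float32")  # hypothetical name

# Mirrors the attributes declared by AssignValueOpMaker: dtype, shape,
# and the flattened values in the attribute that matches the dtype.
block.append_op(
    type="assign_value",
    outputs={"Out": [out]},
    attrs={
        "dtype": framework.convert_np_dtype_to_dtype_(data.dtype),
        "shape": list(data.shape),
        "fp32_values": [float(v) for v in data.flat],
    })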
diff --git a/paddle/operators/assign_value_op.cu.cc b/paddle/operators/assign_value_op.cu.cc
new file mode 100644
index 0000000000000000000000000000000000000000..b17e20150053cea4c6b9ed6a5f222f77f4a4bd36
--- /dev/null
+++ b/paddle/operators/assign_value_op.cu.cc
@@ -0,0 +1,19 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/operators/assign_value_op.h"
+
+namespace ops = paddle::operators;
+REGISTER_OP_CUDA_KERNEL(assign_value, ops::AssignValueKernel<int>,
+                        ops::AssignValueKernel<float>);
diff --git a/paddle/operators/assign_value_op.h b/paddle/operators/assign_value_op.h
new file mode 100644
index 0000000000000000000000000000000000000000..db2e43077999fa0f9aaada74026dd701ab2bf464
--- /dev/null
+++ b/paddle/operators/assign_value_op.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include "paddle/framework/eigen.h"
+#include "paddle/framework/op_registry.h"
+#include "paddle/platform/enforce.h"
+
+namespace paddle {
+namespace operators {
+
+template <typename T>
+class AssignValueKernel : public framework::OpKernel<T> {
+ public:
+  virtual void Compute(const framework::ExecutionContext& ctx) const {
+    auto shape = ctx.Attr<std::vector<int>>("shape");
+    auto* out = ctx.Output<framework::Tensor>("Out");
+    int dtype = ctx.Attr<int>("dtype");
+    const char* value_name = nullptr;
+    switch (dtype) {
+      case framework::proto::DataType::INT32:
+        value_name = "int32_values";
+        break;
+      case framework::proto::DataType::FP32:
+        value_name = "fp32_values";
+        break;
+      default:
+        PADDLE_THROW("Unsupported dtype for assign_value_op: %d", dtype);
+        break;
+    }
+    auto values = ctx.Attr<std::vector<T>>(value_name);
+    framework::CopyFromVector(values, ctx.device_context(), out);
+    out->Resize(framework::make_ddim(shape));
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
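Note how Compute leans on the revised CopyFromVector contract documented in tensor_util.h above: the output is first filled as a 1-D tensor with as many elements as the attribute vector, and only afterwards resized to the shape attribute. A rough numpy analogue of that two-step flow, purely illustrative and not Paddle API:

import numpy

def assign_value_reference(values, shape, dtype):
    """Numpy analogue of AssignValueKernel::Compute."""
    out = numpy.array(values, dtype=dtype)  # CopyFromVector: flat 1-D copy
    return out.reshape(shape)               # out->Resize(make_ddim(shape))

# e.g. the int32 path chosen when dtype == INT32
print(assign_value_reference([1, 2, 3, 4, 5, 6], (2, 3), numpy.int32))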
diff --git a/python/paddle/v2/fluid/layers/tensor.py b/python/paddle/v2/fluid/layers/tensor.py
index 5f12ecfc14f7521948acdf27f1d6249e8052abc5..57668a7983b37780471e90afc83b5add95c33fad 100644
--- a/python/paddle/v2/fluid/layers/tensor.py
+++ b/python/paddle/v2/fluid/layers/tensor.py
@@ -1,5 +1,9 @@
 from ..layer_helper import LayerHelper
 from ..param_attr import ParamAttr
+from ..framework import convert_np_dtype_to_dtype_
+from ..framework import Variable
+from ..core import DataType
+import numpy
 
 __all__ = [
     'create_tensor', 'create_parameter', 'cast', 'concat', 'sums', 'assign',
@@ -121,7 +125,7 @@ def assign(input, output):
     This function copies the *input* Variable to the *output* Variable.
 
     Args:
-        input(Variable): The source variable
+        input(Variable|numpy.ndarray): The source variable
         output(Variable): The destination variable
 
     Returns:
@@ -134,11 +138,37 @@
           fluid.layers.assign(hidden, out)
     """
     helper = LayerHelper('assign', **locals())
-    helper.append_op(
-        type='scale',
-        inputs={'X': [input]},
-        outputs={'Out': [output]},
-        attrs={'scale': 1.0})
+    if isinstance(input, Variable):
+        helper.append_op(
+            type='scale',
+            inputs={'X': [input]},
+            outputs={'Out': [output]},
+            attrs={'scale': 1.0})
+    elif isinstance(input, numpy.ndarray):
+        dtype = convert_np_dtype_to_dtype_(input.dtype)
+        if dtype == DataType.FP32:
+            value_name = "fp32_values"
+            values = [float(v) for v in input.flat]
+        elif dtype == DataType.INT32:
+            value_name = "int32_values"
+            values = [int(v) for v in input.flat]
+        else:
+            raise ValueError("Unsupported dtype %s" % input.dtype)
+        if input.size > 1024 * 1024:
+            raise ValueError("The size of input is too big. Please consider "
+                             "saving it to file and 'load_op' to load it")
+
+        helper.append_op(
+            type='assign_value',
+            outputs={'Out': [output]},
+            attrs={
+                'dtype': dtype,
+                'shape': list(input.shape),
+                value_name: values
+            })
+    else:
+        raise ValueError("Wrong type for assign input: %s" % type(input))
+
     return output
diff --git a/python/paddle/v2/fluid/tests/test_assign_value_op.py b/python/paddle/v2/fluid/tests/test_assign_value_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..51b99d091825ab3edc2175202ae5d8a364a54378
--- /dev/null
+++ b/python/paddle/v2/fluid/tests/test_assign_value_op.py
@@ -0,0 +1,40 @@
+import paddle.v2.fluid as fluid
+import paddle.v2.fluid.layers as layers
+import op_test
+import numpy
+import unittest
+import paddle.v2.fluid.framework as framework
+
+
+class TestAssignValueOp(op_test.OpTest):
+    def setUp(self):
+        self.op_type = "assign_value"
+        x = numpy.random.random(size=(2, 5)).astype(numpy.float32)
+        self.inputs = {}
+        self.outputs = {'Out': x}
+        self.attrs = {
+            'shape': x.shape,
+            'dtype': framework.convert_np_dtype_to_dtype_(x.dtype),
+            'fp32_values': [float(v) for v in x.flat]
+        }
+
+    def test_forward(self):
+        self.check_output()
+
+    def test_assign(self):
+        val = (
+            -100 + 200 * numpy.random.random(size=(2, 5))).astype(numpy.int32)
+        x = layers.create_tensor(dtype="float32")
+        layers.assign(input=val, output=x)
+        exe = fluid.Executor(fluid.CPUPlace())
+        fetched_x = exe.run(fluid.default_main_program(),
+                            feed={},
+                            fetch_list=[x])[0]
+        self.assertTrue(
+            numpy.array_equal(fetched_x, val),
+            "fetched_x=%s val=%s" % (fetched_x, val))
+        self.assertEqual(fetched_x.dtype, val.dtype)
+
+
+if __name__ == '__main__':
+    unittest.main()
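The test above drives the int32 branch of the new numpy path in fluid.layers.assign; a float32 counterpart, sketched along the same lines (illustrative only, not part of the patch), would look like:

import numpy
import paddle.v2.fluid as fluid
import paddle.v2.fluid.layers as layers

val = numpy.random.random(size=(3, 4)).astype(numpy.float32)
x = layers.create_tensor(dtype="float32")
layers.assign(input=val, output=x)  # ndarray input takes the assign_value path

exe = fluid.Executor(fluid.CPUPlace())
fetched = exe.run(fluid.default_main_program(), feed={}, fetch_list=[x])[0]
assert numpy.allclose(fetched, val)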