Unverified commit 50d670ee, authored by fengjiayi, committed by GitHub

Unify dtype and datatype (#5869)

* Change all `data_type` in Python to `dtype`

* Change `data_type` in C++ to `dtype`

* Refine
Parent 1ab1b092
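For user code this is purely a keyword-argument rename: the old `data_type=` argument in the fluid layer API becomes `dtype=`. A minimal sketch of the change at the call sites touched in this diff (the fit-a-line setup below is taken from the test program further down; no behavior changes, only the keyword name):

```python
import paddle.v2.fluid.layers as layers

# Before this commit:
#   x = layers.data(name='x', shape=[13], data_type='float32')
# After this commit, the same layer is declared with `dtype`:
x = layers.data(name='x', shape=[13], dtype='float32')
y = layers.data(name='y', shape=[1], dtype='float32')

y_predict = layers.fc(input=x, size=1, act=None)
cost = layers.square_error_cost(input=y_predict, label=y)
avg_cost = layers.mean(x=cost)
```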
......@@ -522,7 +522,7 @@ ParamGradInfoMap AppendBackward(
new OpDescBind("fill_constant", {}, {{"Out", {fill_one_op_out}}},
{{"shape", std::vector<int>{1}},
{"value", static_cast<float>(1.0)},
{"data_type", target.GetDataType()}}));
{"dtype", target.GetDataType()}}));
// infer var type of fill_one_op
fill_one_op->InferVarType(root_block);
......
......@@ -302,7 +302,7 @@ LoDTensor TensorArray::Stack() const {
const auto& first_dims = values_.front().dims();
// check all the values have the same shape
// TODO(superjom) check the same dtypes
// TODO(superjom) check the same data_type
for (size_t idx = 1; idx < size(); idx++) {
const auto& value_dims = values_[idx].dims();
PADDLE_ENFORCE_EQ(first_dims, value_dims);
......
......@@ -25,8 +25,8 @@ class CastOpProtoMaker : public framework::OpProtoAndCheckerMaker {
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "The input tensor of cast op");
AddOutput("Out", "The output tensor of cast op");
AddAttr<int>("out_data_type", "output data type");
AddAttr<int>("in_data_type", "input data type");
AddAttr<int>("out_dtype", "output data type");
AddAttr<int>("in_dtype", "input data type");
AddComment(R"DOC(
Cast Operator.
......@@ -58,8 +58,8 @@ class CastOpGradMaker : public framework::SingleGradOpDescMaker {
grad->SetType("cast");
grad->SetInput("X", OutputGrad("Out"));
grad->SetOutput("Out", InputGrad("X"));
grad->SetAttr("out_data_type", GetAttr("in_data_type"));
grad->SetAttr("in_data_type", GetAttr("out_data_type"));
grad->SetAttr("out_dtype", GetAttr("in_dtype"));
grad->SetAttr("in_dtype", GetAttr("out_dtype"));
return std::unique_ptr<framework::OpDescBind>(grad);
}
};
......
......@@ -55,7 +55,7 @@ class CastOpKernel : public framework::OpKernel<InT> {
auto* in = context.Input<framework::Tensor>("X");
auto* out = context.Output<framework::Tensor>("Out");
framework::VisitDataType(
static_cast<framework::DataType>(context.Attr<int>("out_data_type")),
static_cast<framework::DataType>(context.Attr<int>("out_dtype")),
CastOpFunctor<Place, InT>(in, out, context.device_context()));
}
};
......
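On the Python side, the renamed cast attributes are filled in by `layers.cast` (whose `data_type` parameter is also renamed later in this diff). A hedged sketch, assuming the fluid API exactly as it appears in this commit:

```python
import paddle.v2.fluid.layers as layers

x = layers.data(name='x', shape=[32], dtype='float32')
# layers.cast creates the output variable and emits the cast op with the
# renamed attributes, roughly:
#   attrs={'in_dtype': x.dtype, 'out_dtype': out.dtype}
y = layers.cast(x=x, dtype='int64')
```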
......@@ -52,7 +52,7 @@ class FillConstantBatchSizeLikeOp : public framework::OperatorWithKernel {
framework::OpKernelType GetKernelType(
const framework::ExecutionContext &ctx) const override {
return framework::OpKernelType(
static_cast<framework::DataType>(ctx.Attr<int>("data_type")),
static_cast<framework::DataType>(ctx.Attr<int>("dtype")),
ctx.device_context());
}
};
......@@ -63,7 +63,7 @@ class FillConstantBatchSizeLikeOpMaker
FillConstantBatchSizeLikeOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: framework::OpProtoAndCheckerMaker(proto, op_checker) {
AddAttr<int>("data_type",
AddAttr<int>("dtype",
"(int, default 5 (FP32)) "
"Output data type")
.SetDefault(framework::DataType::FP32);
......
......@@ -34,7 +34,7 @@ class FillConstantOp : public framework::OperatorBase {
using framework::OperatorBase::OperatorBase;
void Run(const framework::Scope &scope,
const platform::DeviceContext &dev_ctx) const override {
auto data_type = static_cast<framework::DataType>(Attr<int>("data_type"));
auto data_type = static_cast<framework::DataType>(Attr<int>("dtype"));
auto value = Attr<float>("value");
auto force_cpu = Attr<bool>("force_cpu");
auto &out =
......@@ -55,7 +55,7 @@ class FillConstantOpMaker : public framework::OpProtoAndCheckerMaker {
FillConstantOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: framework::OpProtoAndCheckerMaker(proto, op_checker) {
AddAttr<int>("data_type",
AddAttr<int>("dtype",
"(int, default 5 (FP32)) "
"Output data type")
.SetDefault(framework::DataType::FP32);
......
......@@ -60,7 +60,7 @@ class GaussianRandomOp : public framework::OperatorWithKernel {
framework::OpKernelType GetKernelType(
const framework::ExecutionContext& ctx) const override {
return framework::OpKernelType(
static_cast<framework::DataType>(ctx.Attr<int>("data_type")),
static_cast<framework::DataType>(ctx.Attr<int>("dtype")),
ctx.device_context());
}
};
......@@ -88,7 +88,7 @@ class GaussianRandomOpMaker : public framework::OpProtoAndCheckerMaker {
"Random seed of generator."
"0 means use system wide seed.")
.SetDefault(0);
AddAttr<int>("data_type",
AddAttr<int>("dtype",
"(int, default 5(FP32)) "
"Output data type.")
.SetDefault(framework::DataType::FP32);
......
......@@ -49,7 +49,7 @@ class NCCLInitOpMaker : public framework::OpProtoAndCheckerMaker {
AddOutput("Communicator",
"Create Communicator for communicating between gpus");
AddAttr<std::vector<int>>("gpus", "(vector<int>) GPU id lists");
AddAttr<int>("data_type",
AddAttr<int>("dtype",
"(int, default 5 (FP32)) "
"Output data type")
.SetDefault(framework::DataType::FP32);
......
......@@ -401,7 +401,7 @@ class RecurrentGradOp : public RecurrentBase {
auto &inside_tensor = cur_scope.FindVar(inside_grad_name)
->Get<framework::LoDTensor>();
framework::AttributeMap attrs;
attrs["data_type"] = framework::ToDataType(inside_tensor.type());
attrs["dtype"] = framework::ToDataType(inside_tensor.type());
attrs["shape"] = framework::vectorize2int(inside_tensor.dims());
attrs["value"] = 0.0f;
......
......@@ -62,7 +62,7 @@ class RNNMemoryHelperOpInfoMaker : public framework::OpProtoAndCheckerMaker {
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "");
AddOutput("Out", "");
AddAttr<int>("data_type",
AddAttr<int>("dtype",
"(int, default 5 (FP32)) "
"Output data type")
.SetDefault(framework::DataType::FP32);
......@@ -95,7 +95,7 @@ class RNNMemoryHelperGradOp : public framework::OperatorBase {
auto &in_var_tensor = in_var->Get<framework::LoDTensor>();
framework::AttributeMap attrs;
attrs["data_type"] = framework::ToDataType(in_var_tensor.type());
attrs["dtype"] = framework::ToDataType(in_var_tensor.type());
attrs["shape"] = framework::vectorize2int(in_var_tensor.dims());
attrs["value"] = 0.0f;
......@@ -121,7 +121,7 @@ class RNNMemoryHelperGradOpInfoMaker
AddInput("X", "");
AddInput("Out", "");
AddOutput(framework::GradVarName("X"), "");
AddAttr<int>("data_type",
AddAttr<int>("dtype",
"(int, default 5 (FP32)) "
"Output data type")
.SetDefault(framework::DataType::FP32);
......
......@@ -66,7 +66,7 @@ class UniformRandomOp : public framework::OperatorWithKernel {
framework::OpKernelType GetKernelType(
const framework::ExecutionContext& ctx) const override {
return framework::OpKernelType(
static_cast<framework::DataType>(ctx.Attr<int>("data_type")),
static_cast<framework::DataType>(ctx.Attr<int>("dtype")),
ctx.device_context());
}
};
......@@ -99,7 +99,7 @@ uniform distribution.
"Random seed used for generating samples. "
"0 means use a seed generated by the system.")
.SetDefault(0);
AddAttr<int>("data_type", "(int, default 5(FP32)) Output tensor data type")
AddAttr<int>("dtype", "(int, default 5(FP32)) Output tensor data type")
.SetDefault(framework::DataType::FP32);
}
};
......
......@@ -180,7 +180,7 @@ class WhileGradOp : public framework::OperatorBase {
if (var->IsType<LoDTensor>()) {
auto &inside_tensor = var->Get<framework::LoDTensor>();
framework::AttributeMap attrs;
attrs["data_type"] = framework::ToDataType(inside_tensor.type());
attrs["dtype"] = framework::ToDataType(inside_tensor.type());
attrs["shape"] = framework::vectorize2int(inside_tensor.dims());
attrs["value"] = 0.0f;
......
......@@ -202,9 +202,9 @@ void BindVarDsec(py::module &m) {
},
py::return_value_policy::reference)
.def("set_shape", &VarDescBind::SetShape)
.def("set_data_type", &VarDescBind::SetDataType)
.def("set_dtype", &VarDescBind::SetDataType)
.def("shape", &VarDescBind::Shape, py::return_value_policy::reference)
.def("data_type", &VarDescBind::GetDataType)
.def("dtype", &VarDescBind::GetDataType)
.def("lod_level", &VarDescBind::GetLodLevel)
.def("set_lod_level", &VarDescBind::SetLoDLevel)
.def("type", &VarDescBind::GetType)
......
......@@ -8,7 +8,7 @@ def _clone_var_in_block_(block, var):
return block.create_var(
name=var.name,
shape=var.shape,
dtype=var.data_type,
dtype=var.dtype,
type=var.type,
lod_level=var.lod_level,
persistable=True)
......@@ -57,7 +57,7 @@ class Evaluator(object):
attrs={
"shape": g_var.shape,
"value": .0,
"data_type": 5,
"dtype": 5,
})
block.append_op(
type="scale", inputs={"X": zeros}, outputs={"Out": g_var})
......@@ -93,7 +93,7 @@ class Accuracy(Evaluator):
def _update_ops(self, input, label, k=1, **kwargs):
block = self._main_program.global_block()
topk_out = block.create_var(dtype=input.data_type)
topk_out = block.create_var(dtype=input.dtype)
topk_indices = block.create_var(dtype="int64")
block.append_op(
type="top_k",
......@@ -122,16 +122,16 @@ class Accuracy(Evaluator):
inputs={"X": [self._states["Total"]]},
outputs={"Out": [self._states["Total"]]},
attrs={
"in_data_type": 5, # float32
"out_data_type": 2, #int32
"in_dtype": 5, # float32
"out_dtype": 2, # int32
})
block.append_op(
type="cast",
inputs={"X": [self._states["Correct"]]},
outputs={"Out": [self._states["Correct"]]},
attrs={
"in_data_type": 5,
"out_data_type": 2,
"in_dtype": 5,
"out_dtype": 2,
})
block.append_op(
......@@ -153,7 +153,7 @@ class Accuracy(Evaluator):
else:
eval_program = Program()
block = eval_program.global_block()
eval_out = block.create_var(dtype=self._states["Total"].data_type)
eval_out = block.create_var(dtype=self._states["Total"].dtype)
e_total = _clone_var_in_block_(block, self._states["Total"])
e_correct = _clone_var_in_block_(block, self._states["Correct"])
block.append_op(
......@@ -161,16 +161,16 @@ class Accuracy(Evaluator):
inputs={"X": [e_total]},
outputs={"Out": [e_total]},
attrs={
"in_data_type": 2, #int32
"out_data_type": 5, #float32
"in_dtype": 2, # int32
"out_dtype": 5, # float32
})
block.append_op(
type="cast",
inputs={"X": [e_correct]},
outputs={"Out": [e_correct]},
attrs={
"in_data_type": 2,
"out_data_type": 5,
"in_dtype": 2,
"out_dtype": 5,
})
block.append_op(
type="elementwise_div",
......
......@@ -99,9 +99,9 @@ class Variable(object):
if not isinstance(dtype, core.DataType):
dtype = convert_np_dtype_to_dtype_(dtype)
if is_new_var:
self.desc.set_data_type(dtype)
self.desc.set_dtype(dtype)
else:
old_dtype = self.data_type
old_dtype = self.dtype
if dtype != old_dtype:
raise ValueError("Variable {0} has been created before. "
"The previous data type is {1}; the new "
......@@ -162,8 +162,8 @@ class Variable(object):
return tuple(self.desc.shape())
@property
def data_type(self):
return self.desc.data_type()
def dtype(self):
return self.desc.dtype()
@property
def lod_level(self):
......
......@@ -93,7 +93,7 @@ class ConstantInitializer(Initializer):
outputs={"Out": var},
attrs={
"shape": var.shape,
"data_type": int(var.data_type),
"dtype": int(var.dtype),
"value": self._value
})
var.op = op
......@@ -140,7 +140,7 @@ class UniformInitializer(Initializer):
outputs={"Out": var},
attrs={
"shape": var.shape,
"data_type": int(var.data_type),
"dtype": int(var.dtype),
"min": self._low,
"max": self._high,
"seed": self._seed
......@@ -188,7 +188,7 @@ class NormalInitializer(Initializer):
outputs={"Out": var},
attrs={
"shape": var.shape,
"data_type": int(var.data_type),
"dtype": int(var.dtype),
"mean": self._mean,
"std": self._std_dev,
"seed": self._seed
......@@ -265,7 +265,7 @@ class XavierInitializer(Initializer):
outputs={"Out": var},
attrs={
"shape": var.shape,
"data_type": int(var.data_type),
"dtype": int(var.dtype),
"min": -limit,
"max": limit,
"seed": self._seed
......@@ -278,7 +278,7 @@ class XavierInitializer(Initializer):
outputs={"Out": var},
attrs={
"shape": var.shape,
"data_type": int(var.data_type),
"dtype": int(var.dtype),
"mean": 0.0,
"std": std,
"seed": self._seed
......@@ -348,7 +348,7 @@ class MSRAInitializer(Initializer):
outputs={"Out": var},
attrs={
"shape": var.shape,
"data_type": int(var.data_type),
"dtype": int(var.dtype),
"min": -limit,
"max": limit,
"seed": self._seed
......@@ -361,7 +361,7 @@ class MSRAInitializer(Initializer):
outputs={"Out": var},
attrs={
"shape": var.shape,
"data_type": int(var.data_type),
"dtype": int(var.dtype),
"mean": 0.0,
"std": std,
"seed": self._seed
......
......@@ -23,7 +23,7 @@ def _clone_var_in_block_(block, var):
return block.create_var(
name=var.name,
shape=var.shape,
dtype=var.data_type,
dtype=var.dtype,
type=var.type,
lod_level=var.lod_level,
persistable=True)
......
......@@ -108,8 +108,8 @@ class LayerHelper(object):
dtype = None
for each in inputs:
if dtype is None:
dtype = each.data_type
elif dtype != each.data_type:
dtype = each.dtype
elif dtype != each.dtype:
raise ValueError("Data Type mismatch")
return dtype
......@@ -149,7 +149,7 @@ class LayerHelper(object):
self.startup_program.global_block().create_var(
name=var.name,
type=var.type,
dtype=var.data_type,
dtype=var.dtype,
shape=var.shape,
persistable=True,
initializer=initializer)
......@@ -180,10 +180,10 @@ class LayerHelper(object):
b = self.create_parameter(
attr=bias_attr,
shape=size,
dtype=input_var.data_type,
dtype=input_var.dtype,
suffix='b',
initializer=bias_initializer)
tmp = self.create_tmp_variable(dtype=input_var.data_type)
tmp = self.create_tmp_variable(dtype=input_var.dtype)
self.append_op(
type='elementwise_add',
inputs={'X': [input_var],
......@@ -198,7 +198,7 @@ class LayerHelper(object):
return input_var
if isinstance(act, basestring):
act = {'type': act}
tmp = self.create_tmp_variable(dtype=input_var.data_type)
tmp = self.create_tmp_variable(dtype=input_var.dtype)
act_type = act.pop('type')
self.append_op(
type=act_type,
......
......@@ -114,7 +114,7 @@ def embedding(input,
is_sparse=False,
param_initializer=None,
param_attr=None,
data_type='float32',
dtype='float32',
main_program=None,
startup_program=None):
"""
......@@ -125,7 +125,7 @@ def embedding(input,
size: The size of the layer
is_sparse: A flag that decleares whether the input is sparse
param_attr: Parameters for this layer
data_type: The type of data : float32, float_16, int etc
dtype: The type of data : float32, float_16, int etc
main_program: Name of the main program that calls this
startup_program: Name of the startup program
......@@ -145,9 +145,9 @@ def embedding(input,
w = helper.create_parameter(
attr=helper.param_attr,
shape=size,
dtype=data_type,
dtype=dtype,
initializer=param_initializer or _get_default_param_initializer())
tmp = helper.create_tmp_variable(data_type)
tmp = helper.create_tmp_variable(dtype)
helper.append_op(
type='lookup_table',
inputs={'Ids': input,
......@@ -167,23 +167,23 @@ def dynamic_lstm(input,
gate_activation='sigmoid',
cell_activation='tanh',
candidate_activation='tanh',
data_type='float32',
dtype='float32',
main_program=None,
startup_program=None):
helper = LayerHelper('lstm', **locals())
size = size / 4
weight = helper.create_parameter(
attr=helper.param_attr, shape=[size, 4 * size], dtype=data_type)
attr=helper.param_attr, shape=[size, 4 * size], dtype=dtype)
bias_size = [1, 7 * size]
if not use_peepholes:
bias_size[1] = 4 * size
bias = helper.create_parameter(
attr=helper.bias_attr, shape=bias_size, dtype=data_type, suffix='b')
attr=helper.bias_attr, shape=bias_size, dtype=dtype, suffix='b')
hidden = helper.create_tmp_variable(data_type)
cell = helper.create_tmp_variable(data_type)
batch_gate = helper.create_tmp_variable(data_type)
batch_cell_pre_act = helper.create_tmp_variable(data_type)
hidden = helper.create_tmp_variable(dtype)
cell = helper.create_tmp_variable(dtype)
batch_gate = helper.create_tmp_variable(dtype)
batch_cell_pre_act = helper.create_tmp_variable(dtype)
helper.append_op(
type='lstm',
......@@ -209,7 +209,7 @@ def dynamic_lstm(input,
def data(name,
shape,
append_batch_size=True,
data_type='float32',
dtype='float32',
type=core.VarDesc.VarType.LOD_TENSOR,
main_program=None,
startup_program=None,
......@@ -221,7 +221,7 @@ def data(name,
name: The name/alias of the function
shape: Tuple declaring the shape.
append_batch_size: Whether or not to append the data as a batch.
data_type: The type of data : float32, float_16, int etc
dtype: The type of data : float32, float_16, int etc
type: The output type. By default it is LOD_TENSOR.
main_program: Name of the main program that calls this
startup_program: Name of the startup program
......@@ -251,7 +251,7 @@ def data(name,
return helper.create_global_variable(
name=name,
shape=shape,
dtype=data_type,
dtype=dtype,
type=type,
stop_gradient=stop_gradient)
......@@ -362,9 +362,9 @@ def _create_op_func_(op_type):
o_name = not_intermediate_outputs[0].name
intermediate_output_names = [output.name for output in intermediate_outputs]
def infer_and_check_data_type(op_proto, **kwargs):
def infer_and_check_dtype(op_proto, **kwargs):
"""
This function performs the sanity check for data_type and
This function performs the sanity check for dtype and
instance type.
"""
dtype = None
......@@ -379,8 +379,8 @@ def _create_op_func_(op_type):
op_type))
if dtype is None:
dtype = each.data_type
elif dtype != each.data_type:
dtype = each.dtype
elif dtype != each.dtype:
raise ValueError(
"operator {0} must input same dtype".format(op_type))
......@@ -389,7 +389,7 @@ def _create_op_func_(op_type):
def func(**kwargs):
helper = LayerHelper(op_type, **kwargs)
dtype = infer_and_check_data_type(op_proto, **kwargs)
dtype = infer_and_check_dtype(op_proto, **kwargs)
inputs = dict()
for ipt in op_proto.inputs:
......@@ -426,19 +426,19 @@ _create_op_func_('reshape')
_create_op_func_('transpose')
def cast(x, data_type, main_program=None):
def cast(x, dtype, main_program=None):
"""
This function takes in the input with input_data_type
and casts it to the output_data_type as the output.
This function takes in the input with input_dtype
and casts it to the output_dtype as the output.
"""
helper = LayerHelper('cast', **locals())
out = helper.create_tmp_variable(dtype=data_type)
out = helper.create_tmp_variable(dtype=dtype)
helper.append_op(
type='cast',
inputs={'X': [x]},
outputs={'Out': [out]},
attrs={'in_data_type': x.data_type,
'out_data_type': out.data_type})
attrs={'in_dtype': x.dtype,
'out_dtype': out.dtype})
return out
......@@ -519,8 +519,8 @@ def split_lod_tensor(input,
main_program=None,
startup_program=None):
helper = LayerHelper('split_lod_tensor', **locals())
out_true = helper.create_tmp_variable(dtype=input.data_type)
out_false = helper.create_tmp_variable(dtype=input.data_type)
out_true = helper.create_tmp_variable(dtype=input.dtype)
out_false = helper.create_tmp_variable(dtype=input.dtype)
helper.append_op(
type='split_lod_tensor',
inputs={
......@@ -541,7 +541,7 @@ def merge_lod_tensor(in_true,
main_program=None,
startup_program=None):
helper = LayerHelper('merge_lod_tensor', **locals())
out = helper.create_tmp_variable(dtype=in_true.data_type)
out = helper.create_tmp_variable(dtype=in_true.dtype)
helper.append_op(
type='merge_lod_tensor',
inputs={'X': x,
......@@ -559,9 +559,9 @@ def cos_sim(X, Y, **kwargs):
X and Y and returns that as the output.
"""
helper = LayerHelper('cos_sim', **kwargs)
out = helper.create_tmp_variable(dtype=X.data_type)
xnorm = helper.create_tmp_variable(dtype=X.data_type)
ynorm = helper.create_tmp_variable(dtype=X.data_type)
out = helper.create_tmp_variable(dtype=X.dtype)
xnorm = helper.create_tmp_variable(dtype=X.dtype)
ynorm = helper.create_tmp_variable(dtype=X.dtype)
helper.append_op(
type='cos_sim',
inputs={'X': [X],
......@@ -577,7 +577,7 @@ def cross_entropy(input, label, **kwargs):
This function computes cross_entropy using the input and label.
"""
helper = LayerHelper('cross_entropy', **kwargs)
out = helper.create_tmp_variable(dtype=input.data_type)
out = helper.create_tmp_variable(dtype=input.dtype)
helper.append_op(
type='cross_entropy',
inputs={'X': [input],
......@@ -593,14 +593,14 @@ def square_error_cost(input, label, **kwargs):
The output is appending the op to do the above.
"""
helper = LayerHelper('square_error_cost', **kwargs)
minus_out = helper.create_tmp_variable(dtype=input.data_type)
minus_out = helper.create_tmp_variable(dtype=input.dtype)
helper.append_op(
type='elementwise_sub',
inputs={'X': [input],
'Y': [label]},
outputs={'Out': [minus_out]})
square_out = helper.create_tmp_variable(dtype=input.data_type)
square_out = helper.create_tmp_variable(dtype=input.dtype)
helper.append_op(
type='square', inputs={'X': [minus_out]}, outputs={'Y': [square_out]})
return square_out
......@@ -612,7 +612,7 @@ def accuracy(input, label, k=1, **kwargs):
The output is the top_k inputs and their indices.
"""
helper = LayerHelper("accuracy", **kwargs)
topk_out = helper.create_tmp_variable(dtype=input.data_type)
topk_out = helper.create_tmp_variable(dtype=input.dtype)
topk_indices = helper.create_tmp_variable(dtype="int64")
helper.append_op(
type="top_k",
......@@ -883,12 +883,12 @@ def batch_norm(input,
initializer=ConstantInitializer(0.0))
mean = helper.create_global_variable(
dtype=input.data_type, shape=param_shape, persistable=True)
dtype=input.dtype, shape=param_shape, persistable=True)
helper.set_variable_initializer(
var=mean, initializer=ConstantInitializer(0.0))
variance = helper.create_global_variable(
dtype=input.data_type, shape=param_shape, persistable=True)
dtype=input.dtype, shape=param_shape, persistable=True)
helper.set_variable_initializer(
var=variance, initializer=ConstantInitializer(1.0))
......@@ -927,8 +927,8 @@ def batch_norm(input,
def beam_search_decode(ids, scores, main_program=None, startup_program=None):
helper = LayerHelper('beam_search_decode', **locals())
sentence_ids = helper.create_tmp_variable(dtype=ids.data_type)
sentence_scores = helper.create_tmp_variable(dtype=ids.data_type)
sentence_ids = helper.create_tmp_variable(dtype=ids.dtype)
sentence_scores = helper.create_tmp_variable(dtype=ids.dtype)
helper.append_op(
type="beam_search_decode",
......@@ -1066,7 +1066,7 @@ class StaticRNN(object):
boot_var = parent_block.create_var(
name=var_name,
shape=shape,
dtype=batch_ref.data_type,
dtype=batch_ref.dtype,
persistable=False)
parent_block.append_op(
......@@ -1076,7 +1076,7 @@ class StaticRNN(object):
attrs={
'value': init_value,
'shape': boot_var.shape,
'data_type': boot_var.data_type,
'dtype': boot_var.dtype,
'input_dim_idx': ref_batch_dim_idx,
'output_dim_idx': init_batch_dim_idx
})
......@@ -1085,7 +1085,7 @@ class StaticRNN(object):
else:
pre_mem = self.helper.create_variable(
name=unique_name("@".join([self.helper.name, "mem"])),
dtype=init.data_type,
dtype=init.dtype,
shape=init.shape)
self.memories[pre_mem.name] = StaticRNNMemoryLink(
init=init, pre_mem=pre_mem)
......@@ -1101,10 +1101,7 @@ class StaticRNN(object):
raise ValueError("Static RNN only take fix seq_len input")
ipt = self.helper.create_variable(
name=x.name,
dtype=x.data_type,
shape=list(x.shape[1:]),
type=x.type)
name=x.name, dtype=x.dtype, shape=list(x.shape[1:]), type=x.type)
self.inputs.append(ipt)
return ipt
......@@ -1113,17 +1110,17 @@ class StaticRNN(object):
if not isinstance(o, Variable):
raise TypeError("step output takes a Variable")
tmp_o = self.helper.create_tmp_variable(dtype=o.data_type)
tmp_o = self.helper.create_tmp_variable(dtype=o.dtype)
self.helper.append_op(
type='rnn_memory_helper',
inputs={'X': [o]},
outputs={'Out': tmp_o},
attrs={'data_type': o.data_type})
attrs={'dtype': o.dtype})
out_var = self.parent_block().create_var(
name=tmp_o.name,
shape=[self.seq_len] + list(tmp_o.shape),
dtype=tmp_o.data_type)
dtype=tmp_o.dtype)
self.outputs.append(out_var)
......@@ -1195,13 +1192,13 @@ class StaticRNN(object):
pre_memories.append(mem.pre_mem.name)
mem_var = rnn_block.var(mem.mem.name)
assert isinstance(mem_var, Variable)
new_mem = self.helper.create_tmp_variable(dtype=mem_var.data_type)
new_mem = self.helper.create_tmp_variable(dtype=mem_var.dtype)
rnn_block.append_op(
type='rnn_memory_helper',
inputs={'X': [mem_var]},
outputs={'Out': [new_mem]},
attrs={'data_type': mem_var.data_type})
attrs={'dtype': mem_var.dtype})
memories.append(new_mem.name)
......@@ -1251,7 +1248,7 @@ class While(object):
if not isinstance(cond, Variable):
raise TypeError("condition should be a variable")
assert isinstance(cond, Variable)
if cond.data_type != core.DataType.BOOL:
if cond.dtype != core.DataType.BOOL:
raise TypeError("condition should be a bool variable")
if reduce(lambda a, b: a * b, cond.shape, 1) != 1:
raise TypeError("condition should be a bool scalar")
......@@ -1323,9 +1320,9 @@ def lstm(x,
main_program=main_program,
startup_program=startup_program)
data_type = x.data_type
c = helper.create_tmp_variable(data_type)
h = helper.create_tmp_variable(data_type)
dtype = x.dtype
c = helper.create_tmp_variable(dtype)
h = helper.create_tmp_variable(dtype)
helper.append_op(
type='lstm_unit',
......@@ -1367,7 +1364,7 @@ def lod_tensor_to_array(x, table, main_program=None):
array = helper.create_variable(
name=unique_name("lod_tensor_to_array"),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=x.data_type)
dtype=x.dtype)
helper.append_op(
type='lod_tensor_to_array',
inputs={'X': x,
......@@ -1382,7 +1379,7 @@ def array_to_lod_tensor(x, table, main_program=None):
LOD_Tensor.
"""
helper = LayerHelper("array_to_lod_tensor", **locals())
tmp = helper.create_tmp_variable(dtype=x.data_type)
tmp = helper.create_tmp_variable(dtype=x.dtype)
helper.append_op(
type="array_to_lod_tensor",
inputs={'X': x,
......@@ -1394,7 +1391,7 @@ def array_to_lod_tensor(x, table, main_program=None):
def fill_constant(shape, dtype, value, main_program=None, startup_program=None):
"""
This function creates a tensor , with shape as mentioned in the input and
specified data_type and fills this up with a constant value that
specified dtype and fills this up with a constant value that
comes in the input. It also sets the stop_gradient to be True.
"""
helper = LayerHelper("fill_constant", **locals())
......@@ -1403,11 +1400,9 @@ def fill_constant(shape, dtype, value, main_program=None, startup_program=None):
type='fill_constant',
inputs={},
outputs={'Out': [out]},
attrs={
'shape': shape,
'data_type': out.data_type,
'value': float(value)
})
attrs={'shape': shape,
'dtype': out.dtype,
'value': float(value)})
out.stop_gradient = True
return out
......@@ -1428,7 +1423,7 @@ def fill_constant_batch_size_like(input,
outputs={'Out': [out]},
attrs={
'shape': shape,
'data_type': out.data_type,
'dtype': out.dtype,
'value': float(value),
'input_dim_idx': input_dim_idx,
'output_dim_idx': output_dim_idx
......@@ -1461,7 +1456,7 @@ def increment(x, value=1.0, in_place=True, main_program=None):
"""
helper = LayerHelper("increment", **locals())
if not in_place:
out = helper.create_tmp_variable(dtype=x.data_type)
out = helper.create_tmp_variable(dtype=x.dtype)
else:
out = x
helper.append_op(
......@@ -1482,7 +1477,7 @@ def array_write(x, i, array=None, main_program=None):
array = helper.create_variable(
name="{0}.out".format(helper.name),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=x.data_type)
dtype=x.dtype)
helper.append_op(
type='write_to_array',
inputs={'X': [x],
......@@ -1521,7 +1516,7 @@ def array_read(array, i, main_program=None):
array,
Variable) or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY:
raise TypeError("array should be tensor array vairable")
out = helper.create_tmp_variable(dtype=array.data_type)
out = helper.create_tmp_variable(dtype=array.dtype)
helper.append_op(
type='read_from_array',
inputs={'X': [array],
......@@ -1536,7 +1531,7 @@ def shrink_memory(x, i, table, main_program=None):
as mentioned in the input parameter.
"""
helper = LayerHelper('shrink_memory', **locals())
out = helper.create_tmp_variable(dtype=x.data_type)
out = helper.create_tmp_variable(dtype=x.dtype)
helper.append_op(
type='shrink_rnn_memory',
inputs={'X': [x],
......@@ -1698,11 +1693,11 @@ class IfElse(object):
parent_block = self.parent_block()
out_true = parent_block.create_var(
name=unique_name('ifelse_input' + self.helper.name),
dtype=x.data_type)
dtype=x.dtype)
out_false = parent_block.create_var(
name=unique_name('ifelse_input' + self.helper.name),
dtype=x.data_type)
dtype=x.dtype)
parent_block.append_op(
type='split_lod_tensor',
inputs={
......@@ -1744,7 +1739,7 @@ class IfElse(object):
# create outside tensor
outside_out = parent_block.create_var(
name=unique_name("_".join([self.helper.name, 'output'])),
dtype=each_out.data_type)
dtype=each_out.dtype)
out_table.append(outside_out)
# assign local var to outside
......
......@@ -92,7 +92,7 @@ class Optimizer(object):
var = self.helper.create_global_variable(
name=unique_name(name),
persistable=True,
dtype=dtype or param.data_type,
dtype=dtype or param.dtype,
type=param.type,
shape=param.shape)
self.helper.set_variable_initializer(
......@@ -202,7 +202,7 @@ class Optimizer(object):
"""
params_grads = append_backward_ops(loss, parameter_list, no_grad_set or
set())
# Add regularization if any
# Add regularization if any
params_grads = append_regularization_ops(params_grads)
optimize_ops = self.create_optimization_pass(params_grads, loss,
startup_program)
......
......@@ -7,11 +7,11 @@ from paddle.v2.fluid.executor import Executor
from paddle.v2.fluid.io import save_persistables, load_persistables
from paddle.v2.fluid.optimizer import SGDOptimizer
x = layers.data(name='x', shape=[13], data_type='float32')
x = layers.data(name='x', shape=[13], dtype='float32')
y_predict = layers.fc(input=x, size=1, act=None)
y = layers.data(name='y', shape=[1], data_type='float32')
y = layers.data(name='y', shape=[1], dtype='float32')
cost = layers.square_error_cost(input=y_predict, label=y)
avg_cost = layers.mean(x=cost)
......
......@@ -90,8 +90,8 @@ def vgg16_bn_drop(input):
classdim = 10
data_shape = [3, 32, 32]
images = layers.data(name='pixel', shape=data_shape, data_type='float32')
label = layers.data(name='label', shape=[1], data_type='int64')
images = layers.data(name='pixel', shape=data_shape, dtype='float32')
label = layers.data(name='label', shape=[1], dtype='int64')
# Add neural network config
# option 1. resnet
......
......@@ -34,26 +34,26 @@ def load_parameter(file_name, h, w):
def db_lstm():
# 8 features
word = layers.data(name='word_data', shape=[1], data_type='int64')
predicate = layers.data(name='verb_data', shape=[1], data_type='int64')
ctx_n2 = layers.data(name='ctx_n2_data', shape=[1], data_type='int64')
ctx_n1 = layers.data(name='ctx_n1_data', shape=[1], data_type='int64')
ctx_0 = layers.data(name='ctx_0_data', shape=[1], data_type='int64')
ctx_p1 = layers.data(name='ctx_p1_data', shape=[1], data_type='int64')
ctx_p2 = layers.data(name='ctx_p2_data', shape=[1], data_type='int64')
mark = layers.data(name='mark_data', shape=[1], data_type='int64')
word = layers.data(name='word_data', shape=[1], dtype='int64')
predicate = layers.data(name='verb_data', shape=[1], dtype='int64')
ctx_n2 = layers.data(name='ctx_n2_data', shape=[1], dtype='int64')
ctx_n1 = layers.data(name='ctx_n1_data', shape=[1], dtype='int64')
ctx_0 = layers.data(name='ctx_0_data', shape=[1], dtype='int64')
ctx_p1 = layers.data(name='ctx_p1_data', shape=[1], dtype='int64')
ctx_p2 = layers.data(name='ctx_p2_data', shape=[1], dtype='int64')
mark = layers.data(name='mark_data', shape=[1], dtype='int64')
predicate_embedding = layers.embedding(
input=predicate,
size=[pred_len, word_dim],
data_type='float32',
dtype='float32',
is_sparse=IS_SPARSE,
param_attr={'name': 'vemb'})
mark_embedding = layers.embedding(
input=mark,
size=[mark_dict_len, mark_dim],
data_type='float32',
dtype='float32',
is_sparse=IS_SPARSE)
word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2]
......@@ -125,7 +125,7 @@ def to_lodtensor(data, place):
def main():
# define network topology
feature_out = db_lstm()
target = layers.data(name='target', shape=[1], data_type='int64')
target = layers.data(name='target', shape=[1], dtype='int64')
crf_cost = layers.linear_chain_crf(
input=feature_out,
label=target,
......
......@@ -8,8 +8,8 @@ import paddle.v2.fluid.nets as nets
from paddle.v2.fluid.executor import Executor
from paddle.v2.fluid.optimizer import AdamOptimizer
images = layers.data(name='pixel', shape=[1, 28, 28], data_type='float32')
label = layers.data(name='label', shape=[1], data_type='int64')
images = layers.data(name='pixel', shape=[1, 28, 28], dtype='float32')
label = layers.data(name='label', shape=[1], dtype='int64')
conv_pool_1 = nets.simple_img_conv_pool(
input=images,
filter_size=5,
......
......@@ -10,7 +10,7 @@ from paddle.v2.fluid.optimizer import MomentumOptimizer
from paddle.v2.fluid.regularizer import L2DecayRegularizer
BATCH_SIZE = 128
image = layers.data(name='x', shape=[784], data_type='float32')
image = layers.data(name='x', shape=[784], dtype='float32')
param_attr = {
'name': None,
......@@ -27,7 +27,7 @@ predict = layers.fc(input=hidden2,
act='softmax',
param_attr=param_attr)
label = layers.data(name='y', shape=[1], data_type='int64')
label = layers.data(name='y', shape=[1], dtype='int64')
cost = layers.cross_entropy(input=predict, label=label)
avg_cost = layers.mean(x=cost)
......
......@@ -18,11 +18,11 @@ def get_usr_combined_features():
USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1
uid = layers.data(name='user_id', shape=[1], data_type='int64')
uid = layers.data(name='user_id', shape=[1], dtype='int64')
usr_emb = layers.embedding(
input=uid,
data_type='float32',
dtype='float32',
size=[USR_DICT_SIZE, 32],
param_attr={'name': 'user_table'},
is_sparse=IS_SPARSE)
......@@ -31,7 +31,7 @@ def get_usr_combined_features():
USR_GENDER_DICT_SIZE = 2
usr_gender_id = layers.data(name='gender_id', shape=[1], data_type='int64')
usr_gender_id = layers.data(name='gender_id', shape=[1], dtype='int64')
usr_gender_emb = layers.embedding(
input=usr_gender_id,
......@@ -42,7 +42,7 @@ def get_usr_combined_features():
usr_gender_fc = layers.fc(input=usr_gender_emb, size=16)
USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table)
usr_age_id = layers.data(name='age_id', shape=[1], data_type="int64")
usr_age_id = layers.data(name='age_id', shape=[1], dtype="int64")
usr_age_emb = layers.embedding(
input=usr_age_id,
......@@ -53,7 +53,7 @@ def get_usr_combined_features():
usr_age_fc = layers.fc(input=usr_age_emb, size=16)
USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1
usr_job_id = layers.data(name='job_id', shape=[1], data_type="int64")
usr_job_id = layers.data(name='job_id', shape=[1], dtype="int64")
usr_job_emb = layers.embedding(
input=usr_job_id,
......@@ -75,11 +75,11 @@ def get_mov_combined_features():
MOV_DICT_SIZE = paddle.dataset.movielens.max_movie_id() + 1
mov_id = layers.data(name='movie_id', shape=[1], data_type='int64')
mov_id = layers.data(name='movie_id', shape=[1], dtype='int64')
mov_emb = layers.embedding(
input=mov_id,
data_type='float32',
dtype='float32',
size=[MOV_DICT_SIZE, 32],
param_attr={'name': 'movie_table'},
is_sparse=IS_SPARSE)
......@@ -88,7 +88,7 @@ def get_mov_combined_features():
CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories())
category_id = layers.data(name='category_id', shape=[1], data_type='int64')
category_id = layers.data(name='category_id', shape=[1], dtype='int64')
mov_categories_emb = layers.embedding(
input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE)
......@@ -98,7 +98,7 @@ def get_mov_combined_features():
MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict())
mov_title_id = layers.data(name='movie_title', shape=[1], data_type='int64')
mov_title_id = layers.data(name='movie_title', shape=[1], dtype='int64')
mov_title_emb = layers.embedding(
input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32], is_sparse=IS_SPARSE)
......@@ -126,7 +126,7 @@ def model():
# need cos sim
inference = layers.cos_sim(X=usr_combined_features, Y=mov_combined_features)
label = layers.data(name='score', shape=[1], data_type='float32')
label = layers.data(name='score', shape=[1], dtype='float32')
square_cost = layers.square_error_cost(input=inference, label=label)
......
......@@ -10,8 +10,8 @@ from paddle.v2.fluid.optimizer import AdamOptimizer
def convolution_net(input_dim, class_dim=2, emb_dim=32, hid_dim=32):
data = layers.data(name="words", shape=[1], data_type="int64")
label = layers.data(name="label", shape=[1], data_type="int64")
data = layers.data(name="words", shape=[1], dtype="int64")
label = layers.data(name="label", shape=[1], dtype="int64")
emb = layers.embedding(input=data, size=[input_dim, emb_dim])
conv_3 = nets.sequence_conv_pool(
......
......@@ -14,8 +14,8 @@ def stacked_lstm_net(input_dim,
hid_dim=512,
stacked_num=3):
assert stacked_num % 2 == 1
data = layers.data(name="words", shape=[1], data_type="int64")
label = layers.data(name="label", shape=[1], data_type="int64")
data = layers.data(name="words", shape=[1], dtype="int64")
label = layers.data(name="label", shape=[1], dtype="int64")
emb = layers.embedding(input=data, size=[input_dim, emb_dim])
# add bias attr
......
......@@ -12,19 +12,19 @@ def lstm_net(dict_dim, class_dim=2, emb_dim=32, seq_len=80, batch_size=50):
name="words",
shape=[seq_len * batch_size, 1],
append_batch_size=False,
data_type="int64")
dtype="int64")
label = layers.data(
name="label",
shape=[batch_size, 1],
append_batch_size=False,
data_type="int64")
dtype="int64")
emb = layers.embedding(input=data, size=[dict_dim, emb_dim])
emb = layers.reshape(x=emb, shape=[batch_size, seq_len, emb_dim])
emb = layers.transpose(x=emb, axis=[1, 0, 2])
c_pre_init = layers.fill_constant(
dtype=emb.data_type, shape=[batch_size, emb_dim], value=0.0)
dtype=emb.dtype, shape=[batch_size, emb_dim], value=0.0)
layer_1_out = layers.lstm(emb, c_pre_init=c_pre_init, hidden_dim=emb_dim)
layer_1_out = layers.transpose(x=layer_1_out, axis=[1, 0, 2])
......
......@@ -16,34 +16,34 @@ IS_SPARSE = True
word_dict = paddle.dataset.imikolov.build_dict()
dict_size = len(word_dict)
first_word = layers.data(name='firstw', shape=[1], data_type='int64')
second_word = layers.data(name='secondw', shape=[1], data_type='int64')
third_word = layers.data(name='thirdw', shape=[1], data_type='int64')
forth_word = layers.data(name='forthw', shape=[1], data_type='int64')
next_word = layers.data(name='nextw', shape=[1], data_type='int64')
first_word = layers.data(name='firstw', shape=[1], dtype='int64')
second_word = layers.data(name='secondw', shape=[1], dtype='int64')
third_word = layers.data(name='thirdw', shape=[1], dtype='int64')
forth_word = layers.data(name='forthw', shape=[1], dtype='int64')
next_word = layers.data(name='nextw', shape=[1], dtype='int64')
embed_first = layers.embedding(
input=first_word,
size=[dict_size, EMBED_SIZE],
data_type='float32',
dtype='float32',
is_sparse=IS_SPARSE,
param_attr={'name': 'shared_w'})
embed_second = layers.embedding(
input=second_word,
size=[dict_size, EMBED_SIZE],
data_type='float32',
dtype='float32',
is_sparse=IS_SPARSE,
param_attr={'name': 'shared_w'})
embed_third = layers.embedding(
input=third_word,
size=[dict_size, EMBED_SIZE],
data_type='float32',
dtype='float32',
is_sparse=IS_SPARSE,
param_attr={'name': 'shared_w'})
embed_forth = layers.embedding(
input=forth_word,
size=[dict_size, EMBED_SIZE],
data_type='float32',
dtype='float32',
is_sparse=IS_SPARSE,
param_attr={'name': 'shared_w'})
......
......@@ -458,7 +458,7 @@ class OpTest(unittest.TestCase):
mean_inputs = map(block.var, output_names)
if len(mean_inputs) == 1:
loss = block.create_var(dtype=mean_inputs[0].data_type, shape=[1])
loss = block.create_var(dtype=mean_inputs[0].dtype, shape=[1])
op = block.append_op(
inputs={"X": mean_inputs}, outputs={"Out": loss}, type='mean')
op.desc.infer_var_type(block.desc)
......@@ -466,8 +466,7 @@ class OpTest(unittest.TestCase):
else:
avg_sum = []
for cur_loss in mean_inputs:
cur_avg_loss = block.create_var(
dtype=cur_loss.data_type, shape=[1])
cur_avg_loss = block.create_var(dtype=cur_loss.dtype, shape=[1])
op = block.append_op(
inputs={"X": [cur_loss]},
outputs={"Out": [cur_avg_loss]},
......@@ -476,13 +475,13 @@ class OpTest(unittest.TestCase):
op.desc.infer_shape(block.desc)
avg_sum.append(cur_avg_loss)
loss_sum = block.create_var(dtype=avg_sum[0].data_type, shape=[1])
loss_sum = block.create_var(dtype=avg_sum[0].dtype, shape=[1])
op_sum = block.append_op(
inputs={"X": avg_sum}, outputs={"Out": loss_sum}, type='sum')
op_sum.desc.infer_var_type(block.desc)
op_sum.desc.infer_shape(block.desc)
loss = block.create_var(dtype=loss_sum.data_type, shape=[1])
loss = block.create_var(dtype=loss_sum.dtype, shape=[1])
op_loss = block.append_op(
inputs={"X": loss_sum},
outputs={"Out": loss},
......
......@@ -10,8 +10,8 @@ class TestCastOp(op_test.OpTest):
self.inputs = {'X': ipt.astype('float32')}
self.outputs = {'Out': ipt.astype('float64')}
self.attrs = {
'in_data_type': int(core.DataType.FP32),
'out_data_type': int(core.DataType.FP64)
'in_dtype': int(core.DataType.FP32),
'out_dtype': int(core.DataType.FP64)
}
self.op_type = 'cast'
......
......@@ -9,7 +9,7 @@ import numpy
class ConditionalBlock(unittest.TestCase):
def test_forward(self):
data = layers.data(name='X', shape=[1], data_type='float32')
data = layers.data(name='X', shape=[1], dtype='float32')
data.stop_gradient = False
cond = layers.ConditionalBlock(inputs=[data])
out = layers.create_tensor(dtype='float32')
......
......@@ -8,11 +8,11 @@ import numpy
class TestExecutor(unittest.TestCase):
def test_mul(self):
a = data(name='a', shape=[784], data_type='float32')
a = data(name='a', shape=[784], dtype='float32')
b = data(
name='b',
shape=[784, 100],
data_type='float32',
dtype='float32',
append_batch_size=False)
out = mul(x=a, y=b)
place = core.CPUPlace()
......
......@@ -32,7 +32,7 @@ class TestLayer(unittest.TestCase):
images = layers.data(
name='pixel',
shape=[3, 48, 48],
data_type='float32',
dtype='float32',
main_program=main_program)
layers.batch_norm(
input=images,
......@@ -47,7 +47,7 @@ class TestLayer(unittest.TestCase):
images = layers.data(
name='pixel',
shape=[3, 48, 48],
data_type='float32',
dtype='float32',
main_program=main_program)
layers.dropout(
x=images,
......@@ -64,7 +64,7 @@ class TestLayer(unittest.TestCase):
images = layers.data(
name='pixel',
shape=[3, 48, 48],
data_type='float32',
dtype='float32',
main_program=main_program,
startup_program=startup_program)
conv1 = conv_block(images, 64, 2, [0.3, 0], main_program,
......@@ -80,13 +80,13 @@ class TestLayer(unittest.TestCase):
image1 = layers.data(
name='pixel1',
shape=[3, 48, 48],
data_type='float32',
dtype='float32',
main_program=main_program,
startup_program=startup_program)
image2 = layers.data(
name='pixel2',
shape=[3, 48, 48],
data_type='float32',
dtype='float32',
main_program=main_program,
startup_program=startup_program)
out = layers.elementwise_add(
......
......@@ -19,13 +19,13 @@ class TestBook(unittest.TestCase):
x = layers.data(
name='x',
shape=[2],
data_type='float32',
dtype='float32',
main_program=program,
startup_program=init_program)
y = layers.data(
name='y',
shape=[1],
data_type='float32',
dtype='float32',
main_program=program,
startup_program=init_program)
......
......@@ -9,11 +9,11 @@ class TestBook(unittest.TestCase):
def test_fit_a_line(self):
program = Program()
x = layers.data(
name='x', shape=[13], data_type='float32', main_program=program)
name='x', shape=[13], dtype='float32', main_program=program)
y_predict = layers.fc(input=x, size=1, act=None, main_program=program)
y = layers.data(
name='y', shape=[1], data_type='float32', main_program=program)
name='y', shape=[1], dtype='float32', main_program=program)
cost = layers.square_error_cost(
input=y_predict, label=y, main_program=program)
......@@ -28,12 +28,9 @@ class TestBook(unittest.TestCase):
# Change g_program, so the rest layers use `g_program`
images = layers.data(
name='pixel',
shape=[784],
data_type='float32',
main_program=program)
name='pixel', shape=[784], dtype='float32', main_program=program)
label = layers.data(
name='label', shape=[1], data_type='int32', main_program=program)
name='label', shape=[1], dtype='int32', main_program=program)
hidden1 = layers.fc(input=images,
size=128,
act='relu',
......@@ -58,7 +55,7 @@ class TestBook(unittest.TestCase):
images = layers.data(
name='pixel',
shape=[3, 48, 48],
data_type='int32',
dtype='int32',
main_program=program)
layers.conv2d(
input=images,
......@@ -74,10 +71,10 @@ class TestBook(unittest.TestCase):
images = layers.data(
name='pixel',
shape=[1, 28, 28],
data_type='float32',
dtype='float32',
main_program=program)
label = layers.data(
name='label', shape=[1], data_type='int32', main_program=program)
name='label', shape=[1], dtype='int32', main_program=program)
conv_pool_1 = nets.simple_img_conv_pool(
input=images,
filter_size=5,
......@@ -112,39 +109,39 @@ class TestBook(unittest.TestCase):
dict_size = 10000
embed_size = 32
first_word = layers.data(
name='firstw', shape=[1], data_type='int64', main_program=program)
name='firstw', shape=[1], dtype='int64', main_program=program)
second_word = layers.data(
name='secondw', shape=[1], data_type='int64', main_program=program)
name='secondw', shape=[1], dtype='int64', main_program=program)
third_word = layers.data(
name='thirdw', shape=[1], data_type='int64', main_program=program)
name='thirdw', shape=[1], dtype='int64', main_program=program)
forth_word = layers.data(
name='forthw', shape=[1], data_type='int64', main_program=program)
name='forthw', shape=[1], dtype='int64', main_program=program)
next_word = layers.data(
name='nextw', shape=[1], data_type='int64', main_program=program)
name='nextw', shape=[1], dtype='int64', main_program=program)
embed_first = layers.embedding(
input=first_word,
size=[dict_size, embed_size],
data_type='float32',
dtype='float32',
param_attr={'name': 'shared_w'},
main_program=program)
embed_second = layers.embedding(
input=second_word,
size=[dict_size, embed_size],
data_type='float32',
dtype='float32',
param_attr={'name': 'shared_w'},
main_program=program)
embed_third = layers.embedding(
input=third_word,
size=[dict_size, embed_size],
data_type='float32',
dtype='float32',
param_attr={'name': 'shared_w'},
main_program=program)
embed_forth = layers.embedding(
input=forth_word,
size=[dict_size, embed_size],
data_type='float32',
dtype='float32',
param_attr={'name': 'shared_w'},
main_program=program)
......@@ -173,12 +170,9 @@ class TestBook(unittest.TestCase):
# Change g_program, so the rest layers use `g_program`
images = layers.data(
name='pixel',
shape=[784],
data_type='float32',
main_program=program)
name='pixel', shape=[784], dtype='float32', main_program=program)
label = layers.data(
name='label', shape=[1], data_type='int32', main_program=program)
name='label', shape=[1], dtype='int32', main_program=program)
hidden = layers.fc(input=images, size=128, main_program=program)
crf = layers.linear_chain_crf(
input=hidden, label=label, main_program=program)
......
......@@ -132,7 +132,7 @@ class TestCPULoDTensorArrayOpGrad(unittest.TestCase):
x = layers.data(
name='x',
shape=[1],
data_type='float32',
dtype='float32',
main_program=program,
stop_gradient=False)
table = layers.lod_rank_table(x, level=0, main_program=program)
......
......@@ -11,10 +11,9 @@ import numpy as np
class TestMNISTIfElseOp(unittest.TestCase):
def test_raw_api(self):
kwargs = {'startup_program': Program(), 'main_program': Program()}
image = layers.data(
name='x', shape=[784], data_type='float32', **kwargs)
image = layers.data(name='x', shape=[784], dtype='float32', **kwargs)
label = layers.data(name='y', shape=[1], data_type='int64', **kwargs)
label = layers.data(name='y', shape=[1], dtype='int64', **kwargs)
limit = layers.fill_constant_batch_size_like(
input=label, dtype='int64', shape=[1], value=5.0, **kwargs)
......@@ -84,10 +83,9 @@ class TestMNISTIfElseOp(unittest.TestCase):
def test_ifelse(self):
kwargs = {'startup_program': Program(), 'main_program': Program()}
image = layers.data(
name='x', shape=[784], data_type='float32', **kwargs)
image = layers.data(name='x', shape=[784], dtype='float32', **kwargs)
label = layers.data(name='y', shape=[1], data_type='int64', **kwargs)
label = layers.data(name='y', shape=[1], dtype='int64', **kwargs)
limit = layers.fill_constant_batch_size_like(
input=label, dtype='int64', shape=[1], value=5.0, **kwargs)
......
......@@ -20,7 +20,7 @@ class TestParameter(unittest.TestCase):
self.assertIsNotNone(param)
self.assertEqual('fc.w', param.name)
self.assertEqual((784, 100), param.shape)
self.assertEqual(core.DataType.FP32, param.data_type)
self.assertEqual(core.DataType.FP32, param.dtype)
self.assertEqual(0, param.block.idx)
exe = Executor(core.CPUPlace())
p = exe.run(g_main_program, fetch_list=[param])[0]
......
......@@ -101,13 +101,13 @@ class TestVarDesc(unittest.TestCase):
self.assertEqual(src_shape, res_shape)
self.assertEqual(core.VarDesc.VarType.SELECTED_ROWS, var.type())
def test_data_type(self):
def test_dtype(self):
program_desc = core.ProgramDesc()
block = program_desc.block(0)
var = block.var('my_var')
var.set_type(core.VarDesc.VarType.LOD_TENSOR)
var.set_data_type(core.DataType.INT32)
self.assertEqual(core.DataType.INT32, var.data_type())
var.set_dtype(core.DataType.INT32)
self.assertEqual(core.DataType.INT32, var.dtype())
self.assertEqual(core.VarDesc.VarType.LOD_TENSOR, var.type())
......
......@@ -118,14 +118,14 @@ class RecurrentOpTest1(unittest.TestCase):
def create_rnn_op(self):
x = layers.data(
shape=[self.sent_len, self.batch_size, self.input_dim],
data_type='float32',
dtype='float32',
name='x',
append_batch_size=False,
**self.p_info)
x.stop_gradient = False
h_boot = layers.data(
shape=[self.input_dim],
data_type='float32',
dtype='float32',
name='h_boot',
**self.p_info)
h_boot.stop_gradient = False
......@@ -251,14 +251,14 @@ class RecurrentOpTest2(RecurrentOpTest1):
def create_rnn_op(self):
x = layers.data(
shape=[self.sent_len, self.batch_size, self.input_dim],
data_type='float32',
dtype='float32',
name='x',
append_batch_size=False,
**self.p_info)
x.stop_gradient = False
h_boot = layers.data(
shape=[self.input_dim],
data_type='float32',
dtype='float32',
name='h_boot',
**self.p_info)
h_boot.stop_gradient = False
......@@ -350,21 +350,21 @@ class RecurrentOpMultipleMemoryTest(RecurrentOpTest1):
def create_rnn_op(self):
x = layers.data(
shape=[self.sent_len, self.batch_size, self.input_dim],
data_type='float32',
dtype='float32',
name='x',
append_batch_size=False,
**self.p_info)
x.stop_gradient = False
h_boot1 = layers.data(
shape=[self.batch_size, self.input_dim],
data_type='float32',
dtype='float32',
name='h_boot1',
append_batch_size=False,
**self.p_info)
h_boot1.stop_gradient = False
h_boot2 = layers.data(
shape=[self.batch_size, self.input_dim],
data_type='float32',
dtype='float32',
name='h_boot2',
append_batch_size=False,
**self.p_info)
......@@ -435,7 +435,7 @@ class RecurrentOpNoMemBootTest(RecurrentOpTest1):
def create_rnn_op(self):
x = layers.data(
shape=[self.sent_len, self.batch_size, self.input_dim],
data_type='float32',
dtype='float32',
name='x',
append_batch_size=False,
**self.p_info)
......
......@@ -9,7 +9,7 @@ import numpy
class TestShrinkRNNMemory(unittest.TestCase):
def test_shrink_rnn_memory(self):
x = layers.data('x', shape=[100], data_type='float32')
x = layers.data('x', shape=[100], dtype='float32')
x.stop_gradient = False
table = layers.lod_rank_table(x=x)
i = layers.zeros(dtype='int64', shape=[1])
......
......@@ -123,13 +123,13 @@ class TestCPUSplitMergeLoDTensorGrad(unittest.TestCase):
x = layers.data(
name='x',
shape=[1],
data_type='float32',
dtype='float32',
main_program=program,
stop_gradient=False)
y = layers.data(
name='y',
shape=[1],
data_type='bool',
dtype='bool',
main_program=program,
stop_gradient=False)
......
......@@ -22,13 +22,13 @@ class TestVariable(unittest.TestCase):
w = b.create_var(
dtype="float64", shape=[784, 100], lod_level=0, name="fc.w")
self.assertNotEqual(str(w), "")
self.assertEqual(core.DataType.FP64, w.data_type)
self.assertEqual(core.DataType.FP64, w.dtype)
self.assertEqual((784, 100), w.shape)
self.assertEqual("fc.w", w.name)
self.assertEqual(0, w.lod_level)
w = b.create_var(name='fc.w')
self.assertEqual(core.DataType.FP64, w.data_type)
self.assertEqual(core.DataType.FP64, w.dtype)
self.assertEqual((784, 100), w.shape)
self.assertEqual("fc.w", w.name)
self.assertEqual(0, w.lod_level)
......
......@@ -9,11 +9,11 @@ import numpy
class TestWhileOp(unittest.TestCase):
def test_simple_forward(self):
d0 = layers.data(
"d0", shape=[10], append_batch_size=False, data_type='float32')
"d0", shape=[10], append_batch_size=False, dtype='float32')
d1 = layers.data(
"d1", shape=[10], append_batch_size=False, data_type='float32')
"d1", shape=[10], append_batch_size=False, dtype='float32')
d2 = layers.data(
"d2", shape=[10], append_batch_size=False, data_type='float32')
"d2", shape=[10], append_batch_size=False, dtype='float32')
i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = True
init = layers.zeros(shape=[10], dtype='float32')
......