提交 6b18b3cc 编写于 作者: G guosheng

Merge branch 'develop' of https://github.com/PaddlePaddle/paddle into enhance-include-pool

...@@ -22,6 +22,12 @@ std::vector<framework::DDim> InferShapeContext::GetInputsDim( ...@@ -22,6 +22,12 @@ std::vector<framework::DDim> InferShapeContext::GetInputsDim(
return GetDims(names); return GetDims(names);
} }
// Returns the shape (DDim) of the idx-th variable bound to the input slot
// `name`. Inputs(name) yields the ordered list of variable names attached to
// that slot; the dim of the idx-th entry is resolved through GetDim.
//
// @param name  Input slot name, as registered on the operator.
// @param idx   Zero-based index into the slot's variable list.
// @return      The DDim of the selected input variable.
// @throws std::out_of_range if idx is not a valid index for the slot.
DDim InferShapeContext::GetInputsElementDim(const std::string &name,
                                            int idx) const {
  const std::vector<std::string> &names = Inputs(name);
  // Bounds-checked access: an out-of-range idx now throws std::out_of_range
  // instead of invoking undefined behavior via operator[].
  return this->GetDim(names.at(idx));
}
void InferShapeContext::SetOutputsDim( void InferShapeContext::SetOutputsDim(
const std::string &name, const std::vector<framework::DDim> &dims) { const std::string &name, const std::vector<framework::DDim> &dims) {
auto &names = Outputs(name); auto &names = Outputs(name);
......
...@@ -37,6 +37,7 @@ class InferShapeContext { ...@@ -37,6 +37,7 @@ class InferShapeContext {
virtual framework::DDim GetInputDim(const std::string &name) const = 0; virtual framework::DDim GetInputDim(const std::string &name) const = 0;
std::vector<framework::DDim> GetInputsDim(const std::string &name) const; std::vector<framework::DDim> GetInputsDim(const std::string &name) const;
DDim GetInputsElementDim(const std::string &name, int idx) const;
virtual void SetOutputDim(const std::string &name, const DDim &dim) = 0; virtual void SetOutputDim(const std::string &name, const DDim &dim) = 0;
void SetOutputsDim(const std::string &name, void SetOutputsDim(const std::string &name,
......
...@@ -37,9 +37,15 @@ class WriteToArrayOp : public ArrayOp { ...@@ -37,9 +37,15 @@ class WriteToArrayOp : public ArrayOp {
<< " to " << offset + 1; << " to " << offset + 1;
out->resize(offset + 1); out->resize(offset + 1);
} }
auto *out_tensor = &out->at(offset); if (x_tensor.memory_size() > 0) {
CopyFrom(x_tensor, dev_ctx.GetPlace(), dev_ctx, out_tensor); auto *out_tensor = &out->at(offset);
out_tensor->set_lod(x_tensor.lod()); CopyFrom(x_tensor, dev_ctx.GetPlace(), dev_ctx, out_tensor);
out_tensor->set_lod(x_tensor.lod());
} else {
VLOG(10) << "WARNING: The input tensor 'x_tensor' holds no memory, so "
"nothing has been written to output array["
<< offset << "].";
}
} }
}; };
......
...@@ -287,7 +287,6 @@ class WhileGradOpShapeInference : public framework::InferShapeBase { ...@@ -287,7 +287,6 @@ class WhileGradOpShapeInference : public framework::InferShapeBase {
auto p_names = ctx->Inputs(kParameters); auto p_names = ctx->Inputs(kParameters);
auto pg_names = ctx->Outputs(kParamGrads); auto pg_names = ctx->Outputs(kParamGrads);
auto dims = ctx->GetInputsDim(kParameters);
auto var_types = ctx->GetInputsVarType(kParameters); auto var_types = ctx->GetInputsVarType(kParameters);
std::vector<std::string> names_to_set; std::vector<std::string> names_to_set;
std::vector<framework::DDim> dims_to_set; std::vector<framework::DDim> dims_to_set;
...@@ -295,13 +294,14 @@ class WhileGradOpShapeInference : public framework::InferShapeBase { ...@@ -295,13 +294,14 @@ class WhileGradOpShapeInference : public framework::InferShapeBase {
if (pg_names[i] == framework::kEmptyVarName) { if (pg_names[i] == framework::kEmptyVarName) {
continue; continue;
} }
auto dims = ctx->GetInputsElementDim(kParameters, i);
if (var_types[i] == framework::VarDesc::LOD_TENSOR) { if (var_types[i] == framework::VarDesc::LOD_TENSOR) {
names_to_set.push_back(pg_names[i]); names_to_set.push_back(pg_names[i]);
dims_to_set.push_back(dims[i]); dims_to_set.push_back(dims);
} else if (var_types[i] == framework::VarDesc::LOD_TENSOR_ARRAY) { } else if (var_types[i] == framework::VarDesc::LOD_TENSOR_ARRAY) {
// not sure how to set the dim of LOD_TENSOR_ARRAY // not sure how to set the dim of LOD_TENSOR_ARRAY
names_to_set.push_back(pg_names[i]); names_to_set.push_back(pg_names[i]);
dims_to_set.push_back(dims[i]); dims_to_set.push_back(dims);
} }
} }
ctx->SetDims(names_to_set, dims_to_set); ctx->SetDims(names_to_set, dims_to_set);
......
...@@ -185,6 +185,7 @@ def data(name, ...@@ -185,6 +185,7 @@ def data(name,
shape, shape,
append_batch_size=True, append_batch_size=True,
dtype='float32', dtype='float32',
lod_level=0,
type=core.VarDesc.VarType.LOD_TENSOR, type=core.VarDesc.VarType.LOD_TENSOR,
main_program=None, main_program=None,
startup_program=None, startup_program=None,
...@@ -198,6 +199,7 @@ def data(name, ...@@ -198,6 +199,7 @@ def data(name,
append_batch_size: Whether or not to append the data as a batch. append_batch_size: Whether or not to append the data as a batch.
dtype: The type of data : float32, float_16, int etc dtype: The type of data : float32, float_16, int etc
type: The output type. By default it is LOD_TENSOR. type: The output type. By default it is LOD_TENSOR.
lod_level(int): The LoD Level. 0 means the input data is not a sequence.
main_program: Name of the main program that calls this main_program: Name of the main program that calls this
startup_program: Name of the startup program startup_program: Name of the startup program
stop_gradient: A boolean that mentions whether gradient should flow. stop_gradient: A boolean that mentions whether gradient should flow.
...@@ -228,7 +230,8 @@ def data(name, ...@@ -228,7 +230,8 @@ def data(name,
shape=shape, shape=shape,
dtype=dtype, dtype=dtype,
type=type, type=type,
stop_gradient=stop_gradient) stop_gradient=stop_gradient,
lod_level=lod_level)
def create_tensor(dtype, name=None, main_program=None, startup_program=None): def create_tensor(dtype, name=None, main_program=None, startup_program=None):
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册