提交 9cb455fa 编写于 作者: F fengjiayi

update function

上级 0fb5e351
......@@ -98,15 +98,6 @@ class PaddingLoDTensorFunctor<platform::CPUDeviceContext, T> {
CopyValidData<T>(pad_tensor, &seq_tensor, seq_offsets, pad_seq_len,
step_width, norm_by_times, kSeqToPad, layout);
// Set pad_tensor's lod info if possible
if (layout == kBatchLengthWidth) {
framework::LoD pad_lod(seq_lod.begin() + lod_level, seq_lod.end());
for (size_t i = 0; i < pad_lod[0].size(); ++i) {
pad_lod[0][i] = i * pad_seq_len;
}
pad_tensor->set_lod(pad_lod);
}
}
};
......
......@@ -106,14 +106,6 @@ class PaddingLoDTensorFunctor<platform::CUDADeviceContext, T> {
pad_data, seq_data, pad_value_data, pad_value.numel() == 1,
seq_offsets.CUDAData(context.GetPlace()), seq_num, pad_seq_len,
step_width, norm_by_times, layout);
if (layout == kBatchLengthWidth) {
framework::LoD pad_lod(seq_lod.begin() + lod_level, seq_lod.end());
for (size_t i = 0; i < pad_lod[0].size(); ++i) {
pad_lod[0][i] = i * pad_seq_len;
}
pad_tensor->set_lod(pad_lod);
}
}
};
......
......@@ -40,7 +40,8 @@ class SequencePadOp : public framework::OperatorWithKernel {
"The Input(PadValue) must be a scalar or a tensor whose "
"shape equals to time steps in sequences");
int batch_dim_size = -1;
int out_dim_0 = -1;
int out_dim_1 = -1;
if (ctx->IsRuntime()) {
// run time
......@@ -64,7 +65,8 @@ class SequencePadOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE_GE(padded_length, max_seq_len,
"The Attr(padded_length) must be -1 or an int greater "
"than the length of the longest original sequence.");
batch_dim_size = padded_length * seq_num;
out_dim_0 = seq_num;
out_dim_1 = padded_length;
} else {
// compile time
framework::VarDesc* x_desc =
......@@ -72,9 +74,11 @@ class SequencePadOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE_GE(x_desc->GetLoDLevel(), 1);
}
auto out_dims = x_dims;
out_dims[0] = batch_dim_size;
ctx->SetOutputDim("Out", out_dims);
std::vector<int> out_dims_vec{out_dim_0, out_dim_1};
auto time_step_dims_vec = framework::vectorize2int(time_step_dims);
out_dims_vec.insert(out_dims_vec.end(), time_step_dims_vec.begin(),
time_step_dims_vec.end());
ctx->SetOutputDim("Out", framework::make_ddim(out_dims_vec));
}
};
......@@ -118,9 +122,9 @@ class SequencePadOpMaker : public framework::OpProtoAndCheckerMaker {
and Input(PadValue):
PadValue.data = [0]
and attribute 'padded_length' = 4,
then we get 1-level LoDTensor:
Out.lod = [[0, 4, 8]]
Out.data = [a, b, 0, 0, c, d, e, 0]
then we get LoDTensor:
Out.data = [[a, b, 0, 0],
[c, d, e, 0]]
Case 2:
......@@ -131,9 +135,9 @@ class SequencePadOpMaker : public framework::OpProtoAndCheckerMaker {
PadValue.data = [0]
and attribute 'padded_length' = -1, which means using the length
of the longest input sequence (3 in this case),
then we get 1-level LoDTensor:
Out.lod = [[0, 3, 6]]
Out.data = [[a1, a2], [b1, b2], [0, 0], [c1, c2], [d1, d2], [e1, e2]]
then we get LoDTensor:
Out.data = [[[a1, a2], [b1, b2], [0, 0]],
[[c1, c2], [d1, d2], [e1, e2]]]
Case 3:
......@@ -144,9 +148,9 @@ class SequencePadOpMaker : public framework::OpProtoAndCheckerMaker {
PadValue.data = [p1, p2]
and attribute 'padded_length' = -1, which means using the length
of the longest input sequence (3 in this case),
then we get 1-level LoDTensor:
Out.lod = [[0, 3, 6]]
Out.data = [[a1, a2], [b1, b2], [p1, p2], [c1, c2], [d1, d2], [e1, e2]]
then we get LoDTensor:
Out.data = [[[a1, a2], [b1, b2], [p1, p2]],
[[c1, c2], [d1, d2], [e1, e2]]]
)DOC");
}
......
......@@ -61,11 +61,8 @@ class TestSequencePadOp(OpTest):
padded_sequences.append(seq)
start_idx = end_idx
out_len_lod = self.x_len_lod[:]
out_len_lod_0 = [padded_length] * len(x_len_lod_0)
out_len_lod[0] = out_len_lod_0
out_data = np.concatenate(padded_sequences, axis=0)
self.outputs = {'Out': (out_data, out_len_lod)}
out_data = np.array(padded_sequences)
self.outputs = {'Out': out_data}
def setUp(self):
self.op_type = 'sequence_pad'
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册