Unverified commit 99a9615a authored by Aurelius84, committed by GitHub

Removing length dims constraints of seq_pad and seq_unpad (#19497)

* Removing last dims constraints of seq_pad and seq_unpad test=develop

* fix test_layer api code test=develop

* fix sequence_pad_op.cc conflict test=develop

* remove test_analyzer_mm_dnn test=develop

* fix vectorize bug test=develop

* fix vectorize<int> test=develop
Parent cca26f5c
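In short, sequence_pad now emits its Length output with shape [batch_size] instead of [batch_size, 1], and sequence_unpad consumes that 1-D form directly. A minimal sketch of the updated usage, adapted from the docstring example added in this commit (variable names are illustrative):

import numpy
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[10, 5], dtype='float32', lod_level=1)
pad_value = fluid.layers.assign(input=numpy.array([0.0], dtype=numpy.float32))
# Length now has shape [batch_size] rather than [batch_size, 1].
padded, length = fluid.layers.sequence_pad(x=x, pad_value=pad_value)
unpadded = fluid.layers.sequence_unpad(x=padded, length=length)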
@@ -146,8 +146,8 @@ paddle.fluid.layers.conv2d_transpose (ArgSpec(args=['input', 'num_filters', 'out
paddle.fluid.layers.conv3d_transpose (ArgSpec(args=['input', 'num_filters', 'output_size', 'filter_size', 'padding', 'stride', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, None, 0, 1, 1, None, None, None, True, None, None)), ('document', 'fb08f59141971b11f5f03bba06e9fc5a'))
paddle.fluid.layers.sequence_expand (ArgSpec(args=['x', 'y', 'ref_level', 'name'], varargs=None, keywords=None, defaults=(-1, None)), ('document', '10e122eb755c2bd1f78ef2332b28f1a0'))
paddle.fluid.layers.sequence_expand_as (ArgSpec(args=['x', 'y', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '858c432e7cbd8bb952cc2eb555457d50'))
paddle.fluid.layers.sequence_pad (ArgSpec(args=['x', 'pad_value', 'maxlen', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '1ba3ccfe13ed5091e113c09c13dc3a20'))
paddle.fluid.layers.sequence_unpad (ArgSpec(args=['x', 'length', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '7f5ce36fb0016621e6bc001f4236d978'))
paddle.fluid.layers.sequence_pad (ArgSpec(args=['x', 'pad_value', 'maxlen', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'df08b9c499ab3a90f95d08ab5b6c6c62'))
paddle.fluid.layers.sequence_unpad (ArgSpec(args=['x', 'length', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e478180d5bc010a84f35af958cafa62c'))
paddle.fluid.layers.lstm_unit (ArgSpec(args=['x_t', 'hidden_t_prev', 'cell_t_prev', 'forget_bias', 'param_attr', 'bias_attr', 'name'], varargs=None, keywords=None, defaults=(0.0, None, None, None)), ('document', 'fe126c58e4339410e875ab1eba246d21'))
paddle.fluid.layers.reduce_sum (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', 'dd5f06fb7cf39ca06cbab4abd03e6893'))
paddle.fluid.layers.reduce_mean (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', 'a3024789eba11a70c2ef27c358173400'))
......
@@ -124,11 +124,6 @@ set(LAC_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/lac")
download_model_and_data(${LAC_INSTALL_DIR} "lac_model.tar.gz" "lac_data.txt.tar.gz")
inference_analysis_api_test(test_analyzer_lac ${LAC_INSTALL_DIR} analyzer_lac_tester.cc)
# MM DNN
set(MM_DNN_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/mm_dnn")
download_model_and_data(${MM_DNN_INSTALL_DIR} "MM_DNN_model.tar.gz" "MM_DNN_data.txt.tar.gz")
inference_analysis_api_test(test_analyzer_mm_dnn ${MM_DNN_INSTALL_DIR} analyzer_mm_dnn_tester.cc)
# Pyramid DNN
set(PYRAMID_DNN_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/pyramid_dnn")
download_model_and_data(${PYRAMID_DNN_INSTALL_DIR} "PyramidDNN_model.tar.gz" "PyramidDNN_data.txt.tar.gz")
......
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/inference/tests/api/tester_helper.h"
namespace paddle {
namespace inference {
struct DataRecord {
std::vector<std::vector<int64_t>> query, title;
std::vector<size_t> lod1, lod2;
size_t batch_iter{0}, batch_size{1}, num_samples; // total number of samples
DataRecord() = default;
explicit DataRecord(const std::string &path, int batch_size = 1)
: batch_size(batch_size) {
Load(path);
}
DataRecord NextBatch() {
DataRecord data;
size_t batch_end = batch_iter + batch_size;
// NOTE: skip the final batch if not enough data is provided.
if (batch_end <= query.size()) {
GetInputPerBatch(query, &data.query, &data.lod1, batch_iter, batch_end);
GetInputPerBatch(title, &data.title, &data.lod2, batch_iter, batch_end);
}
batch_iter += batch_size;
return data;
}
void Load(const std::string &path) {
std::ifstream file(path);
std::string line;
int num_lines = 0;
while (std::getline(file, line)) {
num_lines++;
std::vector<std::string> data;
split(line, '\t', &data);
// load query data
std::vector<int64_t> query_data;
split_to_int64(data[0], ' ', &query_data);
// load title data
std::vector<int64_t> title_data;
split_to_int64(data[1], ' ', &title_data);
query.push_back(std::move(query_data));
title.push_back(std::move(title_data));
}
num_samples = num_lines;
}
};
void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
int batch_size) {
PaddleTensor lod_query_tensor, lod_title_tensor;
lod_query_tensor.name = "left";
lod_title_tensor.name = "right";
auto one_batch = data->NextBatch();
// assign data
TensorAssignData<int64_t>(&lod_query_tensor, one_batch.query, one_batch.lod1);
TensorAssignData<int64_t>(&lod_title_tensor, one_batch.title, one_batch.lod2);
// Set inputs.
input_slots->assign({lod_query_tensor, lod_title_tensor});
for (auto &tensor : *input_slots) {
tensor.dtype = PaddleDType::INT64;
}
}
void SetConfig(AnalysisConfig *cfg) {
cfg->SetModel(FLAGS_infer_model);
cfg->DisableGpu();
cfg->SwitchSpecifyInputNames();
cfg->SwitchIrOptim();
}
void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
DataRecord data(FLAGS_infer_data, FLAGS_batch_size);
std::vector<PaddleTensor> input_slots;
int epoch = FLAGS_test_all_data ? data.num_samples / FLAGS_batch_size : 1;
LOG(INFO) << "number of samples: " << epoch * FLAGS_batch_size;
for (int bid = 0; bid < epoch; ++bid) {
PrepareInputs(&input_slots, &data, FLAGS_batch_size);
(*inputs).emplace_back(input_slots);
}
}
// Easy to profile independently.
void profile(bool use_mkldnn = false) {
AnalysisConfig cfg;
SetConfig(&cfg);
std::vector<std::vector<PaddleTensor>> outputs;
if (use_mkldnn) {
cfg.EnableMKLDNN();
cfg.pass_builder()->AppendPass("fc_mkldnn_pass");
}
std::vector<std::vector<PaddleTensor>> input_slots_all;
SetInput(&input_slots_all);
TestPrediction(reinterpret_cast<const PaddlePredictor::Config *>(&cfg),
input_slots_all, &outputs, FLAGS_num_threads);
if (FLAGS_num_threads == 1 && !FLAGS_test_all_data) {
PADDLE_ENFORCE_GT(outputs.size(), 0);
PADDLE_ENFORCE_EQ(outputs.back().size(), 2UL);
for (auto &output : outputs.back()) {
size_t size = GetSize(output);
PADDLE_ENFORCE_GT(size, 0);
float *result = static_cast<float *>(output.data.data());
// The output is a probability, which lies in (-1, 1).
for (size_t i = 0; i < size; i++) {
EXPECT_GT(result[i], -1);
EXPECT_LT(result[i], 1);
}
}
}
}
TEST(Analyzer_MM_DNN, profile) { profile(); }
#ifdef PADDLE_WITH_MKLDNN
TEST(Analyzer_MM_DNN, profile_mkldnn) { profile(true /* use_mkldnn */); }
#endif
// Check the fuse status
TEST(Analyzer_MM_DNN, fuse_statis) {
AnalysisConfig cfg;
SetConfig(&cfg);
int num_ops;
auto predictor = CreatePaddlePredictor<AnalysisConfig>(cfg);
auto fuse_statis = GetFuseStatis(
static_cast<AnalysisPredictor *>(predictor.get()), &num_ops);
}
// Compare result of NativeConfig and AnalysisConfig
void compare(bool use_mkldnn = false) {
AnalysisConfig cfg;
SetConfig(&cfg);
if (use_mkldnn) {
cfg.EnableMKLDNN();
cfg.pass_builder()->AppendPass("fc_mkldnn_pass");
}
std::vector<std::vector<PaddleTensor>> input_slots_all;
SetInput(&input_slots_all);
CompareNativeAndAnalysis(
reinterpret_cast<const PaddlePredictor::Config *>(&cfg), input_slots_all);
}
TEST(Analyzer_MM_DNN, compare) { compare(); }
#ifdef PADDLE_WITH_MKLDNN
TEST(Analyzer_MM_DNN, compare_mkldnn) { compare(true /* use_mkldnn */); }
#endif
// Compare Deterministic result
TEST(Analyzer_MM_DNN, compare_determine) {
AnalysisConfig cfg;
SetConfig(&cfg);
std::vector<std::vector<PaddleTensor>> input_slots_all;
SetInput(&input_slots_all);
CompareDeterministic(reinterpret_cast<const PaddlePredictor::Config *>(&cfg),
input_slots_all);
}
#ifdef PADDLE_WITH_MKLDNN
void TestMkldnnCacheClear(int mkldnn_input_shape_cache_capacity,
std::vector<std::vector<PaddleTensor>> *outputs) {
AnalysisConfig config;
SetConfig(&config);
config.EnableMKLDNN();
config.SetMkldnnCacheCapacity(mkldnn_input_shape_cache_capacity);
std::vector<PaddleTensor> input;
auto predictor = CreatePaddlePredictor<AnalysisConfig>(config);
int sample_num = 10;
DataRecord data(FLAGS_infer_data, FLAGS_batch_size);
outputs->resize(sample_num);
for (int i = 0; i < sample_num; i++) {
PrepareInputs(&input, &data, FLAGS_batch_size);
predictor->Run(input, &(*outputs)[i], 1);
}
}
TEST(Analyzer_MM_DNN, mkldnn_cache_clear) {
std::vector<std::vector<PaddleTensor>> outputs, cache_outputs;
// 0 means do not use cache clear strategy.
TestMkldnnCacheClear(0, &outputs);
// 4 means use cache clear strategy, and the
// mkldnn_input_shape_cache_capacity is 4.
TestMkldnnCacheClear(4, &cache_outputs);
// compare the result.
for (size_t i = 0; i < outputs.size(); i++) {
CompareResult(outputs[i], cache_outputs[i]);
}
}
void TestMkldnnShapeBlobSize(int mkldnn_input_shape_cache_capacity) {
AnalysisConfig config;
SetConfig(&config);
config.EnableMKLDNN();
config.SwitchUseFeedFetchOps(false);
// Since AnalysisPredictor::Run() will reset cur_mkldnn_session_id to the
// default before it finishes, we use AnalysisPredictor::ZeroCopyRun() here to
// check the mkldnn_shape_blob_size.
if (mkldnn_input_shape_cache_capacity > 0) {
platform::set_cur_mkldnn_session_id(
platform::kMKLDNNSessionID_CacheClearing);
platform::set_cur_input_shape_cache_capacity(
mkldnn_input_shape_cache_capacity);
}
std::vector<PaddleTensor> input;
auto predictor = CreatePaddlePredictor<AnalysisConfig>(config);
int sample_num = 10;
DataRecord data(FLAGS_infer_data, FLAGS_batch_size);
auto &pool = platform::DeviceContextPool::Instance();
auto *dev_ctx = dynamic_cast<platform::MKLDNNDeviceContext *>(
pool.Get(platform::CPUPlace()));
// clear before test
dev_ctx->ResetBlobMap();
for (int i = 0; i < sample_num; i++) {
PrepareInputs(&input, &data, FLAGS_batch_size);
ConvertPaddleTensorToZeroCopyTensor(predictor.get(), input);
if (mkldnn_input_shape_cache_capacity > 0) {
std::stringstream ss;
for (size_t i = 0; i < input.size(); i++) {
for (size_t j = 0; j < input[i].shape.size(); ++j) {
ss << input[i].shape[j] << "-";
}
}
platform::set_cur_input_shape_str(ss.str());
}
predictor->ZeroCopyRun();
}
if (mkldnn_input_shape_cache_capacity > 0) {
PADDLE_ENFORCE_EQ(dev_ctx->GetShapeBlobSize(),
mkldnn_input_shape_cache_capacity);
} else {
PADDLE_ENFORCE_EQ(dev_ctx->GetShapeBlobSize(), 1UL);
}
}
TEST(Analyzer_MM_DNN, mkldnn_shape_blob_size) {
// 0 means do not use cache clear strategy.
TestMkldnnShapeBlobSize(0);
// 4 means use cache clear strategy, and the
// mkldnn_input_shape_cache_capacity is 4.
TestMkldnnShapeBlobSize(4);
}
#endif
} // namespace inference
} // namespace paddle
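For reference, the deleted tester's DataRecord::Load parsed one sample per line, with the query and title id lists separated by a tab and the ids within each list separated by spaces. The same parsing as a short Python sketch (load_mm_dnn_data is a hypothetical helper, not part of the codebase):

def load_mm_dnn_data(path):
    # Each line: "<query ids>\t<title ids>"; ids are space-separated integers.
    query, title = [], []
    with open(path) as f:
        for line in f:
            fields = line.rstrip('\n').split('\t')
            query.append([int(tok) for tok in fields[0].split()])
            title.append([int(tok) for tok in fields[1].split()])
    return query, title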
@@ -25,24 +25,25 @@ class SequencePadOp : public framework::OperatorWithKernel {
protected:
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
"Input(X) of SequencePadOp should not be null.");
PADDLE_ENFORCE(ctx->HasInput("PadValue"),
"Input(PadValue) of SequencePadOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Output(Out) of SequencePadOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Length"),
"Output(Length) of SequencePadOp should not be null.");
PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
"Input(X) of SequencePadOp should not be null.");
PADDLE_ENFORCE_EQ(ctx->HasInput("PadValue"), true,
"Input(PadValue) of SequencePadOp should not be null.");
PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
"Output(Out) of SequencePadOp should not be null.");
PADDLE_ENFORCE_EQ(ctx->HasOutput("Length"), true,
"Output(Length) of SequencePadOp should not be null.");
auto x_dims = ctx->GetInputDim("X");
PADDLE_ENFORCE_GE(x_dims.size(), 2,
"The rank of Input(X) can't be less than 2.");
auto time_step_dims = framework::slice_ddim(x_dims, 1, x_dims.size());
auto pad_value_dims = ctx->GetInputDim("PadValue");
PADDLE_ENFORCE(pad_value_dims == framework::make_ddim({1}) ||
pad_value_dims == time_step_dims,
"The Input(PadValue) must be a scalar or a tensor whose "
"shape equals to time steps in sequences");
PADDLE_ENFORCE_EQ(pad_value_dims == framework::make_ddim({1}) ||
pad_value_dims == time_step_dims,
true,
"The Input(PadValue) must be a scalar or a tensor whose "
"shape equals the time steps of the sequences.");
int out_dim_0 = -1;
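The PadValue check above, restated as a small Python predicate (a hypothetical helper; shapes are plain lists):

def pad_value_shape_ok(x_dims, pad_value_dims):
    # PadValue is either a scalar ([1]) or matches the per-time-step shape.
    time_step_dims = x_dims[1:]
    return pad_value_dims == [1] or pad_value_dims == time_step_dims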
@@ -52,7 +53,8 @@ class SequencePadOp : public framework::OperatorWithKernel {
framework::Variable* x_var =
boost::get<framework::Variable*>(ctx->GetInputVarPtrs("X")[0]);
const auto& x_lod = x_var->Get<LoDTensor>().lod();
PADDLE_ENFORCE(!x_lod.empty(), "The Input(X) must hold lod info.");
PADDLE_ENFORCE_EQ(x_lod.empty(), false,
"The Input(X) must hold lod info.");
const auto& x_lod_0 = x_lod[0];
PADDLE_ENFORCE_GE(x_lod_0.size(), 2,
"The Input(X)'s lod info is corrupted.");
@@ -80,7 +82,7 @@ class SequencePadOp : public framework::OperatorWithKernel {
}
std::vector<int> out_dims_vec{out_dim_0, padded_length};
std::vector<int> len_dims_vec{out_dim_0, 1};
std::vector<int> len_dims_vec{out_dim_0};
auto time_step_dims_vec = framework::vectorize<int>(time_step_dims);
out_dims_vec.insert(out_dims_vec.end(), time_step_dims_vec.begin(),
time_step_dims_vec.end());
@@ -143,7 +145,7 @@ class SequencePadOpMaker : public framework::OpProtoAndCheckerMaker {
then we get LoDTensor:
Out.data = [[a, b, 0, 0],
[c, d, e, 0]]
Length.data = [[2], [3]]
Length.data = [2, 3]
Case 2:
@@ -157,7 +159,7 @@ class SequencePadOpMaker : public framework::OpProtoAndCheckerMaker {
then we get LoDTensor:
Out.data = [[[a1, a2], [b1, b2], [0, 0]],
[[c1, c2], [d1, d2], [e1, e2]]]
Length.data = [[2], [3]]
Length.data = [2, 3]
Case 3:
@@ -171,7 +173,7 @@ class SequencePadOpMaker : public framework::OpProtoAndCheckerMaker {
then we get LoDTensor:
Out.data = [[[a1, a2], [b1, b2], [p1, p2]],
[[c1, c2], [d1, d2], [e1, e2]]]
Length.data = [[2], [3]]
Length.data = [2, 3]
)DOC");
}
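Case 1 above, sketched in NumPy to show the padding and the new 1-D Length (values are illustrative; the real op consumes a LoDTensor):

import numpy as np

seqs = [np.array([1.0, 2.0]), np.array([3.0, 4.0, 5.0])]  # lengths 2 and 3
maxlen, pad_value = 4, 0.0
out = np.stack([np.pad(s, (0, maxlen - len(s)), mode='constant',
                constant_values=pad_value) for s in seqs])  # shape (2, 4)
length = np.array([len(s) for s in seqs])  # [2, 3] with shape (2,), not (2, 1)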
@@ -182,10 +184,11 @@ class SequencePadGradOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
"Input(X) of SequencePadGradOp should not be null.");
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
"Input(Out@GRAD) of SequencePadGradOp should not be null.");
PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
"Input(X) of SequencePadGradOp should not be null.");
PADDLE_ENFORCE_EQ(
ctx->HasInput(framework::GradVarName("Out")), true,
"Input(Out@GRAD) of SequencePadGradOp should not be null.");
if (ctx->HasOutput(framework::GradVarName("X"))) {
ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
......
@@ -25,22 +25,22 @@ class SequenceUnpadOp : public framework::OperatorWithKernel {
protected:
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
"Input(X) of SequenceUnpadOp should not be null.");
PADDLE_ENFORCE(ctx->HasInput("Length"),
"Input(Length) of SequenceUnpadOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Output(Out) of SequenceUnpadOp should not be null.");
PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
"Input(X) of SequenceUnpadOp should not be null.");
PADDLE_ENFORCE_EQ(ctx->HasInput("Length"), true,
"Input(Length) of SequenceUnpadOp should not be null.");
PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
"Output(Out) of SequenceUnpadOp should not be null.");
auto x_dims = ctx->GetInputDim("X");
PADDLE_ENFORCE_GE(x_dims.size(), 2,
"The rank of Input(X) can't be less than 2.");
auto len_dims = ctx->GetInputDim("Length");
PADDLE_ENFORCE(len_dims.size() == 2 && len_dims[1] == 1,
"The shape of Input(Length) should be [batch_size, 1].");
PADDLE_ENFORCE(
len_dims[0] == x_dims[0],
PADDLE_ENFORCE_EQ(len_dims.size(), 1,
"The shape of Input(Length) should be [batch_size].");
PADDLE_ENFORCE_EQ(
len_dims[0], x_dims[0],
"Input(X) and Input(Length) should have the same first dimension.");
int64_t out_dim_0 = -1;
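Likewise for sequence_unpad: Length is now validated as a 1-D tensor with one entry per sequence in the batch (sketch with a hypothetical helper; shapes are plain lists):

def unpad_length_ok(x_dims, len_dims):
    # Length must be 1-D and as long as the batch dimension of X.
    return len(len_dims) == 1 and len_dims[0] == x_dims[0]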
@@ -96,7 +96,7 @@ class SequenceUnpadOpMaker : public framework::OpProtoAndCheckerMaker {
in which there are 3 sequences padded to length 5, and the actual length
specified by Input(Length):
Length.data = [[2], [3], [4]],
Length.data = [2, 3, 4],
after unpadding, Output(Out) will be:
@@ -112,10 +112,10 @@ class SequenceUnpadGradOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
"Input(X) of SequenceUnpadGradOp should not be null.");
PADDLE_ENFORCE(
ctx->HasInput(framework::GradVarName("Out")),
PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
"Input(X) of SequenceUnpadGradOp should not be null.");
PADDLE_ENFORCE_EQ(
ctx->HasInput(framework::GradVarName("Out")), true,
"Input(Out@GRAD) of SequenceUnpadGradOp should not be null.");
if (ctx->HasOutput(framework::GradVarName("X"))) {
......
@@ -4588,7 +4588,7 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):
import paddle.fluid as fluid
import numpy
x = fluid.layers.data(name='y', shape=[10, 5],
x = fluid.layers.data(name='x', shape=[10, 5],
dtype='float32', lod_level=1)
pad_value = fluid.layers.assign(
input=numpy.array([0.0], dtype=numpy.float32))
@@ -4637,7 +4637,7 @@ def sequence_unpad(x, length, name=None):
in which there are 3 sequences padded to length 5, and the actual length
specified by input Variable **length**:
length.data = [[2], [3], [4]],
length.data = [2, 3, 4],
after unpadding, the output Variable will be:
@@ -4659,9 +4659,15 @@ def sequence_unpad(x, length, name=None):
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[10, 5], dtype='float32')
len = fluid.layers.data(name='length', shape=[1], dtype='int64')
out = fluid.layers.sequence_unpad(x=x, length=len)
import numpy
# pad data
x = fluid.layers.data(name='x', shape=[10, 5], dtype='float32', lod_level=1)
pad_value = fluid.layers.assign(input=numpy.array([0.0], dtype=numpy.float32))
pad_data, len = fluid.layers.sequence_pad(x=x, pad_value=pad_value)
# unpad data
unpad_data = fluid.layers.sequence_unpad(x=pad_data, length=len)
"""
assert not in_dygraph_mode(), (
......
@@ -2176,7 +2176,7 @@ class TestBook(LayerTest):
# TODO(minqiyang): dygraph do not support lod now
with self.static_graph():
x = layers.data(name='x', shape=[10, 5], dtype='float32')
length = layers.data(name='length', shape=[1], dtype='int64')
length = layers.data(name='length', shape=[], dtype='int64')
return (layers.sequence_unpad(x=x, length=length))
def test_sequence_softmax(self):
......
@@ -62,7 +62,7 @@ class TestSequencePadOp(OpTest):
start_idx = end_idx
out_data = np.array(padded_sequences)
length = np.array(self.x_len_lod[0]).reshape((-1, 1))
length = np.array(self.x_len_lod[0]).reshape((-1))
self.outputs = {'Out': out_data, 'Length': length}
def setUp(self):
......
@@ -39,10 +39,7 @@ class TestSequenceUnpadOp(OpTest):
else:
out_shape = out_shape + self.x_shape[2:]
self.inputs = {
'X': x,
'Length': np.array(self.length).astype('int64').reshape(-1, 1)
}
self.inputs = {'X': x, 'Length': np.array(self.length).astype('int64')}
self.outputs = {'Out': (out.reshape(out_shape), out_lod)}
def setUp(self):
......
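The test updates reduce to a single reshape: the Length tensors drop their trailing unit dimension. A tiny before/after sketch of the shapes (values are hypothetical):

import numpy as np

lengths = [2, 3, 4]
old = np.array(lengths).reshape((-1, 1))  # shape (3, 1): the old 2-D form
new = np.array(lengths).reshape((-1,))    # shape (3,): the new 1-D form
assert new.tolist() == [2, 3, 4]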