diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index a103be7191d02a96ee97d76f786f9364938c1c65..6666dd8e60a509c3f82f66f832ecce03811e9fb6 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -544,11 +544,13 @@ class RuntimeInferShapeContext : public InferShapeContext { void ShareLoD(const std::string& in, const std::string& out, size_t i = 0, size_t j = 0) const override { - PADDLE_ENFORCE_LT(i, Inputs(in).size()); - PADDLE_ENFORCE_LT(j, Outputs(out).size()); - Variable* in_var = scope_.FindVar(Inputs(in)[i]); - Variable* out_var = scope_.FindVar(Outputs(out)[j]); + const std::vector<std::string>& inputs = Inputs(in); + const std::vector<std::string>& outputs = Outputs(out); + PADDLE_ENFORCE_LT(i, inputs.size()); + PADDLE_ENFORCE_LT(j, outputs.size()); + Variable* in_var = scope_.FindVar(inputs.at(i)); if (!in_var->IsType<LoDTensor>()) return; + Variable* out_var = scope_.FindVar(outputs.at(j)); PADDLE_ENFORCE(out_var->IsType<LoDTensor>(), "The %d-th output of Output(%s) must be LoDTensor.", j, out); auto in_tensor = in_var->Get<LoDTensor>(); @@ -576,20 +578,6 @@ class RuntimeInferShapeContext : public InferShapeContext { out_tensor->set_layout(in_tensor.layout()); } - void ShareLayout(const std::string& in, const std::string& out, size_t i = 0, - size_t j = 0) const { - PADDLE_ENFORCE_LT(i, Inputs(in).size()); - PADDLE_ENFORCE_LT(j, Outputs(out).size()); - Variable* in_var = scope_.FindVar(Inputs(in)[i]); - Variable* out_var = scope_.FindVar(Outputs(out)[j]); - if (!in_var->IsType<LoDTensor>()) return; - PADDLE_ENFORCE(out_var->IsType<LoDTensor>(), - "The %d-th output of Output(%s) must be LoDTensor.", j, out); - auto in_tensor = in_var->Get<LoDTensor>(); - auto* out_tensor = out_var->GetMutable<LoDTensor>(); - out_tensor->set_layout(in_tensor.layout()); - } - bool IsRuntime() const override { return true; } protected: diff --git a/paddle/fluid/framework/shape_inference.cc b/paddle/fluid/framework/shape_inference.cc index 
89eb00ff65598eff5f4ba541df107e8da04e1a89..ddff2c7c261746ac9986e79cff3da7e0a9654adc 100644 --- a/paddle/fluid/framework/shape_inference.cc +++ b/paddle/fluid/framework/shape_inference.cc @@ -46,16 +46,6 @@ std::vector<DDim> InferShapeContext::GetReaderDims( return this->GetRepeatedDims(arg_names[0]); } -void InferShapeContext::ShareLoDs(const std::string &in, - const std::string &out) const { - PADDLE_ENFORCE_EQ(Inputs(in).size(), Outputs(out).size(), - "The number of arguments in %s and %s is not equal.", in, - out); - for (size_t i = 0; i < in.size(); ++i) { - ShareLoD(in, out, i, i); - } -} - DDim InferShapeContext::GetInputsElementDim(const std::string &name, int idx) const { const std::vector<std::string> &names = Inputs(name); diff --git a/paddle/fluid/framework/shape_inference.h b/paddle/fluid/framework/shape_inference.h index fd220d961af85dd55fe2031409180823d8f178fc..5f497cafa0f75f7c23d550ef767d55274de7c900 100644 --- a/paddle/fluid/framework/shape_inference.h +++ b/paddle/fluid/framework/shape_inference.h @@ -56,8 +56,6 @@ class InferShapeContext { virtual const std::vector<std::string> &Outputs( const std::string &name) const = 0; - void ShareLoDs(const std::string &in, const std::string &out) const; - virtual void ShareLoD(const std::string &in, const std::string &out, size_t i = 0, size_t j = 0) const = 0; diff --git a/python/paddle/reader/decorator.py b/python/paddle/reader/decorator.py index 5b9459b670ac8583ee0e65a3c1b51f6248bb6303..b2ef9f75809004d9df0003217c2dafcd69e83890 100644 --- a/python/paddle/reader/decorator.py +++ b/python/paddle/reader/decorator.py @@ -15,7 +15,7 @@ __all__ = [ 'map_readers', 'buffered', 'compose', 'chain', 'shuffle', 'ComposeNotAligned', 'firstn', 'xmap_readers', 'PipeReader', - 'multiprocess_reader' + 'multiprocess_reader', 'Fake' ] from threading import Thread @@ -504,3 +504,39 @@ class PipeReader: yield decomp_buff else: break + + +class Fake(object): + """ + fake reader will cache the first data it reads and yield it out for data_num times. 
+ It is used to cache one piece of data from the real reader and use it for speed testing. + + :param reader: the origin reader + :param data_num: times that this reader will yield data. + + :return: a fake reader. + + Examples: + .. code-block:: python + + def reader(): + for i in range(10): + yield i + + fake_reader = Fake()(reader, 100) + """ + + def __init__(self): + self.data = None + self.yield_num = 0 + + def __call__(self, reader, data_num): + def fake_reader(): + if self.data is None: + self.data = next(reader()) + while self.yield_num < data_num: + yield self.data + self.yield_num += 1 + self.yield_num = 0 + + return fake_reader diff --git a/python/paddle/reader/tests/decorator_test.py b/python/paddle/reader/tests/decorator_test.py index c324092f8850e4bd64955aa9c987746b5cec54b5..b9af8348e16c051db64d57a9594aee303d83aef2 100644 --- a/python/paddle/reader/tests/decorator_test.py +++ b/python/paddle/reader/tests/decorator_test.py @@ -203,5 +203,21 @@ class TestMultiProcessReader(unittest.TestCase): self.reader_test(use_pipe=True) +class TestFakeReader(unittest.TestCase): + def test_fake_reader(self): + def reader(): + for i in range(10): + yield i + + data_num = 100 + fake_reader = paddle.reader.Fake()(reader, data_num) + for _ in range(10): + i = 0 + for data in fake_reader(): + self.assertEqual(data, 0) + i += 1 + self.assertEqual(i, data_num) + + if __name__ == '__main__': unittest.main()