提交 20725f2d 编写于 作者: Q qijun

add executor feed operator test

上级 623848af
......@@ -48,8 +48,7 @@ Executor::~Executor() {
}
}
void Executor::Run(const ProgramDesc& pdesc, Scope* scope,
std::vector<Tensor>* outputs) {
void Executor::Run(const ProgramDesc& pdesc, Scope* scope) {
// TODO(tonyyang-svail):
// - only runs the first block
// - only runs on the first device
......@@ -76,14 +75,15 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope,
device_context->Wait();
}
// // print tensor value
// for (auto& var : block.vars()) {
// std::cout << var.name() << std::endl;
// auto v = scope->FindVar(var.name());
// const LoDTensor& t = v->Get<LoDTensor>();
// for (int i = 0; i < t.numel(); ++i)
// std::cout << t.data<float>()[i] << " ";
// std::cout << std::endl;
// }
for (auto& var : block.vars()) {
std::cout << var.name() << std::endl;
auto v = scope->FindVar(var.name());
const LoDTensor& t = v->Get<LoDTensor>();
for (int i = 0; i < t.numel(); ++i) {
std::cout << t.data<float>()[i] << " ";
}
std::cout << std::endl;
}
}
} // namespace framework
......
......@@ -26,7 +26,7 @@ class Executor {
public:
explicit Executor(const std::vector<platform::Place>& places);
~Executor();
void Run(const ProgramDesc&, Scope*, std::vector<Tensor>*);
void Run(const ProgramDesc&, Scope*);
private:
std::vector<platform::DeviceContext*> device_contexts_;
......
......@@ -13,17 +13,18 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/framework/executor.h"
#include <memory> // for unique_ptr
#include <mutex> // for call_once
#include <vector>
#include "gtest/gtest.h"
#include "paddle/framework/attribute.h"
#include "paddle/framework/grad_op_builder.h"
#include "paddle/framework/op_registry.h"
#include "paddle/framework/operator.h"
#include <vector>
USE_OP(elementwise_add);
USE_OP(gaussian_random);
USE_OP(feed);
using std::string;
using namespace paddle::platform;
......@@ -58,7 +59,67 @@ void add_gaussian_random_op(string var_name, proto_block* block) {
Out->add_arguments(var_name);
}
class ExecutorTester : public ::testing::Test {
// Appends a "feed" operator (plus its FP32 output variable) to `block`.
// The variable `var_name` is declared with shape {3}; the operator carries a
// matching "dims" attribute and a "col" attribute selecting column `index`
// of the global feed inputs.
void add_feed_op(string var_name, int index, proto_block* block) {
  const std::vector<int> shape{3};

  // Declare the output variable in the block.
  auto* var_desc = block->add_vars();
  var_desc->set_name(var_name);
  auto* lod_tensor_desc = var_desc->mutable_lod_tensor();
  lod_tensor_desc->set_data_type(paddle::framework::DataType::FP32);
  for (size_t k = 0; k < shape.size(); ++k) {
    lod_tensor_desc->add_dims(shape[k]);
  }

  // Append the feed operator itself.
  auto* feed_op = block->add_ops();
  feed_op->set_type("feed");

  // "dims" attribute mirrors the declared variable shape.
  auto* dims_attr = feed_op->add_attrs();
  dims_attr->set_name("dims");
  dims_attr->set_type(paddle::framework::AttrType::INTS);
  for (size_t k = 0; k < shape.size(); ++k) {
    dims_attr->add_ints(shape[k]);
  }

  // "col" attribute: which column of the global feed variable to read.
  auto* col_attr = feed_op->add_attrs();
  col_attr->set_name("col");
  col_attr->set_type(paddle::framework::AttrType::INT);
  col_attr->set_i(index);

  // Wire the operator output to the declared variable.
  auto* output = feed_op->add_outputs();
  output->set_parameter("Out");
  output->add_arguments(var_name);
}
// Guards one-time allocation of the feed tensors below.
std::once_flag set_variable_flag;

// Copies host vectors into the process-global "feed_value" variable that the
// feed operator reads from.
//
// On the FIRST call only (std::call_once), one CPU tensor per input vector is
// allocated, sized to that call's inputs[i].size(). Every call then memcpy's
// the current inputs into those tensors.
//
// NOTE(review): because allocation happens once, later calls must pass the
// same number of inputs with the same (or smaller) per-vector sizes as the
// first call — a larger inputs[i] would overrun the tensor buffer in the
// memcpy below. Confirm callers uphold this.
template <typename T>
void set_feed_variable(const std::vector<std::vector<T>>& inputs) {
  typedef std::vector<paddle::framework::Tensor> FeedInputs;
  // "feed_value" lives in the global scope shared with FeedOp.
  Variable* g_feed_value = GetScope()->FindVar("feed_value");
  FeedInputs& feed_inputs = *(g_feed_value->GetMutable<FeedInputs>());
  auto size = inputs.size();
  std::call_once(set_variable_flag, [&]() {
    feed_inputs.reserve(size);
    for (size_t i = 0; i < size; i++) {
      paddle::framework::Tensor tmp;
      // 1-D tensor whose length matches this input vector, on CPU.
      tmp.mutable_data<T>(make_ddim({static_cast<int64_t>(inputs[i].size())}),
                          CPUPlace());
      feed_inputs.push_back(tmp);
    }
  });
  // Refresh the tensor contents on every call (tensors persist across calls).
  for (size_t i = 0; i < size; i++) {
    memcpy(feed_inputs[i].data<T>(), inputs[i].data(),
           inputs[i].size() * sizeof(T));
  }
}
class ExecutorTesterRandom : public ::testing::Test {
public:
virtual void SetUp() override {
auto root_block = pdesc_.add_blocks();
......@@ -84,33 +145,103 @@ class ExecutorTester : public ::testing::Test {
auto Out = op->add_outputs();
Out->set_parameter("Out");
Out->add_arguments("c");
scope_ = GetScope();
}
protected:
std::vector<Tensor>* outputs_{nullptr};
ProgramDesc pdesc_;
Scope scope_;
Scope* scope_;
};
TEST_F(ExecutorTester, InitCPU) {
// Fixture building a program that feeds two FP32 vectors ("a", "b") and adds
// them element-wise into "c"; the concrete feed data is staged in inputs_.
class ExecutorTesterFeed : public ::testing::Test {
 public:
  virtual void SetUp() override {
    auto* root_block = pdesc_.add_blocks();
    root_block->set_idx(0);
    root_block->set_parent_idx(-1);

    // Two feed operators reading columns 0 and 1 of the global feed variable.
    add_feed_op("a", 0, root_block);
    add_feed_op("b", 1, root_block);

    // Result variable "c" (FP32 LoDTensor; dims set by shape inference).
    auto* result_var = root_block->add_vars();
    result_var->set_name("c");
    auto* result_lod = result_var->mutable_lod_tensor();
    result_lod->set_data_type(paddle::framework::DataType::FP32);

    // c = a + b
    auto* add_op = root_block->add_ops();
    add_op->set_type("elementwise_add");
    auto* x_input = add_op->add_inputs();
    x_input->set_parameter("X");
    x_input->add_arguments("a");
    auto* y_input = add_op->add_inputs();
    y_input->set_parameter("Y");
    y_input->add_arguments("b");
    auto* out = add_op->add_outputs();
    out->set_parameter("Out");
    out->add_arguments("c");

    // Mini-batch data consumed by set_feed_variable in the tests.
    inputs_.push_back({1.0, 2.0, 3.0});
    inputs_.push_back({4.0, 5.0, 6.0});
  }

 protected:
  ProgramDesc pdesc_;
  std::vector<std::vector<float>> inputs_;
};
// Runs the randomly-initialized program once across two CPU places.
TEST_F(ExecutorTesterRandom, CPU) {
  std::vector<Place> places;
  CPUPlace cpu_place1, cpu_place2;
  places.push_back(cpu_place1);
  places.push_back(cpu_place2);

  // RAII ownership replaces the raw new/delete pair: the executor is released
  // even if Run() throws or a fatal gtest assertion unwinds.
  std::unique_ptr<Executor> executor(new Executor(places));
  executor->Run(pdesc_, scope_);
}
// Runs the feed+add program for several mini-batches on one CPU place.
TEST_F(ExecutorTesterFeed, CPU) {
  std::vector<Place> places;
  CPUPlace cpu_place;
  places.push_back(cpu_place);

  // RAII ownership replaces the raw new/delete pair: no leak if Run() throws
  // or an assertion escapes the loop.
  std::unique_ptr<Executor> executor(new Executor(places));

  // 3 mini-batch
  for (int i = 0; i < 3; i++) {
    // need to set feed variable before Executor::Run
    set_feed_variable<float>(inputs_);
    executor->Run(pdesc_, GetScope());
  }
}
#ifdef PADDLE_WITH_GPU
TEST_F(ExecutorTester, InitGPU) {
// Runs the randomly-initialized program once on GPU 0.
TEST_F(ExecutorTesterRandom, GPU) {
  std::vector<Place> places;
  GPUPlace gpu_place(0);
  places.push_back(gpu_place);

  // RAII ownership replaces the raw new/delete pair.
  std::unique_ptr<Executor> executor(new Executor(places));
  executor->Run(pdesc_, scope_);
}
// Runs the feed+add program once on GPU 0.
// Fixes the self-contradictory diff residue in this block: the old and new
// hunk lines were interleaved, leaving two GPUPlace(0) pushes and two
// conflicting Run() calls; this is the coherent post-change version. The raw
// new/delete pair is also replaced with RAII ownership.
TEST_F(ExecutorTesterFeed, GPU) {
  std::vector<Place> places;
  GPUPlace gpu_place(0);
  places.push_back(gpu_place);

  std::unique_ptr<Executor> executor(new Executor(places));

  // need to set feed variable before Executor::Run
  set_feed_variable<float>(inputs_);
  executor->Run(pdesc_, scope_);
}
#endif
......@@ -28,19 +28,30 @@ class FeedOp : public framework::OperatorWithKernel {
int col = ctx->Attrs().Get<int>("col");
framework::Variable* g_feed_variable =
framework::GetScope()->FindVar("feed_value");
FeedInputs tensors = g_feed_variable->Get<FeedInputs>();
auto in_dim = tensors[col].dims();
ctx->SetOutputDim("Y", in_dim);
ctx->SetOutputDim("Out", in_dim);
// need to handle LodTensor later
}
framework::DataType IndicateDataType(
const framework::ExecutionContext& ctx) const override {
return static_cast<framework::DataType>(Attr<int>("data_type"));
}
};
// Proto/attribute definition for the feed operator, which copies one column
// of the global "feed_value" variable into its output tensor.
//
// Fixes copy-paste documentation strings: "The output of dropout op." and
// "The dimension of random tensor." were lifted from other op makers and
// described the wrong operator. Also resolves the interleaved old/new diff
// lines for the "col" attribute, keeping only the defaulted version.
class FeedOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  FeedOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddAttr<int>("data_type", "output data type")
        .SetDefault(framework::DataType::FP32);
    // Which column of the global feed variable this op reads.
    AddAttr<int>("col", "The col in global feed variable").SetDefault(0);
    AddAttr<std::vector<int>>("dims", "The dimension of feed tensor.");
    AddOutput("Out", "The output of feed op.");
    AddComment(R"DOC(Feed data to global feed variable)DOC");
  }
};
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册