Commit d185e8c1 authored by zhupengyang, committed by GitHub

[XPU] add dropout bridge and unit test (#2650)

test=develop
Parent 73a4216d
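For context, Paddle's dropout is a no-op at inference apart from an optional rescale, which is what the new bridge and the test baseline below implement. A minimal sketch of that semantics (DropoutInferScale is a hypothetical helper for illustration, not the XPU builder's API):

    #include <string>

    // Inference-time dropout semantics mirrored by the bridge and the baseline:
    //   "downgrade_in_infer": out = x * (1 - dropout_prob)
    //   "upscale_in_train":   out = x (identity at inference)
    inline float DropoutInferScale(const std::string& impl, float dropout_prob) {
      return impl == "downgrade_in_infer" ? 1.0f - dropout_prob : 1.0f;
    }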
@@ -17,6 +17,7 @@ lite_cc_library(subgraph_bridge_batch_norm_op_xpu SRCS batch_norm_op.cc DEPS ${xpu_subgraph_bridge_deps})
lite_cc_library(subgraph_bridge_transpose_op_xpu SRCS transpose_op.cc DEPS ${xpu_subgraph_bridge_deps})
lite_cc_library(subgraph_bridge_reshape_op_xpu SRCS reshape_op.cc DEPS ${xpu_subgraph_bridge_deps})
lite_cc_library(subgraph_bridge_layer_norm_op_xpu SRCS layer_norm_op.cc DEPS ${xpu_subgraph_bridge_deps})
lite_cc_library(subgraph_bridge_dropout_op_xpu SRCS dropout_op.cc DEPS ${xpu_subgraph_bridge_deps})
set(xpu_subgraph_bridges
    subgraph_bridge_registry
@@ -32,6 +33,7 @@ set(xpu_subgraph_bridges
    subgraph_bridge_transpose_op_xpu
    subgraph_bridge_reshape_op_xpu
    subgraph_bridge_layer_norm_op_xpu
    subgraph_bridge_dropout_op_xpu
    CACHE INTERNAL "xpu_subgraph_bridges")
message(STATUS "+++++ xpu_subgraph_bridges: ${xpu_subgraph_bridges}")
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/xpu/bridges/graph.h"
#include "lite/kernels/xpu/bridges/utility.h"
namespace paddle {
namespace lite {
namespace subgraph {
namespace xpu {
int DropoutConverter(void* ctx, OpLite* op) {
  CHECK(ctx != nullptr);
  CHECK(op != nullptr);
  auto graph = static_cast<Graph*>(ctx);
  auto op_info = op->op_info();
  auto op_type = op_info->Type();
  VLOG(3) << "[XPU] Converting " + op_type + "...";

  // Create node and set params from op
  auto x_var_name = op_info->Input("X").front();
  auto out_var_name = op_info->Output("Out").front();
  auto dropout_prob = op_info->GetAttr<float>("dropout_prob");
  auto dropout_implementation =
      op_info->GetAttr<std::string>("dropout_implementation");

  // Fold the implementation mode into the single rate passed to the builder.
  double rate;
  if (dropout_implementation == "downgrade_in_infer") {
    rate = 1. - dropout_prob;
  } else if (dropout_implementation == "upscale_in_train") {
    rate = 1.;
  } else {
    LOG(FATAL) << "unsupported dropout_implementation: "
               << dropout_implementation << " for dropout";
  }

  CHECK(graph->HasNode(x_var_name));
  graph->AddNode(
      out_var_name,
      graph->builder_.CreateDropout(*graph->GetNode(x_var_name), rate));
  return SUCCESS;
}
} // namespace xpu
} // namespace subgraph
} // namespace lite
} // namespace paddle
REGISTER_SUBGRAPH_BRIDGE(XPU,
                         dropout,
                         paddle::lite::subgraph::xpu::DropoutConverter);
@@ -26,3 +26,4 @@ USE_SUBGRAPH_BRIDGE(XPU, transpose);
USE_SUBGRAPH_BRIDGE(XPU, transpose2);
USE_SUBGRAPH_BRIDGE(XPU, reshape);
USE_SUBGRAPH_BRIDGE(XPU, reshape2);
USE_SUBGRAPH_BRIDGE(XPU, dropout);
@@ -28,6 +28,7 @@ if((NOT LITE_WITH_OPENCL AND NOT LITE_WITH_FPGA) AND (LITE_WITH_X86 OR LITE_WITH
lite_cc_test(test_kernel_transpose_compute SRCS transpose_compute_test.cc DEPS arena_framework ${xpu_kernels} ${x86_kernels} ${arm_kernels} ${lite_ops} ${host_kernels})
lite_cc_test(test_kernel_reshape_compute SRCS reshape_compute_test.cc DEPS arena_framework ${xpu_kernels} ${x86_kernels} ${arm_kernels} ${lite_ops} ${host_kernels})
lite_cc_test(test_kernel_layer_norm_compute SRCS layer_norm_compute_test.cc DEPS arena_framework ${xpu_kernels} ${x86_kernels} ${arm_kernels} ${lite_ops} ${host_kernels})
lite_cc_test(test_kernel_dropout_compute SRCS dropout_compute_test.cc DEPS arena_framework ${xpu_kernels} ${x86_kernels} ${arm_kernels} ${lite_ops} ${host_kernels})
lite_cc_test(test_kernel_softmax_compute SRCS softmax_compute_test.cc DEPS arena_framework ${xpu_kernels} ${x86_kernels} ${arm_kernels} ${lite_ops} ${host_kernels})
if(LITE_BUILD_EXTRA)
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include <cmath>
#include <cstring>
#include <memory>
#include <string>
#include <vector>
#include "lite/api/paddle_use_kernels.h"
#include "lite/api/paddle_use_ops.h"
#include "lite/core/arena/framework.h"
namespace paddle {
namespace lite {
class DropoutComputeTester : public arena::TestCase {
 protected:
  // Common attributes for this op.
  std::string type_ = "dropout";
  std::string input_ = "x";
  std::string output_ = "out";
  std::string mask_ = "mask";
  DDim dims_{{1}};
  float dropout_prob_ = 0.5;
  bool fix_seed_ = true;
  int seed_ = 1;
  std::string dropout_implementation_ = "downgrade_in_infer";

 public:
  DropoutComputeTester(const Place& place,
                       const std::string& alias,
                       DDim dims,
                       float dropout_prob,
                       bool fix_seed,
                       int seed,
                       std::string dropout_implementation)
      : TestCase(place, alias),
        dims_(dims),
        dropout_prob_(dropout_prob),
        fix_seed_(fix_seed),
        seed_(seed),
        dropout_implementation_(dropout_implementation) {}
  void RunBaseline(Scope* scope) override {
    auto* out = scope->NewTensor(output_);
    CHECK(out);
    out->Resize(dims_);
    auto* output_data = out->mutable_data<float>();

    auto* x = scope->FindTensor(input_);
    const auto* x_data = x->data<float>();

    if (dropout_implementation_ == "downgrade_in_infer") {
      float rate = 1 - dropout_prob_;
      for (int64_t i = 0; i < dims_.production(); i++) {
        output_data[i] = x_data[i] * rate;
      }
    } else if (dropout_implementation_ == "upscale_in_train") {
      memcpy(output_data, x_data, sizeof(float) * dims_.production());
    } else {
      LOG(FATAL) << "unsupported dropout_implementation: "
                 << dropout_implementation_;
    }
  }
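  // Worked example: with dropout_prob = 0.5 and an all-ones input,
  // "downgrade_in_infer" yields an all-0.5 output (out = x * (1 - p)),
  // while "upscale_in_train" copies the input through unchanged.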
  void PrepareOpDesc(cpp::OpDesc* op_desc) {
    op_desc->SetType(type_);
    op_desc->SetInput("X", {input_});
    op_desc->SetOutput("Out", {output_});
    op_desc->SetOutput("Mask", {mask_});
    op_desc->SetAttr("dropout_prob", dropout_prob_);
    op_desc->SetAttr("fix_seed", fix_seed_);
    op_desc->SetAttr("seed", seed_);
    op_desc->SetAttr("dropout_implementation", dropout_implementation_);
  }
  void PrepareData() override {
    std::vector<float> input_data(dims_.production());
    for (int i = 0; i < dims_.production(); i++) {
#if 0
      // Signed pseudo-random pattern, kept for richer coverage but disabled.
      float sign = i % 3 == 0 ? -1.0f : 1.0f;
      input_data[i] = sign * static_cast<float>(i % 128) * 0.013f + 0.001f;
#else
      input_data[i] = 1;
#endif
    }
    SetCommonTensor(input_, dims_, input_data.data());
  }
};
TEST(Dropout, precision) {
  LOG(INFO) << "test dropout op";
  float abs_error = 2e-5;
  Place place;
#if defined(LITE_WITH_XPU)
  place = TARGET(kXPU);
#else
  return;
#endif

  std::vector<std::vector<int64_t>> dims{
      /*{3},*/ {3, 4} /*, {3, 4, 5}, {1, 2, 3, 4}, {2, 3, 4, 5}*/};
  for (auto dim : dims) {
    for (auto dropout_prob : {/*0.,*/ 0.5 /*, 1.*/}) {
      for (auto dropout_implementation :
           {"downgrade_in_infer", "upscale_in_train"}) {
        std::unique_ptr<arena::TestCase> tester(
            new DropoutComputeTester(place,
                                     "def",
                                     DDim(dim),
                                     dropout_prob,
                                     true,
                                     1,
                                     dropout_implementation));
        arena::Arena arena(std::move(tester), place, abs_error);
        // Exclude "mask" from the precision check; the XPU bridge only
        // emits the "Out" node.
        arena.TestPrecision({"mask"});
      }
    }
  }
}
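Assuming a build configured with LITE_WITH_XPU, the new lite_cc_test target produces a standalone gtest binary (test_kernel_dropout_compute) that can be run directly or via ctest; the exact output path depends on the build layout.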
} // namespace lite
} // namespace paddle