Commit 6d483034 authored by liuruilong

correct typos

Parent 4a493ead
@@ -19,7 +19,7 @@ namespace paddle_mobile {
 namespace framework {
-std::shared_ptr<ProgramDesc> ProgramOptimize::FushionOptimize(
+std::shared_ptr<ProgramDesc> ProgramOptimize::FusionOptimize(
     std::shared_ptr<ProgramDesc> ori_des, bool add_split) {
   // ProgramDesc *optimize_program = new ProgramDesc(*ori_des);
   std::shared_ptr<ProgramDesc> optimize_program =
......
@@ -27,7 +27,7 @@ namespace framework {
 class ProgramOptimize {
  public:
   ProgramOptimize() {}
-  std::shared_ptr<ProgramDesc> FushionOptimize(
+  std::shared_ptr<ProgramDesc> FusionOptimize(
       std::shared_ptr<ProgramDesc> ori_des, bool add_split = false);
  private:
......
@@ -147,7 +147,7 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
   if (optimize) {
     framework::ProgramOptimize program_optimize;
     program.optimizeProgram =
-        program_optimize.FushionOptimize(originProgramDesc, can_add_split);
+        program_optimize.FusionOptimize(originProgramDesc, can_add_split);
   }
   if (optimize) {
     program.optimizeProgram->Description("optimize: ");
......
@@ -21,7 +21,7 @@ namespace paddle_mobile {
 namespace operators {
 template <typename Dtype, typename T>
-void FushionConvAddOp<Dtype, T>::InferShape() const {
+void FusionConvAddOp<Dtype, T>::InferShape() const {
   auto in_dims = this->param_.Input()->dims();
   auto filter_dims = this->param_.Filter()->dims();
   const std::vector<int> &strides = this->param_.Strides();
@@ -44,14 +44,14 @@ void FushionConvAddOp<Dtype, T>::InferShape() const {
   framework::DDim ddim = framework::make_ddim(output_shape);
   this->param_.Output()->Resize(ddim);
 }
-template class FushionConvAddOp<CPU, float>;
+template class FusionConvAddOp<CPU, float>;
 } // namespace operators
 } // namespace paddle_mobile
 namespace ops = paddle_mobile::operators;
 #ifdef PADDLE_MOBILE_CPU
 USE_OP_CPU(conv_add);
-REGISTER_OPERATOR_CPU(conv_add, ops::FushionConvAddOp);
+REGISTER_OPERATOR_CPU(conv_add, ops::FusionConvAddOp);
 #endif
 #ifdef PADDLE_MOBILE_MALI_GPU
 #endif
......
@@ -47,20 +47,20 @@ class FusionConvAddMatcher : public framework::FusionOpMatcher {
 };
 template <typename DeviceType, typename T>
-class FushionConvAddOp : public framework::OperatorWithKernel<
-                             DeviceType, FushionConvAddParam,
+class FusionConvAddOp : public framework::OperatorWithKernel<
+                            DeviceType, FusionConvAddParam,
                             operators::ConvAddKernel<DeviceType, T>> {
  public:
-  FushionConvAddOp(const string &type, const VariableNameMap &inputs,
+  FusionConvAddOp(const string &type, const VariableNameMap &inputs,
                   const VariableNameMap &outputs,
                   const framework::AttributeMap &attrs,
                   std::shared_ptr<framework::Scope> scope)
-      : framework::OperatorWithKernel<DeviceType, FushionConvAddParam,
+      : framework::OperatorWithKernel<DeviceType, FusionConvAddParam,
                                       operators::ConvAddKernel<DeviceType, T>>(
             type, inputs, outputs, attrs, scope) {}
   using framework::OperatorWithKernel<
-      DeviceType, FushionConvAddParam,
+      DeviceType, FusionConvAddParam,
       operators::ConvAddKernel<DeviceType, T>>::OperatorWithKernel;
   void InferShape() const override;
......
@@ -24,9 +24,9 @@ limitations under the License. */
 namespace paddle_mobile {
 namespace operators {
-class FushionConvAddReluOpMatcher : public framework::FusionOpMatcher {
+class FusionConvAddReluOpMatcher : public framework::FusionOpMatcher {
  public:
-  FushionConvAddReluOpMatcher() {
+  FusionConvAddReluOpMatcher() {
     node_ = framework::Node(G_OP_TYPE_CONV);
     node_ > std::make_shared<framework::Node>(G_OP_TYPE_ELEMENTWISE_ADD) >
         std::make_shared<framework::Node>(G_OP_TYPE_RELU);
@@ -43,7 +43,7 @@ class FushionConvAddReluOpMatcher : public framework::FusionOpMatcher {
 template <typename DeviceType, typename T>
 class FusionConvAddReluOp : public framework::OperatorWithKernel<
-                                DeviceType, FushionConvAddReluParam,
+                                DeviceType, FusionConvAddReluParam,
                                 operators::ConvAddReluKernel<DeviceType, T>> {
  public:
   FusionConvAddReluOp(const string &type, const VariableNameMap &inputs,
@@ -51,12 +51,12 @@ class FusionConvAddReluOp : public framework::OperatorWithKernel<
                       const framework::AttributeMap &attrs,
                       std::shared_ptr<framework::Scope> scope)
       : framework::OperatorWithKernel<
-            DeviceType, FushionConvAddReluParam,
+            DeviceType, FusionConvAddReluParam,
             operators::ConvAddReluKernel<DeviceType, T>>(type, inputs, outputs,
                                                          attrs, scope) {}
   using framework::OperatorWithKernel<
-      DeviceType, FushionConvAddReluParam,
+      DeviceType, FusionConvAddReluParam,
       operators::ConvAddReluKernel<DeviceType, T>>::OperatorWithKernel;
   void InferShape() const override;
@@ -65,7 +65,7 @@ class FusionConvAddReluOp : public framework::OperatorWithKernel<
 #ifdef PADDLE_MOBILE_CPU
 // static framework::FusionOpRegistrar fusion_conv_add_relu_registrar(new
-//     FushionConvAddReluOpMatcher());
+//     FusionConvAddReluOpMatcher());
 #endif
 #ifdef PADDLE_MOBILE_MALI_GPU
 #endif
......
@@ -19,7 +19,7 @@ namespace paddle_mobile {
 namespace operators {
 template <typename Dtype, typename T>
-void FushionFcOp<Dtype, T>::InferShape() const {
+void FusionFcOp<Dtype, T>::InferShape() const {
   auto x_dims = this->param_.InputX()->dims();
   auto y_dims = this->param_.InputY()->dims();
   int x_num_col_dims = this->param_.XNumColDims();
@@ -49,14 +49,14 @@ void FushionFcOp<Dtype, T>::InferShape() const {
   framework::DDim ddim = framework::make_ddim(output_dims);
   this->param_.Out()->Resize(ddim);
 }
-template class FushionFcOp<CPU, float>;
+template class FusionFcOp<CPU, float>;
 } // namespace operators
 } // namespace paddle_mobile
 namespace ops = paddle_mobile::operators;
 #ifdef PADDLE_MOBILE_CPU
 USE_OP_CPU(fc);
-REGISTER_OPERATOR_CPU(fc, ops::FushionFcOp);
+REGISTER_OPERATOR_CPU(fc, ops::FusionFcOp);
 #endif
 #ifdef PADDLE_MOBILE_MALI_GPU
 #endif
......
@@ -21,7 +21,7 @@ limitations under the License. */
 #include "framework/operator.h"
 #include "framework/program/program-optimize/fusion_op_register.h"
-#include "operators/kernel/fushion_fc_kernel.h"
+#include "operators/kernel/fusion_fc_kernel.h"
 namespace paddle_mobile {
 namespace operators {
@@ -45,22 +45,22 @@ class FusionFcMatcher : public framework::FusionOpMatcher {
 };
 template <typename DeviceType, typename T>
-class FushionFcOp : public framework::OperatorWithKernel<
-                        DeviceType, FushionFcParam,
-                        operators::FushionFcKernel<DeviceType, T>> {
+class FusionFcOp : public framework::OperatorWithKernel<
+                       DeviceType, FusionFcParam,
+                       operators::FusionFcKernel<DeviceType, T>> {
  public:
-  FushionFcOp(const string &type, const VariableNameMap &inputs,
+  FusionFcOp(const string &type, const VariableNameMap &inputs,
              const VariableNameMap &outputs,
              const framework::AttributeMap &attrs,
             std::shared_ptr<framework::Scope> scope)
       : framework::OperatorWithKernel<
-            DeviceType, FushionFcParam,
-            operators::FushionFcKernel<DeviceType, T>>(type, inputs, outputs,
+            DeviceType, FusionFcParam,
+            operators::FusionFcKernel<DeviceType, T>>(type, inputs, outputs,
                                                        attrs, scope) {}
   using framework::OperatorWithKernel<
-      DeviceType, FushionFcParam,
-      operators::FushionFcKernel<DeviceType, T>>::OperatorWithKernel;
+      DeviceType, FusionFcParam,
+      operators::FusionFcKernel<DeviceType, T>>::OperatorWithKernel;
   void InferShape() const override;
  protected:
......
@@ -20,7 +20,7 @@ namespace operators {
 template <>
 void ConvAddKernel<CPU, float>::Compute(
-    const FushionConvAddParam &param) const {
+    const FusionConvAddParam &param) const {
   const Tensor *input = param.Input();
   Tensor filter = *param.Filter();
   Tensor bias = *param.Bias();
......
@@ -21,7 +21,7 @@ namespace operators {
 template <>
 void ConvAddReluKernel<CPU, float>::Compute(
-    const FushionConvAddReluParam &param) const {
+    const FusionConvAddReluParam &param) const {
   const Tensor *input = param.Input();
   Tensor filter = *param.Filter();
   Tensor bias = *param.Bias();
......
@@ -16,13 +16,13 @@ limitations under the License. */
 #pragma once
-#include "operators/kernel/fushion_fc_kernel.h"
+#include "operators/kernel/fusion_fc_kernel.h"
 namespace paddle_mobile {
 namespace operators {
 template <>
-void FushionFcKernel<CPU, float>::Compute(const FushionFcParam &param) const {
+void FusionFcKernel<CPU, float>::Compute(const FusionFcParam &param) const {
   const Tensor *input_x = param.InputX();
   const Tensor *input_y = param.InputY();
   const Tensor *input_z = param.InputZ();
......
@@ -35,9 +35,9 @@ using framework::DDim;
 using framework::OpKernelBase;
 template <typename DeviceType, typename T>
-class ConvAddKernel : public OpKernelBase<DeviceType, FushionConvAddParam> {
+class ConvAddKernel : public OpKernelBase<DeviceType, FusionConvAddParam> {
  public:
-  void Compute(const FushionConvAddParam &param) const;
+  void Compute(const FusionConvAddParam &param) const;
 };
 } // namespace operators
......
@@ -33,9 +33,9 @@ using framework::OpKernelBase;
 template <typename DeviceType, typename T>
 class ConvAddReluKernel
-    : public OpKernelBase<DeviceType, FushionConvAddReluParam> {
+    : public OpKernelBase<DeviceType, FusionConvAddReluParam> {
  public:
-  void Compute(const FushionConvAddReluParam &param) const;
+  void Compute(const FusionConvAddReluParam &param) const;
 };
 } // namespace operators
......
@@ -24,10 +24,10 @@ namespace paddle_mobile {
 namespace operators {
 template <typename DeviceType, typename T>
-class FushionFcKernel
-    : public framework::OpKernelBase<DeviceType, FushionFcParam> {
+class FusionFcKernel
+    : public framework::OpKernelBase<DeviceType, FusionFcParam> {
  public:
-  void Compute(const FushionFcParam& param) const;
+  void Compute(const FusionFcParam& param) const;
 };
 } // namespace operators
 } // namespace paddle_mobile
......
@@ -41,7 +41,7 @@ Print &operator<<(Print &printer, const ConvParam &conv_param) {
 #ifdef FUSION_CONVADD_OP
-Print &operator<<(Print &printer, const FushionConvAddParam &conv_param) {
+Print &operator<<(Print &printer, const FusionConvAddParam &conv_param) {
   printer << "parameter of conv_add: "
           << "\n";
   printer << "   stride: "
......
@@ -752,9 +752,9 @@ class ReluParam : public OpParam {
 #endif
 #ifdef FUSION_FC_OP
-class FushionFcParam : public OpParam {
+class FusionFcParam : public OpParam {
  public:
-  FushionFcParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
+  FusionFcParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
                 const AttributeMap &attrs, const Scope &scope) {
     input_x_ = InputXFrom<LoDTensor>(inputs, scope);
     input_y_ = InputYFrom<LoDTensor>(inputs, scope);
@@ -790,9 +790,9 @@ class FushionFcParam : public OpParam {
 #endif
 #ifdef FUSION_CONVADD_OP
-class FushionConvAddParam : public OpParam {
+class FusionConvAddParam : public OpParam {
  public:
-  FushionConvAddParam(const VariableNameMap &inputs,
+  FusionConvAddParam(const VariableNameMap &inputs,
                      const VariableNameMap &outputs, const AttributeMap &attrs,
                      const Scope &scope) {
     bias_ = InputYFrom<LoDTensor>(inputs, scope);
@@ -835,16 +835,16 @@ class FushionConvAddParam : public OpParam {
   int groups;
 };
-Print &operator<<(Print &printer, const FushionConvAddParam &conv_param);
+Print &operator<<(Print &printer, const FusionConvAddParam &conv_param);
 #endif
 #ifdef FUSION_CONVADD_RELU_OP
-class FushionConvAddReluParam : public FushionConvAddParam {
+class FusionConvAddReluParam : public FusionConvAddParam {
  public:
-  FushionConvAddReluParam(const VariableNameMap &inputs,
+  FusionConvAddReluParam(const VariableNameMap &inputs,
                          const VariableNameMap &outputs,
                          const AttributeMap &attrs, const Scope &scope)
-      : FushionConvAddParam(inputs, outputs, attrs, scope) {}
+      : FusionConvAddParam(inputs, outputs, attrs, scope) {}
 };
 #endif
......
@@ -88,7 +88,7 @@ else ()
     target_link_libraries(test-relu-op paddle-mobile)
     # gen test
-    ADD_EXECUTABLE(test-fc-op operators/test_fushion_fc_op.cpp test_helper.h test_include.h)
+    ADD_EXECUTABLE(test-fc-op operators/test_fusion_fc_op.cpp test_helper.h test_include.h)
     target_link_libraries(test-fc-op paddle-mobile)
     # gen test log
......
@@ -23,7 +23,7 @@ int main() {
   auto program = loader.Load(g_mobilenet_ssd, true);
   paddle_mobile::framework::ProgramOptimize optimize;
   // program.originProgram->Description("origin");
-  auto optimize_program = optimize.FushionOptimize(program.originProgram);
+  auto optimize_program = optimize.FusionOptimize(program.originProgram);
   if (optimize_program != nullptr) {
     // optimize_program->Description("optimize");
   } else {
......
@@ -49,8 +49,8 @@ class TestFcOp {
     DLOG << " Input Y is : " << op->Input("Y")[0];
     DLOG << " Input Y is : " << op->Input("Z")[0];
     DLOG << " Output Out is : " << op->Output("Out")[0];
-    std::shared_ptr<operators::FushionFcOp<Dtype, float>> testOp =
-        std::make_shared<operators::FushionFcOp<Dtype, float>>(
+    std::shared_ptr<operators::FusionFcOp<Dtype, float>> testOp =
+        std::make_shared<operators::FusionFcOp<Dtype, float>>(
             op->Type(), op->GetInputs(), op->GetOutputs(),
             op->GetAttrMap(), program_.scope);
     ops_of_block_[*block_desc.get()].push_back(testOp);
@@ -119,7 +119,7 @@ int main() {
   auto program = loader.Load(g_googlenet);
   paddle_mobile::framework::ProgramOptimize optimize;
   // program.originProgram->Description("origin");
-  auto optimize_program = optimize.FushionOptimize(program.originProgram);
+  auto optimize_program = optimize.FusionOptimize(program.originProgram);
   program.optimizeProgram = optimize_program;