Unverified · Commit 89feae07 · Authored by Difer, committed by GitHub

[CodeStyle][CINN] fix cinn cpplint codestyle `[whitespace/line_length]` (#55020)

Parent 4a4a32ee
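The patch is formatting only: cpplint's [whitespace/line_length] check flags source lines longer than the configured limit (80 columns by default), and most hunks below simply wrap the long line instead of silencing the formatter with clang-format off/on markers. A minimal sketch of the pattern, using hypothetical names rather than code from this patch:

    #include <string>
    #include <vector>

    // Flagged: the whole declaration sits on a single line far past the limit.
    // void BuildStep(const std::string &name, const std::vector<int> &loops, bool keep_unit_loops = false);

    // Accepted: wrap the parameter list, one argument per line.
    void BuildStep(const std::string &name,
                   const std::vector<int> &loops,
                   bool keep_unit_loops = false);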
@@ -18,11 +18,9 @@
  */
 #pragma once
-// clang-format off
 #include "paddle/cinn/ir/ir.h"
 #include <ginac/ginac.h>
-// clang-format on
 #include <limits>
 #include <map>
...
@@ -34,7 +34,8 @@ namespace frontend {
 // clang-format off
 // ******************************************* //
-// Elementwise compute each element in `input` variable, and return the result Variable.
+// Elementwise compute each element in `input` variable,
+// and return the result Variable.
 // Variable UNARY_OP(const Variable& x);
 #define NETBUILDER_UNARY_OP_FOREACH(macro__) \
   macro__(Sqrt) \
@@ -112,7 +113,10 @@ namespace frontend {
 // ******************************************* //
 // Reduce array elements over the given dims.
-// Variable REDUCE_OP(const Variable& x, const cinn::utils::ShapeType& dim = {}, bool keep_dim = false);
+// Variable REDUCE_OP(
+//     const Variable& x,
+//     const cinn::utils::ShapeType& dim = {},
+//     bool keep_dim = false);
 #define NETBUILDER_REDUCE_OP_FOREACH(macro__) \
   macro__(ReduceSum) \
   macro__(ReduceProd) \
...
@@ -80,7 +80,9 @@ class ScheduleBlockRealize;
   macro__(Minus) \
   macro__(Not) \
-#define NODETY_OP_FOR_EACH(macro__) NODETY_BINARY_OP_FOR_EACH(macro__) NODETY_UNARY_OP_FOR_EACH(macro__)
+#define NODETY_OP_FOR_EACH(macro__) \
+  NODETY_BINARY_OP_FOR_EACH(macro__) \
+  NODETY_UNARY_OP_FOR_EACH(macro__)
 #define NODETY_CONTROL_OP_FOR_EACH(macro__) \
   macro__(Cast) \
...
@@ -324,126 +324,144 @@ struct ApplyFuncImpl<Return (*)(Args...), impl_fn> {
       ::cinn::ir::StepKindRegistry::Global()->__REGISTER_OR_GET__(#TypeName)
 // register StepKindInfo for every type of scheduling operation
-// clang-format off
 CINN_BUILD_STEP_KIND(GetAllBlocks)
     .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
-        static_cast<std::vector<Expr> (IRSchedule::*)() const>(&IRSchedule::GetAllBlocks))));
+        static_cast<std::vector<Expr> (IRSchedule::*)() const>(
+            &IRSchedule::GetAllBlocks))));
 CINN_BUILD_STEP_KIND(GetChildBlocks)
     .Inputs({"expr"})
     .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
-        static_cast<std::vector<Expr> (IRSchedule::*)(const Expr&) const>(&IRSchedule::GetChildBlocks))));
+        static_cast<std::vector<Expr> (IRSchedule::*)(const Expr&) const>(
+            &IRSchedule::GetChildBlocks))));
-CINN_BUILD_STEP_KIND(GetLoops)
-    .Inputs({"block"})
-    .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
-        static_cast<std::vector<Expr> (IRSchedule::*)(const Expr&) const>(&IRSchedule::GetLoops))));
+CINN_BUILD_STEP_KIND(GetLoops).Inputs({"block"}).SetApplyFn(
+    APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
+        static_cast<std::vector<Expr> (IRSchedule::*)(const Expr&) const>(
+            &IRSchedule::GetLoops))));
 CINN_BUILD_STEP_KIND(GetLoopsWithName)
     .Attrs({"block_name"})
     .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
-        static_cast<std::vector<Expr> (IRSchedule::*)(const std::string&) const>(&IRSchedule::GetLoops))));
+        static_cast<std::vector<Expr> (IRSchedule::*)(const std::string&)
+            const>(&IRSchedule::GetLoops))));
 CINN_BUILD_STEP_KIND(GetBlock)
     .Attrs({"block_name"})
     .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
-        static_cast<Expr (IRSchedule::*)(const std::string&) const>(&IRSchedule::GetBlock))));
+        static_cast<Expr (IRSchedule::*)(const std::string&) const>(
+            &IRSchedule::GetBlock))));
 CINN_BUILD_STEP_KIND(Split)
     .Inputs({"loop", "factors"})
     .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
-        static_cast<std::vector<Expr> (IRSchedule::*)(const Expr&, const std::vector<Expr>&)>(&IRSchedule::Split))));
+        static_cast<std::vector<Expr> (IRSchedule::*)(
+            const Expr&, const std::vector<Expr>&)>(&IRSchedule::Split))));
-CINN_BUILD_STEP_KIND(Fuse)
-    .Inputs({"loops"})
-    .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
-        static_cast<Expr (IRSchedule::*)(const std::vector<Expr>&)>(&IRSchedule::Fuse))));
+CINN_BUILD_STEP_KIND(Fuse).Inputs({"loops"}).SetApplyFn(
+    APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
+        static_cast<Expr (IRSchedule::*)(const std::vector<Expr>&)>(
+            &IRSchedule::Fuse))));
 CINN_BUILD_STEP_KIND(FuseWithName)
     .Attrs({"block_name", "loops_index"})
     .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
-        static_cast<Expr (IRSchedule::*)(const std::string&, const std::vector<int>&)>(&IRSchedule::Fuse))));
+        static_cast<Expr (IRSchedule::*)(
+            const std::string&, const std::vector<int>&)>(&IRSchedule::Fuse))));
 CINN_BUILD_STEP_KIND(FuseWithBlock)
     .Inputs({"block"})
     .Attrs({"loops_index"})
     .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
-        static_cast<Expr (IRSchedule::*)(const Expr&, const std::vector<int>&)>(&IRSchedule::Fuse))));
+        static_cast<Expr (IRSchedule::*)(const Expr&, const std::vector<int>&)>(
+            &IRSchedule::Fuse))));
 CINN_BUILD_STEP_KIND(ComputeAt)
     .Inputs({"block", "loop"})
     .Attrs({"keep_unit_loops"})
-    .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::ComputeAt)));
+    .SetApplyFn(
+        APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::ComputeAt)));
 CINN_BUILD_STEP_KIND(SimpleComputeAt)
     .Inputs({"block", "loop"})
-    .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::SimpleComputeAt)));
+    .SetApplyFn(APPLY_FUNC_UNIFORM(
+        FREE_FUNCTION_CONVERTER(&IRSchedule::SimpleComputeAt)));
 CINN_BUILD_STEP_KIND(ReverseComputeAt)
     .Inputs({"block", "loop"})
     .Attrs({"keep_unit_loops"})
-    .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::ReverseComputeAt)));
+    .SetApplyFn(APPLY_FUNC_UNIFORM(
+        FREE_FUNCTION_CONVERTER(&IRSchedule::ReverseComputeAt)));
 CINN_BUILD_STEP_KIND(GetRootBlock)
     .Inputs({"expr"})
-    .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::GetRootBlock)));
+    .SetApplyFn(
+        APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::GetRootBlock)));
 CINN_BUILD_STEP_KIND(CacheRead)
     .Inputs({"block"})
     .Attrs({"read_buffer_index", "memory_type"})
-    .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::CacheRead)));
+    .SetApplyFn(
+        APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::CacheRead)));
 CINN_BUILD_STEP_KIND(CacheWrite)
     .Inputs({"block"})
     .Attrs({"write_buffer_index", "memory_type"})
-    .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::CacheWrite)));
+    .SetApplyFn(
+        APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::CacheWrite)));
 CINN_BUILD_STEP_KIND(SyncThreads)
     .Inputs({"ir_node"})
     .Attrs({"after_node"})
-    .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::SyncThreads)));
+    .SetApplyFn(
+        APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::SyncThreads)));
 CINN_BUILD_STEP_KIND(SetBuffer)
     .Inputs({"block"})
     .Attrs({"memory_type", "fixed"})
-    .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::SetBuffer)));
+    .SetApplyFn(
+        APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::SetBuffer)));
-CINN_BUILD_STEP_KIND(Reorder)
-    .Inputs({"loops"})
-    .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
-        static_cast<Expr (IRSchedule::*)(const std::vector<Expr>&)>(&IRSchedule::Reorder))));
+CINN_BUILD_STEP_KIND(Reorder).Inputs({"loops"}).SetApplyFn(
+    APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
+        static_cast<Expr (IRSchedule::*)(const std::vector<Expr>&)>(
+            &IRSchedule::Reorder))));
 CINN_BUILD_STEP_KIND(ReorderWithBlock)
     .Inputs({"block"})
     .Attrs({"loops_index"})
     .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
-        static_cast<Expr (IRSchedule::*)(const Expr&, const std::vector<int>&)>(&IRSchedule::Reorder))));
+        static_cast<Expr (IRSchedule::*)(const Expr&, const std::vector<int>&)>(
+            &IRSchedule::Reorder))));
 CINN_BUILD_STEP_KIND(ReorderWithName)
     .Attrs({"block_name", "loops_index"})
     .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
-        static_cast<Expr (IRSchedule::*)(const std::string&, const std::vector<int>&)>(&IRSchedule::Reorder))));
+        static_cast<Expr (IRSchedule::*)(const std::string&,
+                                         const std::vector<int>&)>(
+            &IRSchedule::Reorder))));
-CINN_BUILD_STEP_KIND(Parallel)
-    .Inputs({"loop"})
-    .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::Parallel)));
+CINN_BUILD_STEP_KIND(Parallel).Inputs({"loop"}).SetApplyFn(
+    APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::Parallel)));
 CINN_BUILD_STEP_KIND(Vectorize)
     .Inputs({"loop"})
     .Attrs({"factor"})
-    .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::Vectorize)));
+    .SetApplyFn(
+        APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::Vectorize)));
-CINN_BUILD_STEP_KIND(Unroll)
-    .Inputs({"loop"})
-    .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::Unroll)));
+CINN_BUILD_STEP_KIND(Unroll).Inputs({"loop"}).SetApplyFn(
+    APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::Unroll)));
 CINN_BUILD_STEP_KIND(ComputeInline)
     .Inputs({"schedule_block"})
-    .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::ComputeInline)));
+    .SetApplyFn(APPLY_FUNC_UNIFORM(
+        FREE_FUNCTION_CONVERTER(&IRSchedule::ComputeInline)));
 CINN_BUILD_STEP_KIND(ReverseComputeInline)
     .Inputs({"schedule_block"})
-    .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::ReverseComputeInline)));
+    .SetApplyFn(APPLY_FUNC_UNIFORM(
+        FREE_FUNCTION_CONVERTER(&IRSchedule::ReverseComputeInline)));
 CINN_BUILD_STEP_KIND(Bind)
     .Inputs({"loop"})
@@ -453,23 +471,41 @@ CINN_BUILD_STEP_KIND(Bind)
 CINN_BUILD_STEP_KIND(Rfactor)
     .Inputs({"rf_loop"})
     .Attrs({"rf_axis"})
-    .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::Rfactor)));
+    .SetApplyFn(
+        APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::Rfactor)));
 CINN_BUILD_STEP_KIND(MergeExprs)
-    .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::MergeExprs)));
+    .SetApplyFn(
+        APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::MergeExprs)));
-template <typename AttrType> void Annotate(IRSchedule* ir_sch, const Expr&, const std::string&, AttrType);
-template <> void Annotate<int>(IRSchedule* ir_sch, const Expr& block, const std::string& key, int value) {
+template <typename AttrType>
+void Annotate(IRSchedule* ir_sch, const Expr&, const std::string&, AttrType);
+template <>
+void Annotate<int>(IRSchedule* ir_sch,
+                   const Expr& block,
+                   const std::string& key,
+                   int value) {
   ir_sch->Annotate(block, key, value);
 }
-template <> void Annotate<bool>(IRSchedule* ir_sch, const Expr& block, const std::string& key, bool value) {
+template <>
+void Annotate<bool>(IRSchedule* ir_sch,
+                    const Expr& block,
+                    const std::string& key,
+                    bool value) {
   ir_sch->Annotate(block, key, value);
 }
-template <> void Annotate<float>(IRSchedule* ir_sch, const Expr& block, const std::string& key, float value) {
+template <>
+void Annotate<float>(IRSchedule* ir_sch,
+                     const Expr& block,
+                     const std::string& key,
+                     float value) {
  ir_sch->Annotate(block, key, value);
 }
-void AnnotateStringAttr(IRSchedule* ir_sch, const Expr& block, const std::string& key, const std::string& value) {
+void AnnotateStringAttr(IRSchedule* ir_sch,
+                        const Expr& block,
+                        const std::string& key,
+                        const std::string& value) {
   ir_sch->Annotate(block, key, value);
 }
 CINN_BUILD_STEP_KIND(AnnotateIntAttr)
@@ -495,25 +531,29 @@ CINN_BUILD_STEP_KIND(AnnotateStringAttr)
 CINN_BUILD_STEP_KIND(Unannotate)
     .Inputs({"block"})
     .Attrs({"key"})
-    .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::Unannotate)));
+    .SetApplyFn(
+        APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::Unannotate)));
 CINN_BUILD_STEP_KIND(FlattenLoops)
     .Inputs({"loops"})
     .Attrs({"force_flat"})
-    .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::FlattenLoops)));
+    .SetApplyFn(
+        APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::FlattenLoops)));
 CINN_BUILD_STEP_KIND(SamplePerfectTile)
     .Inputs({"loop"})
     .Attrs({"n", "max_innermost_factor", "decision"})
-    .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::SamplePerfectTile)));
+    .SetApplyFn(APPLY_FUNC_UNIFORM(
+        FREE_FUNCTION_CONVERTER(&IRSchedule::SamplePerfectTile)));
 CINN_BUILD_STEP_KIND(TagPostSchedule)
-    .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::TagPostSchedule)));
+    .SetApplyFn(APPLY_FUNC_UNIFORM(
+        FREE_FUNCTION_CONVERTER(&IRSchedule::TagPostSchedule)));
 CINN_BUILD_STEP_KIND(SampleCategorical)
     .Attrs({"candidates", "probs", "decision"})
-    .SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::SampleCategorical)));
-// clang-format on
+    .SetApplyFn(APPLY_FUNC_UNIFORM(
+        FREE_FUNCTION_CONVERTER(&IRSchedule::SampleCategorical)));
 // ------ Following codes are about member function implement of the
 // ScheduleDesc class
...
@@ -150,7 +150,7 @@ void BindFramework(pybind11::module *m) {
             t->shape().numel() * t->type().bytes(),
             cudaMemcpyDeviceToHost));
 #else
-        LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!";
+        LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!";
 #endif
       } else {
         CINN_NOT_IMPLEMENTED
@@ -191,7 +191,7 @@ void BindFramework(pybind11::module *m) {
             self->shape().numel() * self->type().bytes(),
             cudaMemcpyDeviceToHost));
 #else
-        LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!";
+        LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!";
 #endif
       } else {
         CINN_NOT_IMPLEMENTED
@@ -225,7 +225,7 @@ void BindFramework(pybind11::module *m) {
             self->shape().numel() * self->type().bytes(),
             cudaMemcpyHostToDevice));
 #else
-        LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!";
+        LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!";
 #endif
       } else {
         CINN_NOT_IMPLEMENTED
...
@@ -227,7 +227,7 @@ void BindFrontend(pybind11::module *m) {
           in_tensor->shape().numel() * dtype.bytes(),
           cudaMemcpyHostToDevice));
 #else
-      LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!";
+      LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!";
 #endif
     } else if (target.arch == Target::Arch::X86) {
       memcpy(data,
@@ -320,7 +320,7 @@ void BindFrontend(pybind11::module *m) {
           in_tensor->shape().numel() * sizeof(float),
           cudaMemcpyHostToDevice));
 #else
-      LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!";
+      LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!";
 #endif
     } else if (target.arch == Target::Arch::X86) {
       for (size_t j = 0; j < in_tensor->shape().numel(); j++) {
@@ -369,7 +369,7 @@ void BindFrontend(pybind11::module *m) {
           in_tensor->shape().numel() * sizeof(float),
           cudaMemcpyHostToDevice));
 #else
-      LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!";
+      LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!";
 #endif
     } else if (target.arch == Target::Arch::X86) {
       for (size_t j = 0; j < in_tensor->shape().numel(); j++) {
@@ -419,12 +419,13 @@ void BindFrontend(pybind11::module *m) {
   py::class_<NetBuilder, std::shared_ptr<NetBuilder>>(*m, "NetBuilder")
       .def(py::init<const std::string &>(), py::arg("name") = "")
 // clang-format off
 #define PY_REGISTER_CONSTANT_OP(TYPE__) \
       .def("constant", \
-           static_cast<Variable (NetBuilder::*)(const TYPE__&, const std::string &, const std::string &)>( \
+           static_cast<Variable (NetBuilder::*)( \
+               const TYPE__&, const std::string &, const std::string &)>( \
                &NetBuilder::template Constant<TYPE__>), \
            py::arg("value"), \
            py::arg("name") = "", \
            py::arg("dtype") = "")
 EXPAND_CINN_SUPPORT_TYPE(PY_REGISTER_CONSTANT_OP)
 #define EXPAND_ONE_VECTOR(TYPE) PY_REGISTER_CONSTANT_OP(std::vector<TYPE>)
@@ -446,23 +447,25 @@ void BindFrontend(pybind11::module *m) {
 #undef EXPAND_QUINTIC_VECTOR
 #undef EXPAND_SEXTIC_VECTOR
 #undef PY_REGISTER_CONSTANT_OP
 #define PY_REGISTER_FILLCONSTANT_OP(TYPE__) \
       .def("fill_constant", \
            static_cast<Variable (NetBuilder::*)( \
-               const std::vector<int> &, TYPE__, const std::string &, const std::string &, bool)>( \
+               const std::vector<int> &, TYPE__, \
+               const std::string &, \
+               const std::string &, bool)>( \
                &NetBuilder::FillConstant<TYPE__>), \
            py::arg("shape"), \
            py::arg("value"), \
            py::arg("name") = "", \
            py::arg("dtype"), \
            py::arg("force_cpu") = false) \
       .def("fill_constant", \
            static_cast<Variable (NetBuilder::*)( \
               const std::vector<int> &, TYPE__, const std::string &, bool)>( \
               &NetBuilder::template FillConstant<TYPE__>), \
           py::arg("shape"), \
           py::arg("value"), \
           py::arg("name") = "", \
           py::arg("force_cpu") = false)
 EXPAND_CINN_SUPPORT_TYPE(PY_REGISTER_FILLCONSTANT_OP)
 #undef PY_REGISTER_FILLCONSTANT_OP
@@ -471,7 +474,8 @@ void BindFrontend(pybind11::module *m) {
 NETBUILDER_UNARY_OP_FOREACH(PY_REGISTER_UNARY_FUNC)
 #undef PY_REGISTER_UNARY_FUNC
 #define PY_REGISTER_BINARY_FUNC(func_name__) \
-  .def(SnakeName(#func_name__), &NetBuilder::func_name__, py::arg("x"), py::arg("y"), py::arg("axis") = -1)
+  .def(SnakeName(#func_name__), &NetBuilder::func_name__, py::arg("x"), \
+       py::arg("y"), py::arg("axis") = -1)
 NETBUILDER_BINARY_OP_FOREACH(PY_REGISTER_BINARY_FUNC)
 #undef PY_REGISTER_BINARY_FUNC
 #define PY_REGISTER_REDUCE_FUNC(func_name__) \
...
@@ -47,7 +47,7 @@ BIN_FULLSIZE = 5348678856
 def preprocess(img):
     img_width, img_height = img.size
-    img = img.resize((RESIZE_W, RESIZE_H), Image.ANTIALIAS)
+    img = img.resize((RESIZE_W, RESIZE_H), Image.LANCZOS)
     img = np.array(img)
     # HWC to CHW
     if len(img.shape) == 3:
...