Unverified commit 89feae07 authored by Difer, committed by GitHub

[CodeStyle][CINN] fix cinn cpplint codestyle `[whitespace/line_length]` (#55020)

Parent 4a4a32ee
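cpplint reports the `whitespace/line_length` category for any source line longer than the configured limit (an 80-column limit, judging by how the lines below are wrapped). As a rough illustration of what the checker enforces, here is a minimal Python sketch; it is not cpplint's implementation, and the 80-character limit and the file path in it are assumptions for the example:

```python
"""Minimal sketch of a line-length check in the spirit of cpplint's
whitespace/line_length category. Not cpplint itself; the 80-character
limit and the file path below are assumptions for illustration."""


def find_long_lines(path, limit=80):
    """Yield (line_number, line_length) for every line longer than `limit`."""
    with open(path, encoding="utf-8") as f:
        for lineno, line in enumerate(f, start=1):
            length = len(line.rstrip("\n"))
            if length > limit:
                yield lineno, length


if __name__ == "__main__":
    # Illustrative path; the commit touches several files under paddle/cinn/.
    target = "paddle/cinn/ir/schedule/schedule_desc.cc"
    for lineno, length in find_long_lines(target):
        print(f"{target}:{lineno}: lines should be <= 80 characters long "
              f"(found {length})  [whitespace/line_length]")
```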
@@ -18,11 +18,9 @@
*/
#pragma once
// clang-format off
#include "paddle/cinn/ir/ir.h"
#include <ginac/ginac.h>
// clang-format on
#include <limits>
#include <map>
@@ -34,7 +34,8 @@ namespace frontend {
// clang-format off
// ******************************************* //
// Elementwise compute each element in `input` variable, and return the result Variable.
// Elementwise compute each element in `input` variable,
// and return the result Variable.
// Variable UNARY_OP(const Variable& x);
#define NETBUILDER_UNARY_OP_FOREACH(macro__) \
macro__(Sqrt) \
@@ -112,7 +113,10 @@ namespace frontend {
// ******************************************* //
// Reduce array elements over the given dims.
// Variable REDUCE_OP(const Variable& x, const cinn::utils::ShapeType& dim = {}, bool keep_dim = false);
// Variable REDUCE_OP(
// const Variable& x,
// const cinn::utils::ShapeType& dim = {},
// bool keep_dim = false);
#define NETBUILDER_REDUCE_OP_FOREACH(macro__) \
macro__(ReduceSum) \
macro__(ReduceProd) \
@@ -80,7 +80,9 @@ class ScheduleBlockRealize;
macro__(Minus) \
macro__(Not) \
#define NODETY_OP_FOR_EACH(macro__) NODETY_BINARY_OP_FOR_EACH(macro__) NODETY_UNARY_OP_FOR_EACH(macro__)
#define NODETY_OP_FOR_EACH(macro__) \
NODETY_BINARY_OP_FOR_EACH(macro__) \
NODETY_UNARY_OP_FOR_EACH(macro__)
#define NODETY_CONTROL_OP_FOR_EACH(macro__) \
macro__(Cast) \
@@ -324,126 +324,144 @@ struct ApplyFuncImpl<Return (*)(Args...), impl_fn> {
::cinn::ir::StepKindRegistry::Global()->__REGISTER_OR_GET__(#TypeName)
// register StepKindInfo for every type of scheduling operation
// clang-format off
CINN_BUILD_STEP_KIND(GetAllBlocks)
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
static_cast<std::vector<Expr> (IRSchedule::*)() const>(&IRSchedule::GetAllBlocks))));
static_cast<std::vector<Expr> (IRSchedule::*)() const>(
&IRSchedule::GetAllBlocks))));
CINN_BUILD_STEP_KIND(GetChildBlocks)
.Inputs({"expr"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
static_cast<std::vector<Expr> (IRSchedule::*)(const Expr&) const>(&IRSchedule::GetChildBlocks))));
static_cast<std::vector<Expr> (IRSchedule::*)(const Expr&) const>(
&IRSchedule::GetChildBlocks))));
CINN_BUILD_STEP_KIND(GetLoops)
.Inputs({"block"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
static_cast<std::vector<Expr> (IRSchedule::*)(const Expr&) const>(&IRSchedule::GetLoops))));
CINN_BUILD_STEP_KIND(GetLoops).Inputs({"block"}).SetApplyFn(
APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
static_cast<std::vector<Expr> (IRSchedule::*)(const Expr&) const>(
&IRSchedule::GetLoops))));
CINN_BUILD_STEP_KIND(GetLoopsWithName)
.Attrs({"block_name"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
static_cast<std::vector<Expr> (IRSchedule::*)(const std::string&) const>(&IRSchedule::GetLoops))));
static_cast<std::vector<Expr> (IRSchedule::*)(const std::string&)
const>(&IRSchedule::GetLoops))));
CINN_BUILD_STEP_KIND(GetBlock)
.Attrs({"block_name"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
static_cast<Expr (IRSchedule::*)(const std::string&) const>(&IRSchedule::GetBlock))));
static_cast<Expr (IRSchedule::*)(const std::string&) const>(
&IRSchedule::GetBlock))));
CINN_BUILD_STEP_KIND(Split)
.Inputs({"loop", "factors"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
static_cast<std::vector<Expr> (IRSchedule::*)(const Expr&, const std::vector<Expr>&)>(&IRSchedule::Split))));
static_cast<std::vector<Expr> (IRSchedule::*)(
const Expr&, const std::vector<Expr>&)>(&IRSchedule::Split))));
CINN_BUILD_STEP_KIND(Fuse)
.Inputs({"loops"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
static_cast<Expr (IRSchedule::*)(const std::vector<Expr>&)>(&IRSchedule::Fuse))));
CINN_BUILD_STEP_KIND(Fuse).Inputs({"loops"}).SetApplyFn(
APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
static_cast<Expr (IRSchedule::*)(const std::vector<Expr>&)>(
&IRSchedule::Fuse))));
CINN_BUILD_STEP_KIND(FuseWithName)
.Attrs({"block_name", "loops_index"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
static_cast<Expr (IRSchedule::*)(const std::string&, const std::vector<int>&)>(&IRSchedule::Fuse))));
static_cast<Expr (IRSchedule::*)(
const std::string&, const std::vector<int>&)>(&IRSchedule::Fuse))));
CINN_BUILD_STEP_KIND(FuseWithBlock)
.Inputs({"block"})
.Attrs({"loops_index"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
static_cast<Expr (IRSchedule::*)(const Expr&, const std::vector<int>&)>(&IRSchedule::Fuse))));
static_cast<Expr (IRSchedule::*)(const Expr&, const std::vector<int>&)>(
&IRSchedule::Fuse))));
CINN_BUILD_STEP_KIND(ComputeAt)
.Inputs({"block", "loop"})
.Attrs({"keep_unit_loops"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::ComputeAt)));
.SetApplyFn(
APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::ComputeAt)));
CINN_BUILD_STEP_KIND(SimpleComputeAt)
.Inputs({"block", "loop"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::SimpleComputeAt)));
.SetApplyFn(APPLY_FUNC_UNIFORM(
FREE_FUNCTION_CONVERTER(&IRSchedule::SimpleComputeAt)));
CINN_BUILD_STEP_KIND(ReverseComputeAt)
.Inputs({"block", "loop"})
.Attrs({"keep_unit_loops"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::ReverseComputeAt)));
.SetApplyFn(APPLY_FUNC_UNIFORM(
FREE_FUNCTION_CONVERTER(&IRSchedule::ReverseComputeAt)));
CINN_BUILD_STEP_KIND(GetRootBlock)
.Inputs({"expr"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::GetRootBlock)));
.SetApplyFn(
APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::GetRootBlock)));
CINN_BUILD_STEP_KIND(CacheRead)
.Inputs({"block"})
.Attrs({"read_buffer_index", "memory_type"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::CacheRead)));
.SetApplyFn(
APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::CacheRead)));
CINN_BUILD_STEP_KIND(CacheWrite)
.Inputs({"block"})
.Attrs({"write_buffer_index", "memory_type"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::CacheWrite)));
.SetApplyFn(
APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::CacheWrite)));
CINN_BUILD_STEP_KIND(SyncThreads)
.Inputs({"ir_node"})
.Attrs({"after_node"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::SyncThreads)));
.SetApplyFn(
APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::SyncThreads)));
CINN_BUILD_STEP_KIND(SetBuffer)
.Inputs({"block"})
.Attrs({"memory_type", "fixed"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::SetBuffer)));
.SetApplyFn(
APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::SetBuffer)));
CINN_BUILD_STEP_KIND(Reorder)
.Inputs({"loops"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
static_cast<Expr (IRSchedule::*)(const std::vector<Expr>&)>(&IRSchedule::Reorder))));
CINN_BUILD_STEP_KIND(Reorder).Inputs({"loops"}).SetApplyFn(
APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
static_cast<Expr (IRSchedule::*)(const std::vector<Expr>&)>(
&IRSchedule::Reorder))));
CINN_BUILD_STEP_KIND(ReorderWithBlock)
.Inputs({"block"})
.Attrs({"loops_index"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
static_cast<Expr (IRSchedule::*)(const Expr&, const std::vector<int>&)>(&IRSchedule::Reorder))));
static_cast<Expr (IRSchedule::*)(const Expr&, const std::vector<int>&)>(
&IRSchedule::Reorder))));
CINN_BUILD_STEP_KIND(ReorderWithName)
.Attrs({"block_name", "loops_index"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(
static_cast<Expr (IRSchedule::*)(const std::string&, const std::vector<int>&)>(&IRSchedule::Reorder))));
static_cast<Expr (IRSchedule::*)(const std::string&,
const std::vector<int>&)>(
&IRSchedule::Reorder))));
CINN_BUILD_STEP_KIND(Parallel)
.Inputs({"loop"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::Parallel)));
CINN_BUILD_STEP_KIND(Parallel).Inputs({"loop"}).SetApplyFn(
APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::Parallel)));
CINN_BUILD_STEP_KIND(Vectorize)
.Inputs({"loop"})
.Attrs({"factor"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::Vectorize)));
.SetApplyFn(
APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::Vectorize)));
CINN_BUILD_STEP_KIND(Unroll)
.Inputs({"loop"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::Unroll)));
CINN_BUILD_STEP_KIND(Unroll).Inputs({"loop"}).SetApplyFn(
APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::Unroll)));
CINN_BUILD_STEP_KIND(ComputeInline)
.Inputs({"schedule_block"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::ComputeInline)));
.SetApplyFn(APPLY_FUNC_UNIFORM(
FREE_FUNCTION_CONVERTER(&IRSchedule::ComputeInline)));
CINN_BUILD_STEP_KIND(ReverseComputeInline)
.Inputs({"schedule_block"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::ReverseComputeInline)));
.SetApplyFn(APPLY_FUNC_UNIFORM(
FREE_FUNCTION_CONVERTER(&IRSchedule::ReverseComputeInline)));
CINN_BUILD_STEP_KIND(Bind)
.Inputs({"loop"})
@@ -453,23 +471,41 @@ CINN_BUILD_STEP_KIND(Bind)
CINN_BUILD_STEP_KIND(Rfactor)
.Inputs({"rf_loop"})
.Attrs({"rf_axis"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::Rfactor)));
.SetApplyFn(
APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::Rfactor)));
CINN_BUILD_STEP_KIND(MergeExprs)
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::MergeExprs)));
template <typename AttrType> void Annotate(IRSchedule* ir_sch, const Expr&, const std::string&, AttrType);
template <> void Annotate<int>(IRSchedule* ir_sch, const Expr& block, const std::string& key, int value) {
ir_sch->Annotate(block, key, value);
.SetApplyFn(
APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::MergeExprs)));
template <typename AttrType>
void Annotate(IRSchedule* ir_sch, const Expr&, const std::string&, AttrType);
template <>
void Annotate<int>(IRSchedule* ir_sch,
const Expr& block,
const std::string& key,
int value) {
ir_sch->Annotate(block, key, value);
}
template <> void Annotate<bool>(IRSchedule* ir_sch, const Expr& block, const std::string& key, bool value) {
ir_sch->Annotate(block, key, value);
template <>
void Annotate<bool>(IRSchedule* ir_sch,
const Expr& block,
const std::string& key,
bool value) {
ir_sch->Annotate(block, key, value);
}
template <> void Annotate<float>(IRSchedule* ir_sch, const Expr& block, const std::string& key, float value) {
ir_sch->Annotate(block, key, value);
template <>
void Annotate<float>(IRSchedule* ir_sch,
const Expr& block,
const std::string& key,
float value) {
ir_sch->Annotate(block, key, value);
}
void AnnotateStringAttr(IRSchedule* ir_sch, const Expr& block, const std::string& key, const std::string& value) {
ir_sch->Annotate(block, key, value);
void AnnotateStringAttr(IRSchedule* ir_sch,
const Expr& block,
const std::string& key,
const std::string& value) {
ir_sch->Annotate(block, key, value);
}
CINN_BUILD_STEP_KIND(AnnotateIntAttr)
@@ -495,25 +531,29 @@ CINN_BUILD_STEP_KIND(AnnotateStringAttr)
CINN_BUILD_STEP_KIND(Unannotate)
.Inputs({"block"})
.Attrs({"key"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::Unannotate)));
.SetApplyFn(
APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::Unannotate)));
CINN_BUILD_STEP_KIND(FlattenLoops)
.Inputs({"loops"})
.Attrs({"force_flat"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::FlattenLoops)));
.SetApplyFn(
APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::FlattenLoops)));
CINN_BUILD_STEP_KIND(SamplePerfectTile)
.Inputs({"loop"})
.Attrs({"n", "max_innermost_factor", "decision"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::SamplePerfectTile)));
.SetApplyFn(APPLY_FUNC_UNIFORM(
FREE_FUNCTION_CONVERTER(&IRSchedule::SamplePerfectTile)));
CINN_BUILD_STEP_KIND(TagPostSchedule)
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::TagPostSchedule)));
.SetApplyFn(APPLY_FUNC_UNIFORM(
FREE_FUNCTION_CONVERTER(&IRSchedule::TagPostSchedule)));
CINN_BUILD_STEP_KIND(SampleCategorical)
.Attrs({"candidates", "probs", "decision"})
.SetApplyFn(APPLY_FUNC_UNIFORM(FREE_FUNCTION_CONVERTER(&IRSchedule::SampleCategorical)));
// clang-format on
.SetApplyFn(APPLY_FUNC_UNIFORM(
FREE_FUNCTION_CONVERTER(&IRSchedule::SampleCategorical)));
// ------ Following codes are about member function implement of the
// ScheduleDesc class
@@ -150,7 +150,7 @@ void BindFramework(pybind11::module *m) {
t->shape().numel() * t->type().bytes(),
cudaMemcpyDeviceToHost));
#else
LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!";
LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!";
#endif
} else {
CINN_NOT_IMPLEMENTED
@@ -191,7 +191,7 @@ void BindFramework(pybind11::module *m) {
self->shape().numel() * self->type().bytes(),
cudaMemcpyDeviceToHost));
#else
LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!";
LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!";
#endif
} else {
CINN_NOT_IMPLEMENTED
@@ -225,7 +225,7 @@ void BindFramework(pybind11::module *m) {
self->shape().numel() * self->type().bytes(),
cudaMemcpyHostToDevice));
#else
LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!";
LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!";
#endif
} else {
CINN_NOT_IMPLEMENTED
@@ -227,7 +227,7 @@ void BindFrontend(pybind11::module *m) {
in_tensor->shape().numel() * dtype.bytes(),
cudaMemcpyHostToDevice));
#else
LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!";
LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!";
#endif
} else if (target.arch == Target::Arch::X86) {
memcpy(data,
@@ -320,7 +320,7 @@ void BindFrontend(pybind11::module *m) {
in_tensor->shape().numel() * sizeof(float),
cudaMemcpyHostToDevice));
#else
LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!";
LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!";
#endif
} else if (target.arch == Target::Arch::X86) {
for (size_t j = 0; j < in_tensor->shape().numel(); j++) {
@@ -369,7 +369,7 @@ void BindFrontend(pybind11::module *m) {
in_tensor->shape().numel() * sizeof(float),
cudaMemcpyHostToDevice));
#else
LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!";
LOG(FATAL) <<"To use CUDA backends, you need to set WITH_CUDA ON!";
#endif
} else if (target.arch == Target::Arch::X86) {
for (size_t j = 0; j < in_tensor->shape().numel(); j++) {
@@ -419,12 +419,13 @@ void BindFrontend(pybind11::module *m) {
py::class_<NetBuilder, std::shared_ptr<NetBuilder>>(*m, "NetBuilder")
.def(py::init<const std::string &>(), py::arg("name") = "")
// clang-format off
#define PY_REGISTER_CONSTANT_OP(TYPE__) \
.def("constant", \
static_cast<Variable (NetBuilder::*)(const TYPE__&, const std::string &, const std::string &)>( \
&NetBuilder::template Constant<TYPE__>), \
py::arg("value"), \
py::arg("name") = "", \
#define PY_REGISTER_CONSTANT_OP(TYPE__) \
.def("constant", \
static_cast<Variable (NetBuilder::*)( \
const TYPE__&, const std::string &, const std::string &)>( \
&NetBuilder::template Constant<TYPE__>), \
py::arg("value"), \
py::arg("name") = "", \
py::arg("dtype") = "")
EXPAND_CINN_SUPPORT_TYPE(PY_REGISTER_CONSTANT_OP)
#define EXPAND_ONE_VECTOR(TYPE) PY_REGISTER_CONSTANT_OP(std::vector<TYPE>)
@@ -446,23 +447,25 @@ void BindFrontend(pybind11::module *m) {
#undef EXPAND_QUINTIC_VECTOR
#undef EXPAND_SEXTIC_VECTOR
#undef PY_REGISTER_CONSTANT_OP
#define PY_REGISTER_FILLCONSTANT_OP(TYPE__) \
.def("fill_constant", \
static_cast<Variable (NetBuilder::*)( \
const std::vector<int> &, TYPE__, const std::string &, const std::string &, bool)>( \
&NetBuilder::FillConstant<TYPE__>), \
py::arg("shape"), \
py::arg("value"), \
py::arg("name") = "", \
py::arg("dtype"), \
py::arg("force_cpu") = false) \
.def("fill_constant", \
static_cast<Variable (NetBuilder::*)( \
const std::vector<int> &, TYPE__, const std::string &, bool)>( \
&NetBuilder::template FillConstant<TYPE__>), \
py::arg("shape"), \
py::arg("value"), \
py::arg("name") = "", \
#define PY_REGISTER_FILLCONSTANT_OP(TYPE__) \
.def("fill_constant", \
static_cast<Variable (NetBuilder::*)( \
const std::vector<int> &, TYPE__, \
const std::string &, \
const std::string &, bool)>( \
&NetBuilder::FillConstant<TYPE__>), \
py::arg("shape"), \
py::arg("value"), \
py::arg("name") = "", \
py::arg("dtype"), \
py::arg("force_cpu") = false) \
.def("fill_constant", \
static_cast<Variable (NetBuilder::*)( \
const std::vector<int> &, TYPE__, const std::string &, bool)>( \
&NetBuilder::template FillConstant<TYPE__>), \
py::arg("shape"), \
py::arg("value"), \
py::arg("name") = "", \
py::arg("force_cpu") = false)
EXPAND_CINN_SUPPORT_TYPE(PY_REGISTER_FILLCONSTANT_OP)
#undef PY_REGISTER_FILLCONSTANT_OP
@@ -471,7 +474,8 @@ void BindFrontend(pybind11::module *m) {
NETBUILDER_UNARY_OP_FOREACH(PY_REGISTER_UNARY_FUNC)
#undef PY_REGISTER_UNARY_FUNC
#define PY_REGISTER_BINARY_FUNC(func_name__) \
.def(SnakeName(#func_name__), &NetBuilder::func_name__, py::arg("x"), py::arg("y"), py::arg("axis") = -1)
.def(SnakeName(#func_name__), &NetBuilder::func_name__, py::arg("x"), \
py::arg("y"), py::arg("axis") = -1)
NETBUILDER_BINARY_OP_FOREACH(PY_REGISTER_BINARY_FUNC)
#undef PY_REGISTER_BINARY_FUNC
#define PY_REGISTER_REDUCE_FUNC(func_name__) \
@@ -47,7 +47,7 @@ BIN_FULLSIZE = 5348678856
def preprocess(img):
img_width, img_height = img.size
img = img.resize((RESIZE_W, RESIZE_H), Image.ANTIALIAS)
img = img.resize((RESIZE_W, RESIZE_H), Image.LANCZOS)
img = np.array(img)
# HWC to CHW
if len(img.shape) == 3:
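A side note on the last hunk: in Pillow, `Image.ANTIALIAS` has long been an alias of `Image.LANCZOS`, was later deprecated, and is removed in Pillow 10, so switching to `Image.LANCZOS` keeps the same resampling filter while remaining compatible with newer Pillow releases. A minimal sketch of the updated call, with illustrative sizes standing in for the script's own RESIZE_W/RESIZE_H:

```python
from PIL import Image

# Illustrative sizes; the actual script defines RESIZE_W and RESIZE_H itself.
RESIZE_W, RESIZE_H = 224, 224


def resize_for_preprocess(img):
    # Image.LANCZOS is the surviving name for the filter formerly also
    # exposed as Image.ANTIALIAS (removed in Pillow 10).
    return img.resize((RESIZE_W, RESIZE_H), Image.LANCZOS)
```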