Unverified Commit b2df4c76 authored by Sing_chan, committed by GitHub

remove .clang-format in paddle/fluid to use the same config (#43678)

Parent f586110d
@@ -24,4 +24,6 @@ Standard: Cpp11
AllowAllParametersOfDeclarationOnNextLine: true
BinPackParameters: false
BinPackArguments: false
IncludeBlocks: Preserve
IncludeIsMainSourceRegex: (\.cu)$
...
---
Language: Cpp
BasedOnStyle: Google
Standard: Cpp11
...
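
The YAML document beginning at the "---" marker above is presumably the paddle/fluid/.clang-format named in the commit title: a bare Google-based config deleted so the directory falls back to the root file. The root file in turn gains two include-handling options: IncludeBlocks: Preserve sorts each blank-line-separated #include group on its own and never merges groups, while IncludeIsMainSourceRegex: (\.cu)$ lets a .cu file count as a "main" source file so its companion header gets top priority. A rough sketch of the combined effect, assuming a hypothetical gelu_kernel.cu built from headers that appear later in this diff:

// Illustrative sketch only; the file name and grouping are assumptions, not
// part of the commit. In a source file named gelu_kernel.cu, the new
// IncludeIsMainSourceRegex: (\.cu)$ lets clang-format recognize
// gelu_kernel.h as the main include and sort it to the top.
#include "paddle/phi/kernels/gelu_kernel.h"

// IncludeBlocks: Preserve sorts each blank-line-separated group on its own
// and never merges groups, so this hand-made grouping survives a reformat.
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"

#include "paddle/phi/kernels/funcs/broadcast_function.h"
#include "paddle/phi/kernels/gpu/gelu_funcs.h"

With include groups preserved by the shared config, the hand-ordered include lists that used to be protected by // clang-format off / on guards (removed throughout the hunks below) presumably keep compiling after an automatic reformat.
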
@@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
// clang-format off
#include "paddle/fluid/framework/paddle2cinn/cinn_graph_symbolization.h"
#include <algorithm>
@@ -31,7 +30,6 @@ limitations under the License. */
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/errors.h"
// clang-format on
namespace paddle {
namespace framework {
@@ -79,7 +77,8 @@ FeedInfoMap CinnGraphSymbolization::GetFeedInfoMapFromInput() const {
for (auto& feed_pair : input_tensors_) {
const auto& feed_name = feed_pair.first;
const auto* tensor = feed_pair.second;
PADDLE_ENFORCE_NE(tensor, nullptr,
PADDLE_ENFORCE_NE(tensor,
nullptr,
platform::errors::PreconditionNotMet(
"The input variable %s's tensor cannot be NULL,"
"we need the variable's dtype and shape from tensor.",
@@ -96,7 +95,8 @@ FeedInfoMap CinnGraphSymbolization::GetFeedInfoMapFromInput() const {
}
PADDLE_ENFORCE_NE(
feed_map[feed_name].shape.size(), 0UL,
feed_map[feed_name].shape.size(),
0UL,
platform::errors::PreconditionNotMet(
"The input variable %s's tensor shape cannot be empty,"
"we need the variable's dtype and shape from tensor.",
@@ -136,7 +136,8 @@ CinnGraphSymbolization::CreateCinnScope(const FeedInfoMap& feed_map) {
for (const auto& param_name : parameter_names) {
PADDLE_ENFORCE_GT(
feed_map.count(param_name), 0UL,
feed_map.count(param_name),
0UL,
platform::errors::NotFound("Cannot find parameter %s from input list,"
"please add the tensor into input.",
param_name.c_str()));
@@ -162,8 +163,8 @@ CinnGraphSymbolization::CreateCinnScope(const FeedInfoMap& feed_map) {
std::vector<Node*> CinnGraphSymbolization::TopologicalSort() const {
std::unordered_set<Node*> op_nodes;
std::for_each(graph_.Nodes().begin(), graph_.Nodes().end(),
[&op_nodes](Node* n) {
std::for_each(
graph_.Nodes().begin(), graph_.Nodes().end(), [&op_nodes](Node* n) {
if (n->IsOp()) {
op_nodes.emplace(n);
}
@@ -210,7 +211,8 @@ std::vector<Node*> CinnGraphSymbolization::TopologicalSort() const {
}
}
PADDLE_ENFORCE_EQ(sorted_ops.size(), op_nodes.size(),
PADDLE_ENFORCE_EQ(sorted_ops.size(),
op_nodes.size(),
platform::errors::PreconditionNotMet(
"The sorting graph contains cycles."));
return sorted_ops;
@@ -234,7 +236,8 @@ void CinnGraphSymbolization::RunOp(const CinnOpDesc& op_desc,
const OpMapperContext& ctx) const {
const auto& op_type = op_desc.Type();
auto* kernel = ::cinn::frontend::OpMapperRegistry::Global()->Find(op_type);
PADDLE_ENFORCE_NE(kernel, nullptr,
PADDLE_ENFORCE_NE(kernel,
nullptr,
platform::errors::NotFound(
"Op %s is Not Supported by CINN, please register"
" this op in the CINN repo.",
@@ -256,10 +259,12 @@ std::unordered_set<std::string> CinnGraphSymbolization::GetFetchIds() const {
std::unordered_set<std::string> fetch_names;
fetch_names.reserve(fetch_var_names_.size());
std::for_each(
fetch_var_names_.begin(), fetch_var_names_.end(),
fetch_var_names_.begin(),
fetch_var_names_.end(),
[this, &fetch_names](const std::string& name) {
PADDLE_ENFORCE_EQ(
var_model_to_program_map_.count(name), 1,
var_model_to_program_map_.count(name),
1,
platform::errors::PreconditionNotMet(
"Cannot find %s in var_model_to_program_map_", name.c_str()));
fetch_names.insert(var_model_to_program_map_.at(name));
@@ -276,8 +281,12 @@ std::unordered_set<std::string> CinnGraphSymbolization::GetFetchIds() const {
auto feed_map = GetFeedInfoMapFromInput();
auto cinn_scope = CreateCinnScope(feed_map);
OpMapperContext ctx(*cinn_scope, target_, &builder, &var_map_,
&var_model_to_program_map_, &fetch_var_names_);
OpMapperContext ctx(*cinn_scope,
target_,
&builder,
&var_map_,
&var_model_to_program_map_,
&fetch_var_names_);
// add all tensor's feed info into context
for (auto& feed_pair : feed_map) {
ctx.AddFeedInfo(feed_pair.first, feed_pair.second);
@@ -14,7 +14,6 @@ limitations under the License. */
#pragma once
// clang-format off
#include <map>
#include <string>
#include <unordered_map>
@@ -27,7 +26,6 @@ limitations under the License. */
#include "cinn/frontend/net_builder.h"
#include "cinn/frontend/op_mapper_registry.h"
// clang-format on
namespace paddle {
namespace framework {
@@ -65,7 +63,8 @@ namespace paddle2cinn {
class CinnGraphSymbolization {
public:
CinnGraphSymbolization(
int64_t graph_id, const ir::Graph& graph,
int64_t graph_id,
const ir::Graph& graph,
const ::cinn::common::Target& target,
const std::map<std::string, const LoDTensor*>& input_tensors)
: graph_id_(graph_id),
@@ -14,8 +14,6 @@
#pragma once
// The headers can't be sorted by clang-format or a compile error occurs.
// clang-format off
#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/op_desc.h"
#include "paddle/fluid/framework/program_desc.h"
@@ -26,7 +24,6 @@
#include "cinn/frontend/paddle/cpp/op_desc.h"
#include "cinn/frontend/paddle/cpp/program_desc.h"
#include "cinn/frontend/paddle/cpp/var_desc.h"
// clang-format on
namespace paddle {
namespace framework {
@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// clang-format off
#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
#include <llvm/ADT/TypeSwitch.h>
@@ -61,7 +60,6 @@ void InfrtDialect::initialize() {
#include "paddle/infrt/dialect/infrt/ir/test_kernels.cpp.inc"
>();
}
// clang-format on
/// Parse a type registered to this dialect.
mlir::Type InfrtDialect::parseType(::mlir::DialectAsmParser &parser) const {
@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// clang-format off
#include <gtest/gtest.h>
#include <llvm/Support/CommandLine.h>
#include <mlir/Pass/PassManager.h>
@@ -50,7 +49,6 @@
#include "paddle/infrt/dialect/phi/ir/infrt_phi_tensor.h"
#include "paddle/infrt/dialect/phi/ir/phi_base.h"
#include "paddle/infrt/dialect/phi/ir/phi_kernels.h"
// clang-format on
static llvm::cl::list<std::string> cl_shared_libs( // NOLINT
"shared_libs",
@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// clang-format off
#include "paddle/phi/kernels/gelu_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
@@ -21,7 +20,6 @@
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/broadcast_function.h"
#include "paddle/phi/kernels/gpu/gelu_funcs.h"
// clang-format on
DECLARE_bool(use_fast_math);
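
The reflowed calls in the paddle/fluid hunks above follow mechanically from the root style: the deleted nested config was plain BasedOnStyle: Google (which bin-packs arguments by default), while the root file sets BinPackParameters: false and BinPackArguments: false, so any call that no longer fits on one line is rewrapped with one argument per line. A before/after sketch of that rewrap, using the PADDLE_ENFORCE_EQ call from TopologicalSort above (indentation approximate):

// Before: the first two arguments are bin-packed onto a shared line.
PADDLE_ENFORCE_EQ(sorted_ops.size(), op_nodes.size(),
                  platform::errors::PreconditionNotMet(
                      "The sorting graph contains cycles."));

// After: with BinPackArguments: false, every argument gets its own line
// once the whole call cannot fit on a single line.
PADDLE_ENFORCE_EQ(sorted_ops.size(),
                  op_nodes.size(),
                  platform::errors::PreconditionNotMet(
                      "The sorting graph contains cycles."));

The same rule accounts for the OpMapperContext constructor call and the std::for_each invocations being re-broken across lines in the hunks above.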