Commit e0d4e04b authored by Tao Luo

fix some compiler warning

test=develop
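All of the hunks below silence signed/unsigned comparison warnings (GCC/Clang's -Wsign-compare) by giving both sides of a comparison the same signedness. As editorial context, here is a minimal hypothetical sketch, not part of this commit, of the warning and the two fix patterns the commit applies: casting one operand, or changing the index type (all names are illustrative):

#include <cstddef>
#include <vector>

int min_subgraph_size = 3;  // a signed config value, like min_subgraph_size_

void demo(const std::vector<int>& items) {
  // items.size() is size_t; comparing it with an int warns under -Wsign-compare:
  //   if (items.size() <= min_subgraph_size) return;

  // Fix pattern 1: cast the signed operand to size_t at the comparison site.
  if (items.size() <= (size_t)min_subgraph_size) return;

  // Fix pattern 2: declare the loop index with the container's unsigned type.
  for (size_t i = 0; i < items.size(); ++i) {
    (void)items[i];  // use the element
  }
}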
Parent 8ea13e33
@@ -412,7 +412,7 @@ void DetachDeletedNodes(framework::ir::Graph *graph) {
 void SubGraphFuser::ReplaceNodesWithSubGraphs() {
   auto subgraphs = SubgraphDetector(graph_, node_inside_subgraph_teller_)();
   for (auto &subgraph : subgraphs) {
-    if (subgraph.size() <= min_subgraph_size_) continue;
+    if (subgraph.size() <= (size_t)min_subgraph_size_) continue;
     LOG(INFO) << "detect a subgraph size " << subgraph.size();
     std::unordered_set<Node *> subgraph_uniq(subgraph.begin(), subgraph.end());
     // replace this sub-graph with the first node. Two steps: 1. Create a Block
...
@@ -114,7 +114,7 @@ void TensorRtSubgraphPass::CreateTensorRTOp(framework::ir::Node *node,
   // it is either an OP's input or an OP's output.
   auto &subgraph_nodes = *Agent(node).subgraph();
-  for (int index = 0; index < block_desc.OpSize(); index++) {
+  for (size_t index = 0; index < block_desc.OpSize(); index++) {
     framework::proto::OpDesc *op = block_desc.Op(index)->Proto();
     auto correspond_node = subgraph_nodes[index];
     PADDLE_ENFORCE_EQ(correspond_node->Name(), op->type());
...
@@ -69,7 +69,7 @@ struct DataRecord {
       num_lines++;
       std::vector<std::string> data;
       split(line, ',', &data);
-      CHECK_EQ(data.size(), 2 * MAX_TURN_NUM + 3);
+      CHECK_EQ(data.size(), (size_t)(2 * MAX_TURN_NUM + 3));
       // load turn data
       std::vector<int64_t> turns_tmp[MAX_TURN_NUM];
       for (int i = 0; i < MAX_TURN_NUM; ++i) {
...
@@ -38,7 +38,7 @@ class HashOp : public framework::OperatorWithKernel {
     std::vector<int64_t> out_dims;
     out_dims.reserve(dims.size() + 1);
     // copy all dims except the last one
-    for (size_t i = 0u; i != dims.size() - 1; ++i) {
+    for (int i = 0u; i != dims.size() - 1; ++i) {
       out_dims.emplace_back(dims[i]);
     }
     int num_hash = ctx->Attrs().Get<int>("num_hash");
...
@@ -244,7 +244,7 @@ typename std::enable_if<
     std::is_same<DeviceContext, platform::CPUDeviceContext>::value>::type
 elementwise_add_to(const DeviceContext& ctx, BlasT<DeviceContext, T>* blas,
                    size_t data_len, const T* in, T* out) {
-  for (int64_t i = 0; i < data_len; i++) {
+  for (size_t i = 0; i < data_len; i++) {
     out[i] += in[i];
   }
 }
...
@@ -70,11 +70,11 @@ void TestSequencePoolingSum(const paddle::framework::LoD& lod) {
   EXPECT_EQ(in_grad.lod(), lod);

   if (paddle::platform::is_cpu_place(*place)) {
-    for (int64_t i = 0; i < in_grad.lod()[0].size() - 1; ++i) {
+    for (size_t i = 0; i < in_grad.lod()[0].size() - 1; ++i) {
       int64_t begin = in_grad.lod()[0][i];
       int64_t end = in_grad.lod()[0][i + 1];
       paddle::framework::Tensor tmp = in_grad.Slice(begin, end);
-      for (int64_t j = 0; j != tmp.numel() / second_dim; ++j) {
+      for (size_t j = 0; j != tmp.numel() / second_dim; ++j) {
         for (int64_t m = 0; m != second_dim; ++m) {
           EXPECT_EQ(tmp.data<T>()[m + j * second_dim],
                     out_grad.data<T>()[m + i * second_dim]);
@@ -82,11 +82,11 @@ void TestSequencePoolingSum(const paddle::framework::LoD& lod) {
       }
     }
   } else {
-    for (int64_t i = 0; i < cpu_in_grad.lod()[0].size() - 1; ++i) {
+    for (size_t i = 0; i < cpu_in_grad.lod()[0].size() - 1; ++i) {
       int64_t begin = cpu_in_grad.lod()[0][i];
       int64_t end = cpu_in_grad.lod()[0][i + 1];
       paddle::framework::Tensor tmp = cpu_in_grad.Slice(begin, end);
-      for (int64_t j = 0; j != tmp.numel() / second_dim; ++j) {
+      for (size_t j = 0; j != tmp.numel() / second_dim; ++j) {
         for (int64_t m = 0; m != second_dim; ++m) {
           EXPECT_EQ(tmp.data<T>()[m + j * second_dim],
                     cpu_out_grad.data<T>()[m + i * second_dim]);
...
@@ -43,11 +43,11 @@ class MergeIdsOpKernel : public framework::OpKernel<T> {
     PADDLE_ENFORCE_EQ(ids.size(), outs.size(),
                       "the number of Ids and Out should be the same");

-    int row_ids_size = 0;
+    size_t row_ids_size = 0;
     int row_size = 0;
     int embedding_size = 0;

-    for (int i = 0; i < x_tensors.size(); ++i) {
+    for (size_t i = 0; i < x_tensors.size(); ++i) {
       const auto *x_tensor = x_tensors[i];
       const auto *row_id = row_ids[i];
@@ -66,7 +66,7 @@ class MergeIdsOpKernel : public framework::OpKernel<T> {
     std::unordered_map<int64_t, std::tuple<int64_t, int64_t>>
         selected_rows_idx_map;
-    for (int i = 0; i < x_tensors.size(); ++i) {
+    for (size_t i = 0; i < x_tensors.size(); ++i) {
       const auto *row_id = row_ids[i];

       for (int j = 0; j < row_id->numel(); ++j) {
@@ -78,7 +78,7 @@ class MergeIdsOpKernel : public framework::OpKernel<T> {
     PADDLE_ENFORCE_EQ(row_ids_size, selected_rows_idx_map.size(),
                       "the rows and tensor map size should be the same");

-    for (int i = 0; i < outs.size(); ++i) {
+    for (size_t i = 0; i < outs.size(); ++i) {
       auto *out_ids = ids[i];
       auto *out = outs[i];
...
@@ -38,7 +38,7 @@ class RefByTrainerIdKernel : public framework::OpKernel<T> {
     } else {
       trainer_id = *trainer_id_data;
     }
-    PADDLE_ENFORCE_LT(trainer_id, in_list.size());
+    PADDLE_ENFORCE_LT((size_t)trainer_id, in_list.size());
     out->mutable_data<T>(context.GetPlace());
     out->ShareDataWith(*(in_list[trainer_id]));
   }
...
@@ -64,7 +64,7 @@ class SplitIdsOpKernel : public framework::OpKernel<T> {
       out_ids.resize(outs.size());

       // split id by their shard_num.
-      for (int i = 0; i < all_ids.size(); ++i) {
+      for (size_t i = 0; i < all_ids.size(); ++i) {
         T id = all_ids[i];
         size_t shard_id = static_cast<size_t>(id) % shard_num;
         out_ids[shard_id].push_back(id);
...