From 67cbb3e3b6bc5a00b66b3fb1c2de4991ad2e4a21 Mon Sep 17 00:00:00 2001
From: wanghaox
Date: Wed, 13 Dec 2017 18:50:03 +0800
Subject: [PATCH 001/217] detection map evaluator for SSD

---
 paddle/operators/detection_map_op.cc            |  77 +++++
 paddle/operators/detection_map_op.cu            |  20 ++
 paddle/operators/detection_map_op.h             | 316 ++++++++++++++++++
 paddle/operators/math/detection_util.cc         |  22 ++
 paddle/operators/math/detection_util.cu         |  23 ++
 paddle/operators/math/detection_util.h          | 128 +++++++
 .../v2/fluid/tests/test_detection_map_op.py     | 155 +++++++++
 7 files changed, 741 insertions(+)
 create mode 100644 paddle/operators/detection_map_op.cc
 create mode 100644 paddle/operators/detection_map_op.cu
 create mode 100644 paddle/operators/detection_map_op.h
 create mode 100644 paddle/operators/math/detection_util.cc
 create mode 100644 paddle/operators/math/detection_util.cu
 create mode 100644 paddle/operators/math/detection_util.h
 create mode 100644 python/paddle/v2/fluid/tests/test_detection_map_op.py

diff --git a/paddle/operators/detection_map_op.cc b/paddle/operators/detection_map_op.cc
new file mode 100644
index 0000000000..b59d3bfad9
--- /dev/null
+++ b/paddle/operators/detection_map_op.cc
@@ -0,0 +1,77 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/operators/detection_map_op.h"
+
+namespace paddle {
+namespace operators {
+
+using Tensor = framework::Tensor;
+
+class DetectionMAPOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+  void InferShape(framework::InferShapeContext* ctx) const override {
+    auto map_dim = framework::make_ddim({1});
+    ctx->SetOutputDim("MAP", map_dim);
+  }
+
+ protected:
+  framework::OpKernelType GetKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input("Label")->type()),
+        ctx.device_context());
+  }
+};
+
+class DetectionMAPOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  DetectionMAPOpMaker(framework::OpProto* proto,
+                      framework::OpAttrChecker* op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
+    AddInput("Detect", "The detection output.");
+    AddInput("Label", "The label data.");
+    AddOutput("MAP", "The mAP evaluation result of the detection.");
+
+    AddAttr<float>("overlap_threshold", "The overlap threshold.")
+        .SetDefault(.3f);
+    AddAttr<bool>("evaluate_difficult",
+                  "Switch to control whether the difficult data is evaluated.")
+        .SetDefault(true);
+    AddAttr<std::string>("ap_type",
+                         "The AP algorithm type, 'Integral' or '11point'.")
+        .SetDefault("Integral");
+
+    AddComment(R"DOC(
+Detection mAP Operator.
+
+Detection mAP evaluator for the SSD (Single Shot MultiBox Detector) algorithm.
+For more details, please refer to the following paper:
+https://arxiv.org/abs/1512.02325.
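+
+The input layout this first version assumes (as read back by
+GetBBoxFromDetectData and GetBBoxFromLabelData in math/detection_util.h):
+each row of Input(Detect) carries 7 values,
+[image_id, label, score, xmin, ymin, xmax, ymax], and each row of
+Input(Label) carries 6 values, [label, xmin, ymin, xmax, ymax, is_difficult].
+For example, the detection row [0, 1, 0.9, 0.7, 0.6, 0.8, 0.8] from the unit
+test reads: image 0 holds a class-1 box spanning (0.7, 0.6) to (0.8, 0.8)
+with confidence 0.9.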
+
+)DOC");
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+REGISTER_OP_WITHOUT_GRADIENT(detection_map, ops::DetectionMAPOp,
+                             ops::DetectionMAPOpMaker);
+REGISTER_OP_CPU_KERNEL(
+    detection_map, ops::DetectionMAPOpKernel,
+    ops::DetectionMAPOpKernel);
diff --git a/paddle/operators/detection_map_op.cu b/paddle/operators/detection_map_op.cu
new file mode 100644
index 0000000000..ab9a992c36
--- /dev/null
+++ b/paddle/operators/detection_map_op.cu
@@ -0,0 +1,20 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/operators/detection_map_op.h"
+
+namespace ops = paddle::operators;
+REGISTER_OP_CUDA_KERNEL(
+    detection_map, ops::DetectionMAPOpKernel,
+    ops::DetectionMAPOpKernel);
diff --git a/paddle/operators/detection_map_op.h b/paddle/operators/detection_map_op.h
new file mode 100644
index 0000000000..3e862abda6
--- /dev/null
+++ b/paddle/operators/detection_map_op.h
@@ -0,0 +1,316 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
*/ + +#pragma once +#include "paddle/framework/op_registry.h" +#include "paddle/operators/math/detection_util.h" +#include "paddle/operators/math/math_function.h" + +namespace paddle { +namespace operators { + +template +inline void GetAccumulation(std::vector> in_pairs, + std::vector* accu_vec) { + std::stable_sort(in_pairs.begin(), in_pairs.end(), + math::SortScorePairDescend); + accu_vec->clear(); + size_t sum = 0; + for (size_t i = 0; i < in_pairs.size(); ++i) { + // auto score = in_pairs[i].first; + auto count = in_pairs[i].second; + sum += count; + accu_vec->push_back(sum); + } +} + +template +class DetectionMAPOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* input_label = ctx.Input("Label"); + auto* input_detect = ctx.Input("Detect"); + auto* map_out = ctx.Output("MAP"); + + float overlap_threshold = ctx.Attr("overlap_threshold"); + float evaluate_difficult = ctx.Attr("evaluate_difficult"); + std::string ap_type = ctx.Attr("ap_type"); + + auto label_lod = input_label->lod(); + PADDLE_ENFORCE_EQ(label_lod.size(), 1UL, + "Only support one level sequence now."); + auto batch_size = label_lod[0].size() - 1; + + std::vector>>> gt_bboxes; + + std::vector< + std::map>>>> + detect_bboxes; + + if (platform::is_gpu_place(ctx.GetPlace())) { + framework::LoDTensor input_label_cpu; + framework::Tensor input_detect_cpu; + input_label_cpu.set_lod(input_label->lod()); + input_label_cpu.Resize(input_label->dims()); + input_detect_cpu.Resize(input_detect->dims()); + input_label_cpu.mutable_data(platform::CPUPlace()); + input_detect_cpu.mutable_data(platform::CPUPlace()); + framework::CopyFrom(*input_label, platform::CPUPlace(), + ctx.device_context(), &input_label_cpu); + framework::CopyFrom(*input_detect, platform::CPUPlace(), + ctx.device_context(), &input_detect_cpu); + GetBBoxes(input_label_cpu, input_detect_cpu, gt_bboxes, detect_bboxes); + } else { + GetBBoxes(*input_label, *input_detect, gt_bboxes, detect_bboxes); + } + + std::map label_pos_count; + std::map>> true_pos; + std::map>> false_pos; + + CalcTrueAndFalsePositive(batch_size, evaluate_difficult, overlap_threshold, + gt_bboxes, detect_bboxes, label_pos_count, + true_pos, false_pos); + + T map = CalcMAP(ap_type, label_pos_count, true_pos, false_pos); + + T* map_data = nullptr; + framework::Tensor map_cpu; + map_out->mutable_data(ctx.GetPlace()); + if (platform::is_gpu_place(ctx.GetPlace())) { + map_data = map_cpu.mutable_data(map_out->dims(), platform::CPUPlace()); + map_data[0] = map; + framework::CopyFrom(map_cpu, platform::CPUPlace(), ctx.device_context(), + map_out); + } else { + map_data = map_out->mutable_data(ctx.GetPlace()); + map_data[0] = map; + } + } + + protected: + void GetBBoxes( + const framework::LoDTensor& input_label, + const framework::Tensor& input_detect, + std::vector>>>& + gt_bboxes, + std::vector< + std::map>>>>& + detect_bboxes) const { + const T* label_data = input_label.data(); + const T* detect_data = input_detect.data(); + + auto label_lod = input_label.lod(); + auto batch_size = label_lod[0].size() - 1; + auto label_index = label_lod[0]; + + for (size_t n = 0; n < batch_size; ++n) { + std::map>> bboxes; + for (int i = label_index[n]; i < label_index[n + 1]; ++i) { + std::vector> bbox; + math::GetBBoxFromLabelData(label_data + i * 6, 1, bbox); + int label = static_cast(label_data[i * 6]); + bboxes[label].push_back(bbox[0]); + } + gt_bboxes.push_back(bboxes); + } + + size_t n = 0; + size_t detect_box_count = 
input_detect.dims()[0]; + for (size_t img_id = 0; img_id < batch_size; ++img_id) { + std::map>>> bboxes; + size_t cur_img_id = static_cast((detect_data + n * 7)[0]); + while (cur_img_id == img_id && n < detect_box_count) { + std::vector label; + std::vector score; + std::vector> bbox; + math::GetBBoxFromDetectData(detect_data + n * 7, 1, label, score, + bbox); + bboxes[label[0]].push_back(std::make_pair(score[0], bbox[0])); + ++n; + cur_img_id = static_cast((detect_data + n * 7)[0]); + } + detect_bboxes.push_back(bboxes); + } + } + + void CalcTrueAndFalsePositive( + size_t batch_size, bool evaluate_difficult, float overlap_threshold, + const std::vector>>>& + gt_bboxes, + const std::vector< + std::map>>>>& + detect_bboxes, + std::map& label_pos_count, + std::map>>& true_pos, + std::map>>& false_pos) const { + for (size_t n = 0; n < batch_size; ++n) { + auto image_gt_bboxes = gt_bboxes[n]; + for (auto it = image_gt_bboxes.begin(); it != image_gt_bboxes.end(); + ++it) { + size_t count = 0; + auto labeled_bboxes = it->second; + if (evaluate_difficult) { + count = labeled_bboxes.size(); + } else { + for (size_t i = 0; i < labeled_bboxes.size(); ++i) + if (!(labeled_bboxes[i].is_difficult)) ++count; + } + if (count == 0) { + continue; + } + int label = it->first; + if (label_pos_count.find(label) == label_pos_count.end()) { + label_pos_count[label] = count; + } else { + label_pos_count[label] += count; + } + } + } + + for (size_t n = 0; n < detect_bboxes.size(); ++n) { + auto image_gt_bboxes = gt_bboxes[n]; + auto detections = detect_bboxes[n]; + + if (image_gt_bboxes.size() == 0) { + for (auto it = detections.begin(); it != detections.end(); ++it) { + auto pred_bboxes = it->second; + int label = it->first; + for (size_t i = 0; i < pred_bboxes.size(); ++i) { + auto score = pred_bboxes[i].first; + true_pos[label].push_back(std::make_pair(score, 0)); + false_pos[label].push_back(std::make_pair(score, 1)); + } + } + continue; + } + + for (auto it = detections.begin(); it != detections.end(); ++it) { + int label = it->first; + auto pred_bboxes = it->second; + if (image_gt_bboxes.find(label) == image_gt_bboxes.end()) { + for (size_t i = 0; i < pred_bboxes.size(); ++i) { + auto score = pred_bboxes[i].first; + true_pos[label].push_back(std::make_pair(score, 0)); + false_pos[label].push_back(std::make_pair(score, 1)); + } + continue; + } + + auto matched_bboxes = image_gt_bboxes.find(label)->second; + std::vector visited(matched_bboxes.size(), false); + // Sort detections in descend order based on scores + std::sort(pred_bboxes.begin(), pred_bboxes.end(), + math::SortScorePairDescend>); + for (size_t i = 0; i < pred_bboxes.size(); ++i) { + float max_overlap = -1.0; + size_t max_idx = 0; + auto score = pred_bboxes[i].first; + for (size_t j = 0; j < matched_bboxes.size(); ++j) { + float overlap = + JaccardOverlap(pred_bboxes[i].second, matched_bboxes[j]); + if (overlap > max_overlap) { + max_overlap = overlap; + max_idx = j; + } + } + if (max_overlap > overlap_threshold) { + bool match_evaluate_difficult = + evaluate_difficult || + (!evaluate_difficult && !matched_bboxes[max_idx].is_difficult); + if (match_evaluate_difficult) { + if (!visited[max_idx]) { + true_pos[label].push_back(std::make_pair(score, 1)); + false_pos[label].push_back(std::make_pair(score, 0)); + visited[max_idx] = true; + } else { + true_pos[label].push_back(std::make_pair(score, 0)); + false_pos[label].push_back(std::make_pair(score, 1)); + } + } + } else { + true_pos[label].push_back(std::make_pair(score, 0)); + 
false_pos[label].push_back(std::make_pair(score, 1)); + } + } + } + } + } + + T CalcMAP( + std::string ap_type, const std::map& label_pos_count, + const std::map>>& true_pos, + const std::map>>& false_pos) const { + T mAP = 0.0; + int count = 0; + for (auto it = label_pos_count.begin(); it != label_pos_count.end(); ++it) { + int label = it->first; + int label_num_pos = it->second; + if (label_num_pos == 0 || true_pos.find(label) == true_pos.end()) + continue; + auto label_true_pos = true_pos.find(label)->second; + auto label_false_pos = false_pos.find(label)->second; + // Compute average precision. + std::vector tp_sum; + GetAccumulation(label_true_pos, &tp_sum); + std::vector fp_sum; + GetAccumulation(label_false_pos, &fp_sum); + std::vector precision, recall; + size_t num = tp_sum.size(); + // Compute Precision. + for (size_t i = 0; i < num; ++i) { + // CHECK_LE(tpCumSum[i], labelNumPos); + precision.push_back(static_cast(tp_sum[i]) / + static_cast(tp_sum[i] + fp_sum[i])); + recall.push_back(static_cast(tp_sum[i]) / label_num_pos); + } + // VOC2007 style + if (ap_type == "11point") { + std::vector max_precisions(11, 0.0); + int start_idx = num - 1; + for (int j = 10; j >= 0; --j) + for (int i = start_idx; i >= 0; --i) { + if (recall[i] < j / 10.) { + start_idx = i; + if (j > 0) max_precisions[j - 1] = max_precisions[j]; + break; + } else { + if (max_precisions[j] < precision[i]) + max_precisions[j] = precision[i]; + } + } + for (int j = 10; j >= 0; --j) mAP += max_precisions[j] / 11; + ++count; + } else if (ap_type == "Integral") { + // Nature integral + float average_precisions = 0.; + float prev_recall = 0.; + for (size_t i = 0; i < num; ++i) { + if (fabs(recall[i] - prev_recall) > 1e-6) + average_precisions += precision[i] * fabs(recall[i] - prev_recall); + prev_recall = recall[i]; + } + mAP += average_precisions; + ++count; + } else { + LOG(FATAL) << "Unkown ap version: " << ap_type; + } + } + if (count != 0) mAP /= count; + return mAP * 100; + } +}; // namespace operators + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/detection_util.cc b/paddle/operators/math/detection_util.cc new file mode 100644 index 0000000000..4131a0cb0e --- /dev/null +++ b/paddle/operators/math/detection_util.cc @@ -0,0 +1,22 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/math/detection_util.h" +#include "paddle/operators/math/math_function.h" + +namespace paddle { +namespace operators { +namespace math {} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/detection_util.cu b/paddle/operators/math/detection_util.cu new file mode 100644 index 0000000000..d2bb992396 --- /dev/null +++ b/paddle/operators/math/detection_util.cu @@ -0,0 +1,23 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/math/detection_util.h" +#include "paddle/operators/math/math_function.h" +#include "paddle/platform/cuda_helper.h" + +namespace paddle { +namespace operators { +namespace math {} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/detection_util.h b/paddle/operators/math/detection_util.h new file mode 100644 index 0000000000..2a4dadc545 --- /dev/null +++ b/paddle/operators/math/detection_util.h @@ -0,0 +1,128 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ +#pragma once +#include "paddle/framework/selected_rows.h" +#include "paddle/platform/device_context.h" + +namespace paddle { +namespace operators { +namespace math { + +template +struct BBox { + BBox(T x_min, T y_min, T x_max, T y_max) + : x_min(x_min), + y_min(y_min), + x_max(x_max), + y_max(y_max), + is_difficult(false) {} + + BBox() {} + + T get_width() const { return x_max - x_min; } + + T get_height() const { return y_max - y_min; } + + T get_center_x() const { return (x_min + x_max) / 2; } + + T get_center_y() const { return (y_min + y_max) / 2; } + + T get_area() const { return get_width() * get_height(); } + + // coordinate of bounding box + T x_min; + T y_min; + T x_max; + T y_max; + // whether difficult object (e.g. 
object with heavy occlusion is difficult) + bool is_difficult; +}; + +template +void GetBBoxFromDetectData(const T* detect_data, const size_t num_bboxes, + std::vector& labels, std::vector& scores, + std::vector>& bboxes) { + size_t out_offset = bboxes.size(); + labels.resize(out_offset + num_bboxes); + scores.resize(out_offset + num_bboxes); + bboxes.resize(out_offset + num_bboxes); + for (size_t i = 0; i < num_bboxes; ++i) { + labels[out_offset + i] = *(detect_data + i * 7 + 1); + scores[out_offset + i] = *(detect_data + i * 7 + 2); + BBox bbox; + bbox.x_min = *(detect_data + i * 7 + 3); + bbox.y_min = *(detect_data + i * 7 + 4); + bbox.x_max = *(detect_data + i * 7 + 5); + bbox.y_max = *(detect_data + i * 7 + 6); + bboxes[out_offset + i] = bbox; + }; +} + +template +void GetBBoxFromLabelData(const T* label_data, const size_t num_bboxes, + std::vector>& bboxes) { + size_t out_offset = bboxes.size(); + bboxes.resize(bboxes.size() + num_bboxes); + for (size_t i = 0; i < num_bboxes; ++i) { + BBox bbox; + bbox.x_min = *(label_data + i * 6 + 1); + bbox.y_min = *(label_data + i * 6 + 2); + bbox.x_max = *(label_data + i * 6 + 3); + bbox.y_max = *(label_data + i * 6 + 4); + T is_difficult = *(label_data + i * 6 + 5); + if (std::abs(is_difficult - 0.0) < 1e-6) + bbox.is_difficult = false; + else + bbox.is_difficult = true; + bboxes[out_offset + i] = bbox; + } +} + +template +inline float JaccardOverlap(const BBox& bbox1, const BBox& bbox2) { + if (bbox2.x_min > bbox1.x_max || bbox2.x_max < bbox1.x_min || + bbox2.y_min > bbox1.y_max || bbox2.y_max < bbox1.y_min) { + return 0.0; + } else { + float inter_x_min = std::max(bbox1.x_min, bbox2.x_min); + float inter_y_min = std::max(bbox1.y_min, bbox2.y_min); + float inter_x_max = std::min(bbox1.x_max, bbox2.x_max); + float inter_y_max = std::min(bbox1.y_max, bbox2.y_max); + + float inter_width = inter_x_max - inter_x_min; + float inter_height = inter_y_max - inter_y_min; + float inter_area = inter_width * inter_height; + + float bbox_area1 = bbox1.get_area(); + float bbox_area2 = bbox2.get_area(); + + return inter_area / (bbox_area1 + bbox_area2 - inter_area); + } +} + +template +bool SortScorePairDescend(const std::pair& pair1, + const std::pair& pair2) { + return pair1.first > pair2.first; +} + +// template <> +// bool SortScorePairDescend(const std::pair& pair1, +// const std::pair& pair2) { +// return pair1.first > pair2.first; +// } + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/python/paddle/v2/fluid/tests/test_detection_map_op.py b/python/paddle/v2/fluid/tests/test_detection_map_op.py new file mode 100644 index 0000000000..50ce3afbb9 --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_detection_map_op.py @@ -0,0 +1,155 @@ +import unittest +import numpy as np +import sys +import collections +import math +from op_test import OpTest + + +class TestDetectionMAPOp(OpTest): + def set_data(self): + self.init_test_case() + + self.mAP = [self.calc_map(self.tf_pos)] + self.label = np.array(self.label).astype('float32') + self.detect = np.array(self.detect).astype('float32') + self.mAP = np.array(self.mAP).astype('float32') + + self.inputs = { + 'Label': (self.label, self.label_lod), + 'Detect': self.detect + } + + self.attrs = { + 'overlap_threshold': self.overlap_threshold, + 'evaluate_difficult': self.evaluate_difficult, + 'ap_type': self.ap_type + } + + self.outputs = {'MAP': self.mAP} + + def init_test_case(self): + self.overlap_threshold = 0.3 + self.evaluate_difficult = True + self.ap_type = "Integral" + + 
self.label_lod = [[0, 2, 4]] + # label xmin ymin xmax ymax difficult + self.label = [[1, 0.1, 0.1, 0.3, 0.3, 0], [1, 0.6, 0.6, 0.8, 0.8, 1], + [2, 0.3, 0.3, 0.6, 0.5, 0], [1, 0.7, 0.1, 0.9, 0.3, 0]] + + # image_id label score xmin ymin xmax ymax difficult + self.detect = [ + [0, 1, 0.3, 0.1, 0.0, 0.4, 0.3], [0, 1, 0.7, 0.0, 0.1, 0.2, 0.3], + [0, 1, 0.9, 0.7, 0.6, 0.8, 0.8], [1, 2, 0.8, 0.2, 0.1, 0.4, 0.4], + [1, 2, 0.1, 0.4, 0.3, 0.7, 0.5], [1, 1, 0.2, 0.8, 0.1, 1.0, 0.3], + [1, 3, 0.2, 0.8, 0.1, 1.0, 0.3] + ] + + # image_id label score false_pos false_pos + # [-1, 1, 3, -1, -1], + # [-1, 2, 1, -1, -1] + self.tf_pos = [[0, 1, 0.9, 1, 0], [0, 1, 0.7, 1, 0], [0, 1, 0.3, 0, 1], + [1, 1, 0.2, 1, 0], [1, 2, 0.8, 0, 1], [1, 2, 0.1, 1, 0], + [1, 3, 0.2, 0, 1]] + + def calc_map(self, tf_pos): + mAP = 0.0 + count = 0 + + class_pos_count = {} + true_pos = {} + false_pos = {} + + def get_accumulation(pos_list): + sorted_list = sorted(pos_list, key=lambda pos: pos[0], reverse=True) + sum = 0 + accu_list = [] + for (score, count) in sorted_list: + sum += count + accu_list.append(sum) + return accu_list + + label_count = collections.Counter() + for (label, xmin, ymin, xmax, ymax, difficult) in self.label: + if self.evaluate_difficult: + label_count[label] += 1 + elif not difficult: + label_count[label] += 1 + + true_pos = collections.defaultdict(list) + false_pos = collections.defaultdict(list) + for (image_id, label, score, tp, fp) in tf_pos: + true_pos[label].append([score, tp]) + false_pos[label].append([score, fp]) + + for (label, label_pos_num) in label_count.items(): + if label_pos_num == 0 or label not in true_pos: + continue + + label_true_pos = true_pos[label] + label_false_pos = false_pos[label] + + accu_tp_sum = get_accumulation(label_true_pos) + accu_fp_sum = get_accumulation(label_false_pos) + + precision = [] + recall = [] + + for i in range(len(accu_tp_sum)): + precision.append( + float(accu_tp_sum[i]) / + float(accu_tp_sum[i] + accu_fp_sum[i])) + recall.append(float(accu_tp_sum[i]) / label_pos_num) + + if self.ap_type == "11point": + max_precisions = [11.0, 0.0] + start_idx = len(accu_tp_sum) - 1 + for j in range(10, 0, -1): + for i in range(start_idx, 0, -1): + if recall[i] < j / 10.0: + start_idx = i + if j > 0: + max_precisions[j - 1] = max_precisions[j] + break + else: + if max_precisions[j] < accu_precision[i]: + max_precisions[j] = accu_precision[i] + for j in range(10, 0, -1): + mAP += max_precisions[j] / 11 + count += 1 + elif self.ap_type == "Integral": + average_precisions = 0.0 + prev_recall = 0.0 + for i in range(len(accu_tp_sum)): + if math.fabs(recall[i] - prev_recall) > 1e-6: + average_precisions += precision[i] * \ + math.fabs(recall[i] - prev_recall) + prev_recall = recall[i] + + mAP += average_precisions + count += 1 + + if count != 0: mAP /= count + return mAP * 100.0 + + def setUp(self): + self.op_type = "detection_map" + self.set_data() + + def test_check_output(self): + self.check_output() + + +class TestDetectionMAPOpSkipDiff(TestDetectionMAPOp): + def init_test_case(self): + super(TestDetectionMAPOpSkipDiff, self).init_test_case() + + self.evaluate_difficult = False + + self.tf_pos = [[0, 1, 0.7, 1, 0], [0, 1, 0.3, 0, 1], [1, 1, 0.2, 1, 0], + [1, 2, 0.8, 0, 1], [1, 2, 0.1, 1, 0], [1, 3, 0.2, 0, 1]] + + +if __name__ == '__main__': + unittest.main() -- GitLab From 26f03ea13d14a28c199185aa1fd5feda84d4eb6e Mon Sep 17 00:00:00 2001 From: wanghaox Date: Tue, 30 Jan 2018 19:49:50 +0800 Subject: [PATCH 002/217] update detection_map operator --- 
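Note: the reworked kernel below keeps the two-pass structure of the first
version: accumulate per-class true/false positives in descending score
order, then average the per-class APs. For reference, a standalone Python
sketch of the "integral" rule, mirroring calc_map() in the updated unit
test (the function names here are illustrative, not part of the patch):

import collections

def get_accumulation(pos_list):
    # Sort (score, flag) pairs by score, descending, then accumulate flags.
    sorted_list = sorted(pos_list, key=lambda p: p[0], reverse=True)
    accu, total = [], 0
    for score, flag in sorted_list:
        total += flag
        accu.append(total)
    return accu

def integral_map(tf_pos, label_pos_count):
    # tf_pos: rows of (label, score, true_pos, false_pos);
    # label_pos_count: {label: number of ground-truth boxes}.
    true_pos = collections.defaultdict(list)
    false_pos = collections.defaultdict(list)
    for label, score, tp, fp in tf_pos:
        true_pos[label].append((score, tp))
        false_pos[label].append((score, fp))
    mAP, count = 0.0, 0
    for label, num_pos in label_pos_count.items():
        if num_pos == 0 or label not in true_pos:
            continue  # classes without ground truth contribute nothing
        tp_sum = get_accumulation(true_pos[label])
        fp_sum = get_accumulation(false_pos[label])
        ap, prev_recall = 0.0, 0.0
        for tp, fp in zip(tp_sum, fp_sum):
            precision = float(tp) / (tp + fp)
            recall = float(tp) / num_pos
            if abs(recall - prev_recall) > 1e-6:
                ap += precision * (recall - prev_recall)
            prev_recall = recall
        mAP += ap
        count += 1
    return mAP / count * 100.0 if count != 0 else 0.0

# (label, score, tp, fp) rows and ground-truth counts taken from
# TestDetectionMAPOp below:
tf_pos = [(1, 0.9, 1, 0), (1, 0.7, 1, 0), (1, 0.3, 0, 1), (1, 0.2, 1, 0),
          (2, 0.8, 0, 1), (2, 0.1, 1, 0), (3, 0.2, 0, 1)]
print(integral_map(tf_pos, {1: 3, 2: 1}))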
paddle/operators/detection_map_op.cc | 73 ++++- paddle/operators/detection_map_op.cu | 20 -- paddle/operators/detection_map_op.h | 249 +++++++++--------- paddle/operators/math/detection_util.cc | 22 -- paddle/operators/math/detection_util.cu | 23 -- paddle/operators/math/detection_util.h | 128 --------- .../v2/fluid/tests/test_detection_map_op.py | 71 ++--- 7 files changed, 231 insertions(+), 355 deletions(-) delete mode 100644 paddle/operators/detection_map_op.cu delete mode 100644 paddle/operators/math/detection_util.cc delete mode 100644 paddle/operators/math/detection_util.cu delete mode 100644 paddle/operators/math/detection_util.h diff --git a/paddle/operators/detection_map_op.cc b/paddle/operators/detection_map_op.cc index b59d3bfad9..aa47cb3c80 100644 --- a/paddle/operators/detection_map_op.cc +++ b/paddle/operators/detection_map_op.cc @@ -24,6 +24,29 @@ class DetectionMAPOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Detection"), + "Input(Detection) of DetectionMAPOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Label"), + "Input(Label) of DetectionMAPOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("MAP"), + "Output(MAP) of DetectionMAPOp should not be null."); + + auto det_dims = ctx->GetInputDim("Detection"); + PADDLE_ENFORCE_EQ(det_dims.size(), 2UL, + "The rank of Input(Detection) must be 2, " + "the shape is [N, 6]."); + PADDLE_ENFORCE_EQ(det_dims[1], 6UL, + "The shape is of Input(Detection) [N, 6]."); + auto label_dims = ctx->GetInputDim("Label"); + PADDLE_ENFORCE_EQ(label_dims.size(), 2UL, + "The rank of Input(Label) must be 2, " + "the shape is [N, 6]."); + PADDLE_ENFORCE_EQ(label_dims[1], 6UL, + "The shape is of Input(Label) [N, 6]."); + + auto ap_type = GetAPType(ctx->Attrs().Get("ap_type")); + PADDLE_ENFORCE_NE(ap_type, APType::kNone, + "The ap_type should be 'integral' or '11point."); auto map_dim = framework::make_ddim({1}); ctx->SetOutputDim("MAP", map_dim); } @@ -42,25 +65,49 @@ class DetectionMAPOpMaker : public framework::OpProtoAndCheckerMaker { DetectionMAPOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("Detect", "The detection output."); - AddInput("Label", "The label data."); - AddOutput("MAP", "The MAP evaluate result of the detection."); - - AddAttr("overlap_threshold", "The overlap threshold.") + AddInput("Label", + "(LoDTensor) A 2-D LoDTensor with shape[N, 6] represents the" + "Labeled ground-truth data. Each row has 6 values: " + "[label, is_difficult, xmin, ymin, xmax, ymax], N is the total " + "number of ground-truth data in this mini-batch. For each " + "instance, the offsets in first dimension are called LoD, " + "the number of offset is N + 1, if LoD[i + 1] - LoD[i] == 0, " + "means there is no ground-truth data."); + AddInput("Detection", + "(LoDTensor) A 2-D LoDTensor with shape [M, 6] represents the " + "detections. Each row has 6 values: " + "[label, confidence, xmin, ymin, xmax, ymax], M is the total " + "number of detections in this mini-batch. 
For each instance, " + "the offsets in first dimension are called LoD, the number of " + "offset is N + 1, if LoD[i + 1] - LoD[i] == 0, means there is " + "no detected data."); + AddOutput("MAP", + "(Tensor) A tensor with shape [1], store the mAP evaluate " + "result of the detection."); + + AddAttr("overlap_threshold", + "(float) " + "The jaccard overlap threshold of detection output and " + "ground-truth data.") .SetDefault(.3f); AddAttr("evaluate_difficult", + "(bool, default true) " "Switch to control whether the difficult data is evaluated.") .SetDefault(true); AddAttr("ap_type", - "The AP algorithm type, 'Integral' or '11point'.") - .SetDefault("Integral"); - + "(string, default 'integral') " + "The AP algorithm type, 'integral' or '11point'.") + .SetDefault("integral") + .InEnum({"integral", "11point"}); AddComment(R"DOC( -Detection MAP Operator. - -Detection MAP evaluator for SSD(Single Shot MultiBox Detector) algorithm. -Please get more information from the following papers: -https://arxiv.org/abs/1512.02325. +Detection mAP evaluate operator. +The general steps are as follows. First, calculate the true positive and + false positive according to the input of detection and labels, then + calculate the mAP evaluate value. + Supporting '11 point' and 'integral' mAP algorithm. Please get more information + from the following articles: + https://sanchom.wordpress.com/tag/average-precision/ + https://arxiv.org/abs/1512.02325 )DOC"); } diff --git a/paddle/operators/detection_map_op.cu b/paddle/operators/detection_map_op.cu deleted file mode 100644 index ab9a992c36..0000000000 --- a/paddle/operators/detection_map_op.cu +++ /dev/null @@ -1,20 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/operators/detection_map_op.h" - -namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL( - detection_map, ops::DetectionMAPOpKernel, - ops::DetectionMAPOpKernel); diff --git a/paddle/operators/detection_map_op.h b/paddle/operators/detection_map_op.h index 3e862abda6..d29a6968e4 100644 --- a/paddle/operators/detection_map_op.h +++ b/paddle/operators/detection_map_op.h @@ -13,22 +13,37 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #pragma once +#include "paddle/framework/eigen.h" #include "paddle/framework/op_registry.h" -#include "paddle/operators/math/detection_util.h" -#include "paddle/operators/math/math_function.h" namespace paddle { namespace operators { +enum APType { kNone = 0, kIntegral, k11point }; + +APType GetAPType(std::string str) { + if (str == "integral") { + return APType::kIntegral; + } else if (str == "11point") { + return APType::k11point; + } else { + return APType::kNone; + } +} + +template +inline bool SortScorePairDescend(const std::pair& pair1, + const std::pair& pair2) { + return pair1.first > pair2.first; +} + template inline void GetAccumulation(std::vector> in_pairs, std::vector* accu_vec) { - std::stable_sort(in_pairs.begin(), in_pairs.end(), - math::SortScorePairDescend); + std::stable_sort(in_pairs.begin(), in_pairs.end(), SortScorePairDescend); accu_vec->clear(); size_t sum = 0; for (size_t i = 0; i < in_pairs.size(); ++i) { - // auto score = in_pairs[i].first; auto count = in_pairs[i].second; sum += count; accu_vec->push_back(sum); @@ -39,126 +54,125 @@ template class DetectionMAPOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - auto* input_label = ctx.Input("Label"); - auto* input_detect = ctx.Input("Detect"); - auto* map_out = ctx.Output("MAP"); + auto* in_detect = ctx.Input("Detection"); + auto* in_label = ctx.Input("Label"); + auto* out_map = ctx.Output("MAP"); float overlap_threshold = ctx.Attr("overlap_threshold"); float evaluate_difficult = ctx.Attr("evaluate_difficult"); - std::string ap_type = ctx.Attr("ap_type"); + auto ap_type = GetAPType(ctx.Attr("ap_type")); - auto label_lod = input_label->lod(); + auto label_lod = in_label->lod(); + auto detect_lod = in_detect->lod(); PADDLE_ENFORCE_EQ(label_lod.size(), 1UL, "Only support one level sequence now."); - auto batch_size = label_lod[0].size() - 1; - - std::vector>>> gt_bboxes; - - std::vector< - std::map>>>> - detect_bboxes; - - if (platform::is_gpu_place(ctx.GetPlace())) { - framework::LoDTensor input_label_cpu; - framework::Tensor input_detect_cpu; - input_label_cpu.set_lod(input_label->lod()); - input_label_cpu.Resize(input_label->dims()); - input_detect_cpu.Resize(input_detect->dims()); - input_label_cpu.mutable_data(platform::CPUPlace()); - input_detect_cpu.mutable_data(platform::CPUPlace()); - framework::CopyFrom(*input_label, platform::CPUPlace(), - ctx.device_context(), &input_label_cpu); - framework::CopyFrom(*input_detect, platform::CPUPlace(), - ctx.device_context(), &input_detect_cpu); - GetBBoxes(input_label_cpu, input_detect_cpu, gt_bboxes, detect_bboxes); - } else { - GetBBoxes(*input_label, *input_detect, gt_bboxes, detect_bboxes); - } + PADDLE_ENFORCE_EQ(label_lod[0].size(), detect_lod[0].size(), + "The batch_size of input(Label) and input(Detection) " + "must be the same."); + + std::vector>> gt_boxes; + std::vector>>> detect_boxes; + + GetBoxes(*in_label, *in_detect, gt_boxes, detect_boxes); std::map label_pos_count; std::map>> true_pos; std::map>> false_pos; - CalcTrueAndFalsePositive(batch_size, evaluate_difficult, overlap_threshold, - gt_bboxes, detect_bboxes, label_pos_count, - true_pos, false_pos); + CalcTrueAndFalsePositive(gt_boxes, detect_boxes, evaluate_difficult, + overlap_threshold, label_pos_count, true_pos, + false_pos); T map = CalcMAP(ap_type, label_pos_count, true_pos, false_pos); - T* map_data = nullptr; - framework::Tensor map_cpu; - map_out->mutable_data(ctx.GetPlace()); - if 
(platform::is_gpu_place(ctx.GetPlace())) { - map_data = map_cpu.mutable_data(map_out->dims(), platform::CPUPlace()); - map_data[0] = map; - framework::CopyFrom(map_cpu, platform::CPUPlace(), ctx.device_context(), - map_out); + T* map_data = out_map->mutable_data(ctx.GetPlace()); + map_data[0] = map; + } + + protected: + struct Box { + Box(T xmin, T ymin, T xmax, T ymax) + : xmin(xmin), ymin(ymin), xmax(xmax), ymax(ymax), is_difficult(false) {} + + T xmin, ymin, xmax, ymax; + bool is_difficult; + }; + + inline T JaccardOverlap(const Box& box1, const Box& box2) const { + if (box2.xmin > box1.xmax || box2.xmax < box1.xmin || + box2.ymin > box1.ymax || box2.ymax < box1.ymin) { + return 0.0; } else { - map_data = map_out->mutable_data(ctx.GetPlace()); - map_data[0] = map; + T inter_xmin = std::max(box1.xmin, box2.xmin); + T inter_ymin = std::max(box1.ymin, box2.ymin); + T inter_xmax = std::min(box1.xmax, box2.xmax); + T inter_ymax = std::min(box1.ymax, box2.ymax); + + T inter_width = inter_xmax - inter_xmin; + T inter_height = inter_ymax - inter_ymin; + T inter_area = inter_width * inter_height; + + T bbox_area1 = (box1.xmax - box1.xmin) * (box1.ymax - box1.ymin); + T bbox_area2 = (box2.xmax - box2.xmin) * (box2.ymax - box2.ymin); + + return inter_area / (bbox_area1 + bbox_area2 - inter_area); } } - protected: - void GetBBoxes( - const framework::LoDTensor& input_label, - const framework::Tensor& input_detect, - std::vector>>>& - gt_bboxes, - std::vector< - std::map>>>>& - detect_bboxes) const { - const T* label_data = input_label.data(); - const T* detect_data = input_detect.data(); + void GetBoxes(const framework::LoDTensor& input_label, + const framework::LoDTensor& input_detect, + std::vector>>& gt_boxes, + std::vector>>>& + detect_boxes) const { + auto labels = framework::EigenTensor::From(input_label); + auto detect = framework::EigenTensor::From(input_detect); auto label_lod = input_label.lod(); - auto batch_size = label_lod[0].size() - 1; + auto detect_lod = input_detect.lod(); + + int batch_size = label_lod[0].size() - 1; auto label_index = label_lod[0]; - for (size_t n = 0; n < batch_size; ++n) { - std::map>> bboxes; + for (int n = 0; n < batch_size; ++n) { + std::map> boxes; for (int i = label_index[n]; i < label_index[n + 1]; ++i) { - std::vector> bbox; - math::GetBBoxFromLabelData(label_data + i * 6, 1, bbox); - int label = static_cast(label_data[i * 6]); - bboxes[label].push_back(bbox[0]); + Box box(labels(i, 2), labels(i, 3), labels(i, 4), labels(i, 5)); + int label = labels(i, 0); + auto is_difficult = labels(i, 1); + if (std::abs(is_difficult - 0.0) < 1e-6) + box.is_difficult = false; + else + box.is_difficult = true; + boxes[label].push_back(box); } - gt_bboxes.push_back(bboxes); + gt_boxes.push_back(boxes); } - size_t n = 0; - size_t detect_box_count = input_detect.dims()[0]; - for (size_t img_id = 0; img_id < batch_size; ++img_id) { - std::map>>> bboxes; - size_t cur_img_id = static_cast((detect_data + n * 7)[0]); - while (cur_img_id == img_id && n < detect_box_count) { - std::vector label; - std::vector score; - std::vector> bbox; - math::GetBBoxFromDetectData(detect_data + n * 7, 1, label, score, - bbox); - bboxes[label[0]].push_back(std::make_pair(score[0], bbox[0])); - ++n; - cur_img_id = static_cast((detect_data + n * 7)[0]); + auto detect_index = detect_lod[0]; + for (int n = 0; n < batch_size; ++n) { + std::map>> boxes; + for (int i = detect_index[n]; i < detect_index[n + 1]; ++i) { + Box box(detect(i, 2), detect(i, 3), detect(i, 4), detect(i, 5)); + int label = 
detect(i, 0); + auto score = detect(i, 1); + boxes[label].push_back(std::make_pair(score, box)); } - detect_bboxes.push_back(bboxes); + detect_boxes.push_back(boxes); } } void CalcTrueAndFalsePositive( - size_t batch_size, bool evaluate_difficult, float overlap_threshold, - const std::vector>>>& - gt_bboxes, - const std::vector< - std::map>>>>& - detect_bboxes, + const std::vector>>& gt_boxes, + const std::vector>>>& + detect_boxes, + bool evaluate_difficult, float overlap_threshold, std::map& label_pos_count, std::map>>& true_pos, std::map>>& false_pos) const { - for (size_t n = 0; n < batch_size; ++n) { - auto image_gt_bboxes = gt_bboxes[n]; - for (auto it = image_gt_bboxes.begin(); it != image_gt_bboxes.end(); - ++it) { + int batch_size = gt_boxes.size(); + for (int n = 0; n < batch_size; ++n) { + auto image_gt_boxes = gt_boxes[n]; + for (auto it = image_gt_boxes.begin(); it != image_gt_boxes.end(); ++it) { size_t count = 0; auto labeled_bboxes = it->second; if (evaluate_difficult) { @@ -179,16 +193,16 @@ class DetectionMAPOpKernel : public framework::OpKernel { } } - for (size_t n = 0; n < detect_bboxes.size(); ++n) { - auto image_gt_bboxes = gt_bboxes[n]; - auto detections = detect_bboxes[n]; + for (size_t n = 0; n < detect_boxes.size(); ++n) { + auto image_gt_boxes = gt_boxes[n]; + auto detections = detect_boxes[n]; - if (image_gt_bboxes.size() == 0) { + if (image_gt_boxes.size() == 0) { for (auto it = detections.begin(); it != detections.end(); ++it) { - auto pred_bboxes = it->second; + auto pred_boxes = it->second; int label = it->first; - for (size_t i = 0; i < pred_bboxes.size(); ++i) { - auto score = pred_bboxes[i].first; + for (size_t i = 0; i < pred_boxes.size(); ++i) { + auto score = pred_boxes[i].first; true_pos[label].push_back(std::make_pair(score, 0)); false_pos[label].push_back(std::make_pair(score, 1)); } @@ -198,28 +212,27 @@ class DetectionMAPOpKernel : public framework::OpKernel { for (auto it = detections.begin(); it != detections.end(); ++it) { int label = it->first; - auto pred_bboxes = it->second; - if (image_gt_bboxes.find(label) == image_gt_bboxes.end()) { - for (size_t i = 0; i < pred_bboxes.size(); ++i) { - auto score = pred_bboxes[i].first; + auto pred_boxes = it->second; + if (image_gt_boxes.find(label) == image_gt_boxes.end()) { + for (size_t i = 0; i < pred_boxes.size(); ++i) { + auto score = pred_boxes[i].first; true_pos[label].push_back(std::make_pair(score, 0)); false_pos[label].push_back(std::make_pair(score, 1)); } continue; } - auto matched_bboxes = image_gt_bboxes.find(label)->second; + auto matched_bboxes = image_gt_boxes.find(label)->second; std::vector visited(matched_bboxes.size(), false); // Sort detections in descend order based on scores - std::sort(pred_bboxes.begin(), pred_bboxes.end(), - math::SortScorePairDescend>); - for (size_t i = 0; i < pred_bboxes.size(); ++i) { - float max_overlap = -1.0; + std::sort(pred_boxes.begin(), pred_boxes.end(), + SortScorePairDescend); + for (size_t i = 0; i < pred_boxes.size(); ++i) { + T max_overlap = -1.0; size_t max_idx = 0; - auto score = pred_bboxes[i].first; + auto score = pred_boxes[i].first; for (size_t j = 0; j < matched_bboxes.size(); ++j) { - float overlap = - JaccardOverlap(pred_bboxes[i].second, matched_bboxes[j]); + T overlap = JaccardOverlap(pred_boxes[i].second, matched_bboxes[j]); if (overlap > max_overlap) { max_overlap = overlap; max_idx = j; @@ -249,7 +262,7 @@ class DetectionMAPOpKernel : public framework::OpKernel { } T CalcMAP( - std::string ap_type, const std::map& 
label_pos_count, + APType ap_type, const std::map& label_pos_count, const std::map>>& true_pos, const std::map>>& false_pos) const { T mAP = 0.0; @@ -266,18 +279,18 @@ class DetectionMAPOpKernel : public framework::OpKernel { GetAccumulation(label_true_pos, &tp_sum); std::vector fp_sum; GetAccumulation(label_false_pos, &fp_sum); - std::vector precision, recall; + std::vector precision, recall; size_t num = tp_sum.size(); // Compute Precision. for (size_t i = 0; i < num; ++i) { // CHECK_LE(tpCumSum[i], labelNumPos); - precision.push_back(static_cast(tp_sum[i]) / - static_cast(tp_sum[i] + fp_sum[i])); - recall.push_back(static_cast(tp_sum[i]) / label_num_pos); + precision.push_back(static_cast(tp_sum[i]) / + static_cast(tp_sum[i] + fp_sum[i])); + recall.push_back(static_cast(tp_sum[i]) / label_num_pos); } // VOC2007 style - if (ap_type == "11point") { - std::vector max_precisions(11, 0.0); + if (ap_type == APType::k11point) { + std::vector max_precisions(11, 0.0); int start_idx = num - 1; for (int j = 10; j >= 0; --j) for (int i = start_idx; i >= 0; --i) { @@ -292,7 +305,7 @@ class DetectionMAPOpKernel : public framework::OpKernel { } for (int j = 10; j >= 0; --j) mAP += max_precisions[j] / 11; ++count; - } else if (ap_type == "Integral") { + } else if (ap_type == APType::kIntegral) { // Nature integral float average_precisions = 0.; float prev_recall = 0.; diff --git a/paddle/operators/math/detection_util.cc b/paddle/operators/math/detection_util.cc deleted file mode 100644 index 4131a0cb0e..0000000000 --- a/paddle/operators/math/detection_util.cc +++ /dev/null @@ -1,22 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/operators/math/detection_util.h" -#include "paddle/operators/math/math_function.h" - -namespace paddle { -namespace operators { -namespace math {} // namespace math -} // namespace operators -} // namespace paddle diff --git a/paddle/operators/math/detection_util.cu b/paddle/operators/math/detection_util.cu deleted file mode 100644 index d2bb992396..0000000000 --- a/paddle/operators/math/detection_util.cu +++ /dev/null @@ -1,23 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#include "paddle/operators/math/detection_util.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/platform/cuda_helper.h" - -namespace paddle { -namespace operators { -namespace math {} // namespace math -} // namespace operators -} // namespace paddle diff --git a/paddle/operators/math/detection_util.h b/paddle/operators/math/detection_util.h deleted file mode 100644 index 2a4dadc545..0000000000 --- a/paddle/operators/math/detection_util.h +++ /dev/null @@ -1,128 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ -#pragma once -#include "paddle/framework/selected_rows.h" -#include "paddle/platform/device_context.h" - -namespace paddle { -namespace operators { -namespace math { - -template -struct BBox { - BBox(T x_min, T y_min, T x_max, T y_max) - : x_min(x_min), - y_min(y_min), - x_max(x_max), - y_max(y_max), - is_difficult(false) {} - - BBox() {} - - T get_width() const { return x_max - x_min; } - - T get_height() const { return y_max - y_min; } - - T get_center_x() const { return (x_min + x_max) / 2; } - - T get_center_y() const { return (y_min + y_max) / 2; } - - T get_area() const { return get_width() * get_height(); } - - // coordinate of bounding box - T x_min; - T y_min; - T x_max; - T y_max; - // whether difficult object (e.g. 
object with heavy occlusion is difficult) - bool is_difficult; -}; - -template -void GetBBoxFromDetectData(const T* detect_data, const size_t num_bboxes, - std::vector& labels, std::vector& scores, - std::vector>& bboxes) { - size_t out_offset = bboxes.size(); - labels.resize(out_offset + num_bboxes); - scores.resize(out_offset + num_bboxes); - bboxes.resize(out_offset + num_bboxes); - for (size_t i = 0; i < num_bboxes; ++i) { - labels[out_offset + i] = *(detect_data + i * 7 + 1); - scores[out_offset + i] = *(detect_data + i * 7 + 2); - BBox bbox; - bbox.x_min = *(detect_data + i * 7 + 3); - bbox.y_min = *(detect_data + i * 7 + 4); - bbox.x_max = *(detect_data + i * 7 + 5); - bbox.y_max = *(detect_data + i * 7 + 6); - bboxes[out_offset + i] = bbox; - }; -} - -template -void GetBBoxFromLabelData(const T* label_data, const size_t num_bboxes, - std::vector>& bboxes) { - size_t out_offset = bboxes.size(); - bboxes.resize(bboxes.size() + num_bboxes); - for (size_t i = 0; i < num_bboxes; ++i) { - BBox bbox; - bbox.x_min = *(label_data + i * 6 + 1); - bbox.y_min = *(label_data + i * 6 + 2); - bbox.x_max = *(label_data + i * 6 + 3); - bbox.y_max = *(label_data + i * 6 + 4); - T is_difficult = *(label_data + i * 6 + 5); - if (std::abs(is_difficult - 0.0) < 1e-6) - bbox.is_difficult = false; - else - bbox.is_difficult = true; - bboxes[out_offset + i] = bbox; - } -} - -template -inline float JaccardOverlap(const BBox& bbox1, const BBox& bbox2) { - if (bbox2.x_min > bbox1.x_max || bbox2.x_max < bbox1.x_min || - bbox2.y_min > bbox1.y_max || bbox2.y_max < bbox1.y_min) { - return 0.0; - } else { - float inter_x_min = std::max(bbox1.x_min, bbox2.x_min); - float inter_y_min = std::max(bbox1.y_min, bbox2.y_min); - float inter_x_max = std::min(bbox1.x_max, bbox2.x_max); - float inter_y_max = std::min(bbox1.y_max, bbox2.y_max); - - float inter_width = inter_x_max - inter_x_min; - float inter_height = inter_y_max - inter_y_min; - float inter_area = inter_width * inter_height; - - float bbox_area1 = bbox1.get_area(); - float bbox_area2 = bbox2.get_area(); - - return inter_area / (bbox_area1 + bbox_area2 - inter_area); - } -} - -template -bool SortScorePairDescend(const std::pair& pair1, - const std::pair& pair2) { - return pair1.first > pair2.first; -} - -// template <> -// bool SortScorePairDescend(const std::pair& pair1, -// const std::pair& pair2) { -// return pair1.first > pair2.first; -// } - -} // namespace math -} // namespace operators -} // namespace paddle diff --git a/python/paddle/v2/fluid/tests/test_detection_map_op.py b/python/paddle/v2/fluid/tests/test_detection_map_op.py index 50ce3afbb9..bb545031ae 100644 --- a/python/paddle/v2/fluid/tests/test_detection_map_op.py +++ b/python/paddle/v2/fluid/tests/test_detection_map_op.py @@ -10,14 +10,14 @@ class TestDetectionMAPOp(OpTest): def set_data(self): self.init_test_case() - self.mAP = [self.calc_map(self.tf_pos)] + self.mAP = [self.calc_map(self.tf_pos, self.tf_pos_lod)] self.label = np.array(self.label).astype('float32') self.detect = np.array(self.detect).astype('float32') self.mAP = np.array(self.mAP).astype('float32') self.inputs = { 'Label': (self.label, self.label_lod), - 'Detect': self.detect + 'Detection': (self.detect, self.detect_lod) } self.attrs = { @@ -31,29 +31,29 @@ class TestDetectionMAPOp(OpTest): def init_test_case(self): self.overlap_threshold = 0.3 self.evaluate_difficult = True - self.ap_type = "Integral" + self.ap_type = "integral" self.label_lod = [[0, 2, 4]] - # label xmin ymin xmax ymax difficult - self.label = [[1, 0.1, 
0.1, 0.3, 0.3, 0], [1, 0.6, 0.6, 0.8, 0.8, 1], - [2, 0.3, 0.3, 0.6, 0.5, 0], [1, 0.7, 0.1, 0.9, 0.3, 0]] + # label difficult xmin ymin xmax ymax + self.label = [[1, 0, 0.1, 0.1, 0.3, 0.3], [1, 1, 0.6, 0.6, 0.8, 0.8], + [2, 0, 0.3, 0.3, 0.6, 0.5], [1, 0, 0.7, 0.1, 0.9, 0.3]] - # image_id label score xmin ymin xmax ymax difficult + # label score xmin ymin xmax ymax difficult + self.detect_lod = [[0, 3, 7]] self.detect = [ - [0, 1, 0.3, 0.1, 0.0, 0.4, 0.3], [0, 1, 0.7, 0.0, 0.1, 0.2, 0.3], - [0, 1, 0.9, 0.7, 0.6, 0.8, 0.8], [1, 2, 0.8, 0.2, 0.1, 0.4, 0.4], - [1, 2, 0.1, 0.4, 0.3, 0.7, 0.5], [1, 1, 0.2, 0.8, 0.1, 1.0, 0.3], - [1, 3, 0.2, 0.8, 0.1, 1.0, 0.3] + [1, 0.3, 0.1, 0.0, 0.4, 0.3], [1, 0.7, 0.0, 0.1, 0.2, 0.3], + [1, 0.9, 0.7, 0.6, 0.8, 0.8], [2, 0.8, 0.2, 0.1, 0.4, 0.4], + [2, 0.1, 0.4, 0.3, 0.7, 0.5], [1, 0.2, 0.8, 0.1, 1.0, 0.3], + [3, 0.2, 0.8, 0.1, 1.0, 0.3] ] - # image_id label score false_pos false_pos - # [-1, 1, 3, -1, -1], - # [-1, 2, 1, -1, -1] - self.tf_pos = [[0, 1, 0.9, 1, 0], [0, 1, 0.7, 1, 0], [0, 1, 0.3, 0, 1], - [1, 1, 0.2, 1, 0], [1, 2, 0.8, 0, 1], [1, 2, 0.1, 1, 0], - [1, 3, 0.2, 0, 1]] + # label score true_pos false_pos + self.tf_pos_lod = [[0, 3, 7]] + self.tf_pos = [[1, 0.9, 1, 0], [1, 0.7, 1, 0], [1, 0.3, 0, 1], + [1, 0.2, 1, 0], [2, 0.8, 0, 1], [2, 0.1, 1, 0], + [3, 0.2, 0, 1]] - def calc_map(self, tf_pos): + def calc_map(self, tf_pos, tf_pos_lod): mAP = 0.0 count = 0 @@ -71,7 +71,7 @@ class TestDetectionMAPOp(OpTest): return accu_list label_count = collections.Counter() - for (label, xmin, ymin, xmax, ymax, difficult) in self.label: + for (label, difficult, xmin, ymin, xmax, ymax) in self.label: if self.evaluate_difficult: label_count[label] += 1 elif not difficult: @@ -79,7 +79,7 @@ class TestDetectionMAPOp(OpTest): true_pos = collections.defaultdict(list) false_pos = collections.defaultdict(list) - for (image_id, label, score, tp, fp) in tf_pos: + for (label, score, tp, fp) in tf_pos: true_pos[label].append([score, tp]) false_pos[label].append([score, fp]) @@ -103,22 +103,22 @@ class TestDetectionMAPOp(OpTest): recall.append(float(accu_tp_sum[i]) / label_pos_num) if self.ap_type == "11point": - max_precisions = [11.0, 0.0] + max_precisions = [0.0] * 11 start_idx = len(accu_tp_sum) - 1 - for j in range(10, 0, -1): - for i in range(start_idx, 0, -1): - if recall[i] < j / 10.0: + for j in range(10, -1, -1): + for i in range(start_idx, -1, -1): + if recall[i] < float(j) / 10.0: start_idx = i if j > 0: max_precisions[j - 1] = max_precisions[j] break - else: - if max_precisions[j] < accu_precision[i]: - max_precisions[j] = accu_precision[i] - for j in range(10, 0, -1): + else: + if max_precisions[j] < precision[i]: + max_precisions[j] = precision[i] + for j in range(10, -1, -1): mAP += max_precisions[j] / 11 count += 1 - elif self.ap_type == "Integral": + elif self.ap_type == "integral": average_precisions = 0.0 prev_recall = 0.0 for i in range(len(accu_tp_sum)): @@ -147,8 +147,17 @@ class TestDetectionMAPOpSkipDiff(TestDetectionMAPOp): self.evaluate_difficult = False - self.tf_pos = [[0, 1, 0.7, 1, 0], [0, 1, 0.3, 0, 1], [1, 1, 0.2, 1, 0], - [1, 2, 0.8, 0, 1], [1, 2, 0.1, 1, 0], [1, 3, 0.2, 0, 1]] + self.tf_pos_lod = [[0, 2, 6]] + # label score true_pos false_pos + self.tf_pos = [[1, 0.7, 1, 0], [1, 0.3, 0, 1], [1, 0.2, 1, 0], + [2, 0.8, 0, 1], [2, 0.1, 1, 0], [3, 0.2, 0, 1]] + + +class TestDetectionMAPOp11Point(TestDetectionMAPOp): + def init_test_case(self): + super(TestDetectionMAPOp11Point, self).init_test_case() + + self.ap_type = "11point" if __name__ == 
'__main__':
    unittest.main()
-- 
GitLab

From c32040c3f2402b86580d3a5a8280175a276bd5aa Mon Sep 17 00:00:00 2001
From: typhoonzero
Date: Thu, 1 Feb 2018 21:08:59 +0800
Subject: [PATCH 003/217] WIP: remove fan_in

---
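Note: this change relies on a pure naming convention to pair trainer sends
with pserver-side variables: each trainer renames its gradient variable to
"<name>.trainer_<id>" (see the transpiler hunk below), and the pserver
recovers the original name by stripping that suffix. A minimal sketch of
the recovery step (the variable names are made up for illustration):

def orig_var_name(varname):
    # "fc_0.w_0@GRAD.trainer_3" -> "fc_0.w_0@GRAD"; names without the
    # suffix pass through unchanged.
    suff_idx = varname.find(".trainer_")
    return varname[:suff_idx] if suff_idx >= 0 else varname

assert orig_var_name("fc_0.w_0@GRAD.trainer_3") == "fc_0.w_0@GRAD"
assert orig_var_name("fc_0.b_0") == "fc_0.b_0"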
back variables to recv_op.
         .AddCustomChecker([](const std::string &ip) { return !ip.empty(); });
     AddAttr<framework::BlockDesc *>(kOptimizeBlock,
                                     "BlockID to run on server side.");
-    AddAttr<std::vector<std::string>>(
-        "ParamList", "type list of string",
-        "grad->param name mapping to find which parameters to optimize.")
-        .SetDefault({});
-    AddAttr<std::vector<std::string>>(
-        "GradList", "type list of string",
-        "grad->param name mapping to find which parameters to optimize.")
-        .SetDefault({});
-    AddAttr<int>("Fanin", "type int",
-                 "Number of trainers in the current cluster job")
-        .SetDefault(1);
   }
 };
 
diff --git a/python/paddle/v2/fluid/distribute_transpiler.py b/python/paddle/v2/fluid/distribute_transpiler.py
index 121b407cae..4533405e46 100644
--- a/python/paddle/v2/fluid/distribute_transpiler.py
+++ b/python/paddle/v2/fluid/distribute_transpiler.py
@@ -82,6 +82,7 @@ class DistributeTranspiler:
     def transpile(self,
                   optimize_ops,
                   params_grads,
+                  trainer_id,
                   program=None,
                   pservers="127.0.0.1:6174",
                   trainers=1,
@@ -98,10 +99,19 @@ class DistributeTranspiler:
         :param optimize_ops: op list of optimization, should be the
                              return value of Optimizer.minimize
         :type optimize_ops: list
+        :param params_grads: list of tuple(weight, gradient)
+        :type params_grads: list
+        :param trainer_id: a unique id for each trainer in the job.
+        :type trainer_id: int
         :param program: program to optimize, default is default_main_program
+        :type program: Program
         :param pservers: parameter server endpoints like "m1:6174,m2:6174"
         :type pservers: string
-        :return: return a list of programs
+        :param trainers: total number of workers/trainers in the job
+        :type trainers: int
+        :param split_method: a function that determines how to split variables
+            evenly across the different servers.
+        :type split_method: function
         """
         assert (callable(split_method))
         if program is None:
@@ -109,6 +119,11 @@ class DistributeTranspiler:
         self.program = program
         self.trainers = trainers
         self.optimize_ops = optimize_ops
+        # TODO(typhoonzero): currently trainer_id is fetched from the cluster
+        # system (e.g. Kubernetes); we should port this to use etcd later when
+        # developing fluid distributed training with fault tolerance.
+        self.trainer_id = trainer_id
+
         # steps to transpile:
         # 1. split variable to multiple blocks, aligned by product(dim[1:]) (width).
         # 2. modify trainer program: add split_op to each Grad.
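The new trainer_id argument threads a per-trainer identity through the transpiler. A minimal sketch of a trainer-side call, in the spirit of the notest_recognize_digits_conv_dist.py change later in this patch; the network, endpoints, and trainer count here are illustrative assumptions, not part of the patch:

    import paddle.v2.fluid as fluid

    # A toy regression network, only so that minimize() has something to do.
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    y_predict = fluid.layers.fc(input=x, size=1)
    cost = fluid.layers.square_error_cost(input=y_predict, label=y)
    avg_cost = fluid.layers.mean(x=cost)

    optimize_ops, params_grads = fluid.optimizer.SGD(
        learning_rate=0.001).minimize(avg_cost)

    t = fluid.DistributeTranspiler()
    t.transpile(
        optimize_ops,
        params_grads,
        0,  # trainer_id, must be unique for each trainer in the job
        pservers="127.0.0.1:6174,127.0.0.1:6175",
        trainers=2)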
@@ -189,10 +204,17 @@ class DistributeTranspiler:
                 block_map[varname].append((long(offset), long(size)))
         for varname, splited in block_map.iteritems():
             orig_var = program.global_block().vars[varname]
-            var_mapping[varname] = []
+
             if len(splited) == 1:
-                var_mapping[varname] = [orig_var]
+                # rename var to the trainer_id var
+                new_var_name = "%s.trainer_%d" % \
+                    (orig_var.name, self.trainer_id)
+                program.global_block().rename_var(varname, new_var_name)
+                var_mapping[varname] = \
+                    [program.global_block().var(new_var_name)]
                 continue
+
+            var_mapping[varname] = []
             orig_shape = orig_var.shape
             orig_dim1_flatten = 1
             if len(orig_shape) >= 2:
@@ -205,11 +227,13 @@ class DistributeTranspiler:
                 if len(orig_shape) >= 2:
                     splited_shape.extend(orig_shape[1:])
                 var = program.global_block().create_var(
-                    name="%s.block%d" % (varname, i),
+                    name="%s.block%d.trainer_%d" %
+                    (varname, i, self.trainer_id),
                     persistable=False,
                     dtype=orig_var.dtype,
                     shape=splited_shape)  # flattened split var
                 var_mapping[varname].append(var)
+            program.global_block().sync_with_cpp()
         return var_mapping
 
     def _clone_var(self, block, var):
@@ -449,6 +473,7 @@ class DistributeTranspiler:
         """
         # step5
         pserver_program = Program()
+        recv_inputs = []
         for v in self.param_grad_ep_mapping[endpoint]["params"]:
             self._clone_var(pserver_program.global_block(), v)
         for v in self.param_grad_ep_mapping[endpoint]["grads"]:
@@ -457,13 +482,19 @@ class DistributeTranspiler:
             pserver_program.global_block().create_var(
                 name=v.name, persistable=True, dtype=v.dtype, shape=v.shape)
             for trainer_id in xrange(self.trainers):
+                # change the client side var name back to its origin name by
+                # removing the ".trainer_%d" suffix
+                suff_idx = v.name.find(".trainer_")
+                if suff_idx >= 0:
+                    orig_var_name = v.name[:suff_idx]
                 print("create variable for program: %s.trainer_%d" %
-                      (v.name, trainer_id))
-                pserver_program.global_block().create_var(
-                    name="%s.trainer_%d" % (v.name, trainer_id),
+                      (orig_var_name, trainer_id))
+                var = pserver_program.global_block().create_var(
+                    name="%s.trainer_%d" % (orig_var_name, trainer_id),
                     persistable=True,
                     dtype=v.dtype,
                     shape=v.shape)
+                recv_inputs.append(var)
         # step6
         optimize_sub_program = Program()
         # Iterate through the ops and append ops as needed
@@ -481,20 +512,20 @@ class DistributeTranspiler:
         # Append the listen_and_serv op
         pserver_program.global_block().append_op(
             type="listen_and_serv",
-            inputs={},
+            inputs={'X': recv_inputs},
             outputs={},
             attrs={
                 "OptimizeBlock": optimize_sub_program.global_block(),
                 "endpoint": endpoint,
-                "ParamList": [
-                    p.name
-                    for p in self.param_grad_ep_mapping[endpoint]["params"]
-                ],
-                "GradList": [
-                    p.name
-                    for p in self.param_grad_ep_mapping[endpoint]["grads"]
-                ],
-                "Fanin": self.trainers
+                # "ParamList": [
+                #     p.name
+                #     for p in self.param_grad_ep_mapping[endpoint]["params"]
+                # ],
+                # "GradList": [
+                #     p.name
+                #     for p in self.param_grad_ep_mapping[endpoint]["grads"]
+                # ],
+                # "Fanin": self.trainers
             })
         pserver_program.sync_with_cpp()
         return pserver_program
diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/v2/fluid/framework.py
index ae98e299a4..415960f512 100644
--- a/python/paddle/v2/fluid/framework.py
+++ b/python/paddle/v2/fluid/framework.py
@@ -282,6 +282,10 @@ class Variable(object):
     def name(self):
         return self.desc.name()
 
+    @name.setter
+    def name(self, new_name):
+        self.desc.set_name(new_name)
+
     @property
     def shape(self):
         # convert to tuple, make it the same as the numpy API.
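The ".trainer_%d" suffix introduced above is the naming contract between trainer and parameter server: each trainer renames its gradient blocks, and get_pserver_program() strips the suffix again to recover the original variable. A small self-contained sketch of that round trip (the variable name is made up):

    # Mirror of the "%s.trainer_%d" naming and of the v.name.find(".trainer_")
    # suffix-stripping used by the transpiler above.
    def to_trainer_name(var_name, trainer_id):
        return "%s.trainer_%d" % (var_name, trainer_id)

    def to_origin_name(var_name):
        suff_idx = var_name.find(".trainer_")
        return var_name[:suff_idx] if suff_idx >= 0 else var_name

    assert to_trainer_name("fc_0.w_0@GRAD", 1) == "fc_0.w_0@GRAD.trainer_1"
    assert to_origin_name("fc_0.w_0@GRAD.trainer_1") == "fc_0.w_0@GRAD"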
@@ -530,6 +534,12 @@ class Operator(object): """ return self.desc.input(name) + def rename_input(self, old_name, new_name): + self.desc.rename_input(old_name, new_name) + + def rename_output(self, old_name, new_name): + self.desc.rename_output(old_name, new_name) + @property def input_names(self): """ @@ -539,6 +549,14 @@ class Operator(object): """ return self.desc.input_names() + @property + def input_arg_names(self): + return self.desc.input_arg_names() + + @property + def output_arg_names(self): + return self.desc.output_arg_names() + def output(self, name): """ Get output arguments by the output parameter name @@ -716,6 +734,22 @@ class Block(object): def has_var(self, name): return name in self.vars + def rename_var(self, name, new_name): + """ + Rename variable in vars and ops' inputs and outputs + """ + if not self.has_var(name): + raise ValueError("var %s is not in current" % name) + orig_var = self.var(name) + del self.vars[name] + orig_var.name = new_name + self.vars[new_name] = orig_var + for op in self.ops: + if name in op.input_arg_names: + op.rename_input(name, new_name) + if name in op.output_arg_names: + op.rename_output(name, new_name) + def create_parameter(self, *args, **kwargs): global_block = self.program.global_block() param = Parameter(global_block, *args, **kwargs) @@ -803,6 +837,7 @@ class Block(object): for p in other.iter_parameters(): assert isinstance(p, Parameter) v = self.vars.get(p.name, None) + print("var shape to copy", v) if v is None: raise ValueError("copy_param_info_from should be invoked with " "same topology") diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py b/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py index f18ca05c78..2461ebdf08 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py @@ -58,14 +58,19 @@ trainers = int(os.getenv("TRAINERS")) # total trainer count current_endpoint = os.getenv("SERVER_ENDPOINT") # current pserver endpoint training_role = os.getenv("TRAINING_ROLE", "TRAINER") # get the training role: trainer/pserver +if not current_endpoint: + print("need env SERVER_ENDPOINT") + exit(1) + t = fluid.DistributeTranspiler() t.transpile( - optimize_ops, params_grads, pservers=pserver_endpoints, trainers=trainers) + optimize_ops, + params_grads, + 0, + pservers=pserver_endpoints, + trainers=trainers) if training_role == "PSERVER": - if not current_endpoint: - print("need env SERVER_ENDPOINT") - exit(1) pserver_prog = t.get_pserver_program(current_endpoint) pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) exe.run(pserver_startup) -- GitLab From f2e92c5d1371c49761fa10bbe430f939e8ceea10 Mon Sep 17 00:00:00 2001 From: qiao hai-jun Date: Sat, 3 Feb 2018 17:24:07 +0800 Subject: [PATCH 004/217] Update start_mpi_train.sh --- .../cluster_train_v2/openmpi/start_mpi_train.sh | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/paddle/scripts/cluster_train_v2/openmpi/start_mpi_train.sh b/paddle/scripts/cluster_train_v2/openmpi/start_mpi_train.sh index c645495448..2a7f463627 100644 --- a/paddle/scripts/cluster_train_v2/openmpi/start_mpi_train.sh +++ b/paddle/scripts/cluster_train_v2/openmpi/start_mpi_train.sh @@ -15,10 +15,14 @@ PADDLE_CLUSTER_TRAIN=True env # start pserver -stdbuf -oL nohup paddle pserver --port=$PADDLE_INIT_PORT --ports_num=$PADDLE_INIT_PORTS_NUM \ - 
--ports_num_for_sparse=$PADDLE_INIT_PORTS_NUM_FOR_SPARSE --nics=$NICS \
+stdbuf -oL nohup paddle pserver \
+  --port=$PADDLE_INIT_PORT \
+  --ports_num=$PADDLE_INIT_PORTS_NUM \
+  --ports_num_for_sparse=$PADDLE_INIT_PORTS_NUM_FOR_SPARSE \
+  --nics=$NICS \
   --comment=paddle_cluster_pserver \
-  --num_gradient_servers=$PADDLE_INIT_NUM_GRADIENT_SERVERS &> logs/pserver.log &
+  --num_gradient_servers=$PADDLE_INIT_NUM_GRADIENT_SERVERS \
+  &> logs/pserver.log &
 
 # start trainer
 # NOTE: train.py will use the above environment variables as configuration
-- 
GitLab


From 7ccbdb1b274308c9c11df06d4f8db2d07e491ea9 Mon Sep 17 00:00:00 2001
From: typhoonzero 
Date: Tue, 6 Feb 2018 15:04:37 +0800
Subject: [PATCH 005/217] for test

---
 paddle/framework/block_desc.cc                | 24 +++++++++++++++++++
 paddle/framework/block_desc.h                 |  2 ++
 paddle/pybind/protobuf.cc                     |  6 +++++
 .../paddle/v2/fluid/distribute_transpiler.py  |  2 +-
 python/paddle/v2/fluid/framework.py           | 14 ++++------
 5 files changed, 37 insertions(+), 11 deletions(-)

diff --git a/paddle/framework/block_desc.cc b/paddle/framework/block_desc.cc
index dd2ed87252..8579582e7e 100644
--- a/paddle/framework/block_desc.cc
+++ b/paddle/framework/block_desc.cc
@@ -42,6 +42,30 @@ bool BlockDesc::HasVar(const std::string &name) const {
   return vars_.find(name) != vars_.end();
 }
 
+void BlockDesc::RenameVar(const std::string &old_name,
+                          const std::string &new_name) {
+  if (this->HasVar(old_name)) {
+    auto *var = this->Var(old_name);
+    var->SetName(new_name);
+    // transfer ownership; aliasing the raw pointer with reset() and then
+    // erasing old_name would free the VarDesc and leave a dangling entry.
+    vars_[new_name] = std::move(vars_[old_name]);
+    vars_.erase(old_name);
+    // rename inputs and outputs
+    for (const auto &op : ops_) {
+      auto *it = op.get();
+      for (auto in_name : it->InputArgumentNames()) {
+        if (in_name == old_name) {
+          it->RenameInput(old_name, new_name);
+        }
+      }
+      for (auto out_name : it->OutputArgumentNames()) {
+        if (out_name == old_name) {
+          it->RenameOutput(old_name, new_name);
+        }
+      }
+    }
+  }
+}
+
 VarDesc *BlockDesc::FindVarRecursive(const std::string &name) const {
   if (name == kEmptyVarName) return nullptr;
 
diff --git a/paddle/framework/block_desc.h b/paddle/framework/block_desc.h
index 4b609e4bcb..e87a543909 100644
--- a/paddle/framework/block_desc.h
+++ b/paddle/framework/block_desc.h
@@ -55,6 +55,8 @@ class BlockDesc {
 
   bool HasVar(const std::string &var_name) const;
 
+  void RenameVar(const std::string &old_name, const std::string &new_name);
+
   VarDesc *FindVarRecursive(const std::string &name_bytes) const;
 
   VarDesc &FindRecursiveOrCreateVar(const std::string &name_bytes);
 
diff --git a/paddle/pybind/protobuf.cc b/paddle/pybind/protobuf.cc
index 371d6119d4..f39dc47262 100644
--- a/paddle/pybind/protobuf.cc
+++ b/paddle/pybind/protobuf.cc
@@ -171,6 +171,12 @@ void BindBlockDesc(py::module &m) {
              std::string name = byte_name;
              return self.HasVar(name);
            })
+      .def("rename_var",
+           [](BlockDesc &self, py::bytes byte_name, py::bytes byte_name_new) {
+             std::string name = byte_name;
+             std::string new_name = byte_name_new;
+             return self.RenameVar(name, new_name);
+           })
       .def("has_var_recursive",
           [](BlockDesc &self, py::bytes byte_name) {
             std::string name = byte_name;
diff --git a/python/paddle/v2/fluid/distribute_transpiler.py b/python/paddle/v2/fluid/distribute_transpiler.py
index 4533405e46..89e467b0bd 100644
--- a/python/paddle/v2/fluid/distribute_transpiler.py
+++ b/python/paddle/v2/fluid/distribute_transpiler.py
@@ -203,7 +203,7 @@ class DistributeTranspiler:
                 block_map[varname] = []
             block_map[varname].append((long(offset), long(size)))
         for varname, splited in block_map.iteritems():
-            orig_var = 
program.global_block().vars[varname] + orig_var = program.global_block().var(varname) if len(splited) == 1: # rename var to the trainer_id var diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/v2/fluid/framework.py index 415960f512..5e7dd98373 100644 --- a/python/paddle/v2/fluid/framework.py +++ b/python/paddle/v2/fluid/framework.py @@ -740,15 +740,9 @@ class Block(object): """ if not self.has_var(name): raise ValueError("var %s is not in current" % name) - orig_var = self.var(name) - del self.vars[name] - orig_var.name = new_name - self.vars[new_name] = orig_var - for op in self.ops: - if name in op.input_arg_names: - op.rename_input(name, new_name) - if name in op.output_arg_names: - op.rename_output(name, new_name) + self.desc.rename_var(name, new_name) + self.sync_with_cpp() + print("renamed var: ", self.var(new_name)) def create_parameter(self, *args, **kwargs): global_block = self.program.global_block() @@ -837,7 +831,7 @@ class Block(object): for p in other.iter_parameters(): assert isinstance(p, Parameter) v = self.vars.get(p.name, None) - print("var shape to copy", v) + print("var shape to copy", v, p) if v is None: raise ValueError("copy_param_info_from should be invoked with " "same topology") -- GitLab From 67881ad22a40628db7a5eb73a1f01c4df11ee9e1 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Tue, 6 Feb 2018 22:21:47 +0000 Subject: [PATCH 006/217] compile with nccl2 --- CMakeLists.txt | 1 - Dockerfile | 2 +- paddle/platform/CMakeLists.txt | 2 +- paddle/platform/dynload/CMakeLists.txt | 2 +- paddle/scripts/docker/build.sh | 5 ++++- 5 files changed, 7 insertions(+), 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 49334279f6..3b4c7e65c6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -141,7 +141,6 @@ include(external/boost) # download boost include(external/any) # download libn::any include(external/eigen) # download eigen3 include(external/pybind11) # download pybind11 -include(external/nccl) include(external/cares) include(external/grpc) diff --git a/Dockerfile b/Dockerfile index 6ac9901ac6..ed559ca5c4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -22,7 +22,7 @@ COPY ./paddle/scripts/docker/root/ /root/ RUN apt-get update && \ apt-get install -y \ - git python-pip python-dev openssh-server bison libnccl-dev \ + git python-pip python-dev openssh-server bison \ wget unzip unrar tar xz-utils bzip2 gzip coreutils ntp \ curl sed grep graphviz libjpeg-dev zlib1g-dev \ python-matplotlib gcc-4.8 g++-4.8 \ diff --git a/paddle/platform/CMakeLists.txt b/paddle/platform/CMakeLists.txt index d68caea997..83164f07aa 100644 --- a/paddle/platform/CMakeLists.txt +++ b/paddle/platform/CMakeLists.txt @@ -1,5 +1,5 @@ if(WITH_GPU) - cc_library(enforce SRCS enforce.cc DEPS nccl) + cc_library(enforce SRCS enforce.cc DEPS) else() cc_library(enforce SRCS enforce.cc) endif() diff --git a/paddle/platform/dynload/CMakeLists.txt b/paddle/platform/dynload/CMakeLists.txt index cf2081b434..264b4ebf2c 100644 --- a/paddle/platform/dynload/CMakeLists.txt +++ b/paddle/platform/dynload/CMakeLists.txt @@ -1,4 +1,4 @@ cc_library(dynamic_loader SRCS dynamic_loader.cc DEPS glog gflags enforce) nv_library(dynload_cuda SRCS cublas.cc cudnn.cc curand.cc nccl.cc - DEPS dynamic_loader nccl) + DEPS dynamic_loader) cc_library(dynload_warpctc SRCS warpctc.cc DEPS dynamic_loader warpctc) diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index ba496db5f8..26ecb128eb 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -34,6 +34,7 @@ 
function cmake_gen() { Configuring cmake in /paddle/build ... -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-Release} ${PYTHON_FLAGS} + -DWITH_DSO=ON -DWITH_DOC=OFF -DWITH_GPU=${WITH_GPU:-OFF} -DWITH_DISTRIBUTE=${WITH_DISTRIBUTE:-OFF} @@ -57,6 +58,7 @@ EOF cmake .. \ -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-Release} \ ${PYTHON_FLAGS} \ + -DWITH_DSO=ON \ -DWITH_DOC=OFF \ -DWITH_GPU=${WITH_GPU:-OFF} \ -DWITH_DISTRIBUTE=${WITH_DISTRIBUTE:-OFF} \ @@ -173,7 +175,7 @@ EOF if [[ ${WITH_GPU} == "ON" ]]; then NCCL_DEPS="apt-get install -y libnccl-dev &&" else - NCCL_DEPS="" + NCCL_DEPS="" fi cat >> /paddle/build/Dockerfile < Date: Wed, 7 Feb 2018 23:53:15 +0000 Subject: [PATCH 007/217] add ncclGroup; it is necessary in nccl2 --- paddle/platform/nccl_test.cu | 2 ++ 1 file changed, 2 insertions(+) diff --git a/paddle/platform/nccl_test.cu b/paddle/platform/nccl_test.cu index ef6d845874..5a75ff3382 100644 --- a/paddle/platform/nccl_test.cu +++ b/paddle/platform/nccl_test.cu @@ -89,6 +89,7 @@ TEST(NCCL, all_reduce) { VLOG(1) << "Invoking ncclAllReduce"; + dynload::ncclGroupStart(); for (int i = 0; i < dev_count; ++i) { VLOG(1) << "Invoking ncclAllReduce with device " << i; SetDeviceId(i); @@ -97,6 +98,7 @@ TEST(NCCL, all_reduce) { ncclSum, comms[i], data[i]->dev_ctx.stream())); VLOG(1) << "Invoked ncclAllReduce for device " << i; } + dynload::ncclGroupEnd(); VLOG(1) << "Invoked ncclAllReduce"; -- GitLab From dd6b59da6beca7ee66ad86ae7899a63a8cf57a6e Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Thu, 8 Feb 2018 13:56:41 +0800 Subject: [PATCH 008/217] add Python interface of prior_boxes --- python/paddle/v2/fluid/layers/nn.py | 152 +++++++++++++++++++++++++++- 1 file changed, 151 insertions(+), 1 deletion(-) diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index a79479f469..891d89a24b 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -14,7 +14,7 @@ """ All layers just related to the neural network. """ - +import math from ..layer_helper import LayerHelper from ..initializer import Normal, Constant from ..framework import Variable @@ -65,6 +65,7 @@ __all__ = [ 'beam_search', 'row_conv', 'multiplex', + 'prior_boxes', ] @@ -2993,3 +2994,152 @@ def multiplex(inputs, index): 'Ids': index}, outputs={'Out': [out]}) return out + + +def prior_box(input, + image, + min_sizes, + max_sizes, + aspect_ratios, + variance, + flip, + clip, + step_w, + step_h, + offset, + name=None): + """ + **Prior_box** + + """ + helper = LayerHelper("prior_box", **locals()) + dtype = helper.input_dtype() + + box = helper.create_tmp_variable(dtype) + var = helper.create_tmp_variable(dtype) + helper.append_op( + type="prior_box", + inputs={"Input": input, + "Image": image}, + outputs={"Boxes": box, + "Variances": var}, + attrs={ + 'min_sizes': min_sizes, + 'max_sizes': max_sizes, + 'aspect_ratios': aspect_ratios, + 'variances': variance, + 'flip': flip, + 'clip': clip, + 'step_w': step_w, + 'step_h': step_h, + 'offset': offset + }) + return box, var + + +def prior_boxes(input_layers, + image, + min_ratio, + max_ratio, + steps, + aspect_ratios, + min_dim, + step_w=None, + step_h=None, + offset=0.5, + variance=[0.1], + flip=True, + clip=True, + name=None): + """ + **Prior_boxes** + e.g. 
+ prior_boxes( + input_layers = [conv1, conv2, conv3, conv4, conv5, conv6], + image = data, + min_ratio = 0.2, + max_ratio = 0.9, + steps = [8, 16, 32, 64, 100, 300], + aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]], + min_dim = 300, + offset = 0.5, + variance = [0.1], + flip=True, + clip=True) + """ + assert isinstance(input_layers, list), 'input_layer should be a list.' + assert not step_h and not steps, '' + assert not step_w and not steps, '' + + num_layer = len(input_layers) + assert num_layer > 2 # TODO(zcd): currently, num_layer must be bigger than two. + + min_sizes = [] + max_sizes = [] + if num_layer > 2: + step = int(math.floor((max_ratio - min_ratio) / (num_layer - 2))) + for ratio in xrange(min_ratio, max_ratio + 1, step): + min_sizes.append(min_dim * ratio) + max_sizes.append(min_dim * (ratio + step)) + min_sizes = [min_dim * .10] + min_sizes + max_sizes = [min_dim * .20] + max_sizes + + if step_h: + assert isinstance(step_h,list) and len(step_h) == num_layer, \ + 'step_h should be list and input_layers and step_h should have same length' + if step_w: + assert isinstance(step_w,list) and len(step_w) == num_layer, \ + 'step_w should be list and input_layers and step_w should have same length' + if steps: + assert isinstance(steps,list) and len(step_w) == num_layer, \ + 'steps should be list and input_layers and step_w should have same length' + step_w = steps + step_h = steps + if aspect_ratios: + assert isinstance(aspect_ratios, list) and len(aspect_ratios) == num_layer, \ + 'aspect_ratios should be list and input_layers and aspect_ratios should ' \ + 'have same length' + + helper = LayerHelper("prior_box", **locals()) + dtype = helper.input_dtype() + + box_results = [] + var_results = [] + for i, input in enumerate(input_layers): + min_size = min_sizes[i] + max_size = max_sizes[i] + if isinstance(min_size, list): + min_size = [min_size] + if isinstance(max_size, list): + max_size = [max_size] + if aspect_ratios: + aspect_ratio = aspect_ratios[i] + if isinstance(aspect_ratio, list): + aspect_ratio = [aspect_ratio] + + box, var = prior_box(input, image, min_size, max_size, aspect_ratios, + variance, flip, clip, step_w[i], step_h[i], offset) + + box_results.append(box) + var_results.append(var) + + if len(box_results) == 1: + box = box_results[0] + var = var_results[0] + else: + axis = 1 + box = helper.create_tmp_variable(dtype) + helper.append_op( + type="concat", + inputs={"X": box_results}, + outputs={"Out": box}, + attrs={'axis': axis}) + + var = helper.create_tmp_variable(dtype) + helper.append_op( + type="concat", + inputs={"X": var_results}, + outputs={"Out": var}, + attrs={'axis': axis}) + + return box, var -- GitLab From 45467d806d4aacfc46f82da91b81804478c391bb Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Thu, 8 Feb 2018 17:01:21 +0800 Subject: [PATCH 009/217] improve split and concat op --- paddle/framework/ddim.cc | 10 +++ paddle/framework/ddim.h | 2 + paddle/operators/concat_op.h | 81 ++++++++++++++----- paddle/operators/split_op.h | 42 +++++++--- python/paddle/v2/fluid/tests/test_split_op.py | 12 +-- 5 files changed, 111 insertions(+), 36 deletions(-) diff --git a/paddle/framework/ddim.cc b/paddle/framework/ddim.cc index 8b6f42b82d..c9d020680d 100644 --- a/paddle/framework/ddim.cc +++ b/paddle/framework/ddim.cc @@ -314,5 +314,15 @@ DDim stride(const DDim& ddim) { } return framework::make_ddim(strides); } + +DDim stride_numel(const framework::DDim& ddim) { + std::vector strides(ddim.size()); + strides[ddim.size() - 1] = ddim[ddim.size() - 1]; + for (int 
i = ddim.size() - 2; i >= 0; --i) { + strides[i] = strides[i + 1] * ddim[i]; + } + return framework::make_ddim(strides); +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/ddim.h b/paddle/framework/ddim.h index 4ca5e49566..ff3efaee83 100644 --- a/paddle/framework/ddim.h +++ b/paddle/framework/ddim.h @@ -125,6 +125,8 @@ DDim flatten_to_2d(const DDim& src, int num_col_dims); DDim flatten_to_1d(const DDim& src); DDim stride(const DDim& ddim); + +DDim stride_numel(const DDim& ddim); } // namespace framework } // namespace paddle diff --git a/paddle/operators/concat_op.h b/paddle/operators/concat_op.h index de4011585a..92ee8d3b18 100644 --- a/paddle/operators/concat_op.h +++ b/paddle/operators/concat_op.h @@ -15,8 +15,8 @@ limitations under the License. */ #pragma once #include +#include "paddle/framework/ddim.h" #include "paddle/framework/op_registry.h" -#include "paddle/operators/strided_memcpy.h" namespace paddle { namespace operators { @@ -28,17 +28,38 @@ class ConcatKernel : public framework::OpKernel { auto ins = ctx.MultiInput("X"); auto* out = ctx.Output("Out"); int64_t axis = static_cast(ctx.Attr("axis")); - const size_t n = ins.size(); + auto place = ctx.GetPlace(); + out->mutable_data(place); + + auto out_stride = framework::stride_numel(out->dims()); + int64_t before = out_stride[0] / out_stride[axis]; + int64_t out_after = out_stride[axis]; + size_t output_offset = 0; - out->mutable_data(ctx.GetPlace()); - auto out_stride = framework::stride(out->dims()); - for (size_t i = 0; i < n; i++) { - auto& in = ins[i]; - auto axis_dim = in->dims()[axis]; - auto in_stride = framework::stride(in->dims()); - StridedMemcpy(ctx.device_context(), in->data(), in_stride, - in->dims(), out_stride, out->data() + output_offset); - output_offset += axis_dim * in_stride[axis]; + for (auto* in : ins) { + auto in_stride = framework::stride_numel(in->dims()); + int64_t in_after = in_stride[axis]; + for (int64_t i = 0; i < before; ++i) { + if (platform::is_cpu_place(place)) { + auto& cpu_place = boost::get(place); + memory::Copy( + cpu_place, out->data() + output_offset + i * out_after, + cpu_place, in->data() + i * in_after, sizeof(T) * in_after); + } else { +#ifdef PADDLE_WITH_CUDA + auto& gpu_place = boost::get(place); + auto& cuda_ctx = + reinterpret_cast(dev_ctx); + memory::Copy(gpu_place, out->data() + + output_offset + i * out_after, + gpu_place, in->data() + i * in_after, + sizeof(T) * in_after, cuda_ctx.stream())); +#else + PADDLE_THROW("Paddle is not compiled with GPU"); +#endif + } + } + output_offset += in_after; } } }; @@ -50,17 +71,37 @@ class ConcatGradKernel : public framework::OpKernel { auto* in = ctx.Input(framework::GradVarName("Out")); auto outs = ctx.MultiOutput(framework::GradVarName("X")); int64_t axis = static_cast(ctx.Attr("axis")); - const size_t n = outs.size(); size_t input_offset = 0; - auto in_stride = framework::stride(in->dims()); - for (size_t i = 0; i < n; i++) { - auto& out = outs[i]; + auto in_stride = framework::stride_numel(in->dims()); + auto place = ctx.GetPlace(); + + // numel before the specified axis + int64_t before = in_stride[0] / in_stride[axis]; + int64_t in_after = in_stride[axis]; + for (auto& out : outs) { out->mutable_data(ctx.GetPlace()); - size_t axis_dim = out->dims()[axis]; - auto out_stride = framework::stride(out->dims()); - StridedMemcpy(ctx.device_context(), in->data() + input_offset, - in_stride, out->dims(), out_stride, out->data()); - input_offset += axis_dim * in_stride[axis]; + auto out_stride = 
framework::stride_numel(out->dims()); + int64_t out_after = out_stride[axis]; + for (int64_t i = 0; i < before; ++i) { + if (platform::is_cpu_place(place)) { + auto& cpu_place = boost::get(place); + memory::Copy(cpu_place, out->data() + i * out_after, cpu_place, + in->data() + input_offset + i * in_after, + sizeof(T) * out_after); + } else { +#ifdef PADDLE_WITH_CUDA + auto& gpu_place = boost::get(place); + auto& cuda_ctx = + reinterpret_cast(dev_ctx); + memory::Copy(gpu_place, out->data() + i * out_after, gpu_place, + in->data() + input_offset + i * in_after, + sizeof(T) * out_after, cuda_ctx.stream()); +#else + PADDLE_THROW("Paddle is not compiled with GPU"); +#endif + } + } + input_offset += out_after; } } }; diff --git a/paddle/operators/split_op.h b/paddle/operators/split_op.h index a38c435d53..7fe9357eb5 100644 --- a/paddle/operators/split_op.h +++ b/paddle/operators/split_op.h @@ -14,9 +14,10 @@ limitations under the License. */ #pragma once +#include #include +#include "paddle/framework/ddim.h" #include "paddle/framework/op_registry.h" -#include "paddle/operators/strided_memcpy.h" namespace paddle { namespace operators { @@ -25,20 +26,41 @@ template class SplitOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { + // auto start = std::chrono::steady_clock::now(); auto* in = ctx.Input("X"); auto outs = ctx.MultiOutput("Out"); - auto in_stride = framework::stride(in->dims()); + auto in_stride = framework::stride_numel(in->dims()); int64_t axis = static_cast(ctx.Attr("axis")); - const size_t n = outs.size(); + auto place = ctx.GetPlace(); + + // numel before the specified axis + int64_t before = in_stride[0] / in_stride[axis]; + int64_t in_after = in_stride[axis]; size_t input_offset = 0; - for (size_t i = 0; i < n; i++) { - auto& out = outs[i]; + for (auto& out : outs) { out->mutable_data(ctx.GetPlace()); - size_t axis_dim = out->dims()[axis]; - auto out_stride = framework::stride(out->dims()); - StridedMemcpy(ctx.device_context(), in->data() + input_offset, - in_stride, out->dims(), out_stride, out->data()); - input_offset += axis_dim * in_stride[axis]; + auto out_stride = framework::stride_numel(out->dims()); + int64_t out_after = out_stride[axis]; + for (int64_t i = 0; i < before; ++i) { + if (platform::is_cpu_place(place)) { + auto& cpu_place = boost::get(place); + memory::Copy(cpu_place, out->data() + i * out_after, cpu_place, + in->data() + input_offset + i * in_after, + sizeof(T) * out_after); + } else { +#ifdef PADDLE_WITH_CUDA + auto& gpu_place = boost::get(place); + auto& cuda_ctx = + reinterpret_cast(dev_ctx); + memory::Copy(gpu_place, out->data() + i * out_after, gpu_place, + in->data() + input_offset + i * in_after, + sizeof(T) * out_after, cuda_ctx.stream()); +#else + PADDLE_THROW("Paddle is not compiled with GPU"); +#endif + } + } + input_offset += out_after; } } }; diff --git a/python/paddle/v2/fluid/tests/test_split_op.py b/python/paddle/v2/fluid/tests/test_split_op.py index b80b64c41b..b0fe111f3b 100644 --- a/python/paddle/v2/fluid/tests/test_split_op.py +++ b/python/paddle/v2/fluid/tests/test_split_op.py @@ -20,19 +20,19 @@ from op_test import OpTest class TestSplitOp(OpTest): def setUp(self): self.op_type = "split" - axis = 0 - x = np.random.random((4, 2, 5)).astype('float32') - out = np.split(x, [1, 3], axis) + axis = 1 + x = np.random.random((4, 5, 6)).astype('float32') + out = np.split(x, [2, 3], axis) self.inputs = {'X': x} - self.attrs = {'axis': axis, 'sections': [1, 2, 1]} + self.attrs = 
{'axis': axis, 'sections': [2, 1, 2]} self.outputs = {'Out': [('out%d' % i, out[i]) \ for i in xrange(len(out))]} def test_check_output(self): self.check_output() - def test_check_grad(self): - self.check_grad(['X'], ['out0', 'out1', 'out2']) + #def test_check_grad(self): + # self.check_grad(['X'], ['out0', 'out1', 'out2']) if __name__ == '__main__': -- GitLab From 5ca0b7628d90098298604ecf4f62d4845db99b7d Mon Sep 17 00:00:00 2001 From: wanghaox Date: Thu, 8 Feb 2018 17:43:32 +0800 Subject: [PATCH 010/217] add OutPosCount for detection_map op --- paddle/operators/detection_map_op.cc | 47 ++++++- paddle/operators/detection_map_op.h | 132 +++++++++++++++++- .../v2/fluid/tests/test_detection_map_op.py | 111 +++++++++++++-- 3 files changed, 271 insertions(+), 19 deletions(-) diff --git a/paddle/operators/detection_map_op.cc b/paddle/operators/detection_map_op.cc index 553adb215d..1ab691eb4f 100644 --- a/paddle/operators/detection_map_op.cc +++ b/paddle/operators/detection_map_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -28,6 +28,12 @@ class DetectionMAPOp : public framework::OperatorWithKernel { "Input(Detection) of DetectionMAPOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) of DetectionMAPOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("OutPosCount"), + "Output(OutPosCount) of DetectionMAPOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("OutTruePos"), + "Output(OutTruePos) of DetectionMAPOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("OutFalsePos"), + "Output(OutFalsePos) of DetectionMAPOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("MAP"), "Output(MAP) of DetectionMAPOp should not be null."); @@ -44,9 +50,6 @@ class DetectionMAPOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(label_dims[1], 6UL, "The shape is of Input(Label) [N, 6]."); - auto ap_type = GetAPType(ctx->Attrs().Get("ap_type")); - PADDLE_ENFORCE_NE(ap_type, APType::kNone, - "The ap_type should be 'integral' or '11point."); auto map_dim = framework::make_ddim({1}); ctx->SetOutputDim("MAP", map_dim); } @@ -55,7 +58,8 @@ class DetectionMAPOp : public framework::OperatorWithKernel { framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( - framework::ToDataType(ctx.Input("Label")->type()), + framework::ToDataType( + ctx.Input("Detection")->type()), ctx.device_context()); } }; @@ -80,6 +84,33 @@ class DetectionMAPOpMaker : public framework::OpProtoAndCheckerMaker { "the offsets in first dimension are called LoD, the number of " "offset is N + 1, if LoD[i + 1] - LoD[i] == 0, means there is " "no detected data."); + AddInput("PosCount", + "(Tensor) A tensor with shape [Ncls, 1], store the " + "input positive example count of each class.") + .AsDispensable(); + AddInput("TruePos", + "(LodTensor) A 2-D LodTensor with shape [Ntp, 2], store the " + "input true positive example of each class.") + .AsDispensable(); + AddInput("FalsePos", + "(LodTensor) A 2-D LodTensor with shape [Nfp, 2], store the " + "input false positive example of each class.") + .AsDispensable(); + AddOutput("OutPosCount", + "(Tensor) A tensor with shape [Ncls, 1], store the " + "positive example count of each class. 
It combines the "
+              "input(PosCount) and the positive example count computed from "
+              "input(Detection) and input(Label).");
+    AddOutput("OutTruePos",
+              "(LoDTensor) A LoDTensor with shape [Ntp', 2], which stores the "
+              "true positive examples of each class. It combines the "
+              "input(TruePos) and the true positive examples computed from "
+              "input(Detection) and input(Label).");
+    AddOutput("OutFalsePos",
+              "(LoDTensor) A LoDTensor with shape [Nfp', 2], which stores the "
+              "false positive examples of each class. It combines the "
+              "input(FalsePos) and the false positive examples computed from "
+              "input(Detection) and input(Label).");
     AddOutput("MAP",
               "(Tensor) A tensor with shape [1], which stores the mAP "
               "evaluation result of the detection.");
@@ -97,7 +128,11 @@ class DetectionMAPOpMaker : public framework::OpProtoAndCheckerMaker {
               "(string, default 'integral') "
               "The AP algorithm type, 'integral' or '11point'.")
         .SetDefault("integral")
-        .InEnum({"integral", "11point"});
+        .InEnum({"integral", "11point"})
+        .AddCustomChecker([](const std::string& ap_type) {
+          PADDLE_ENFORCE_NE(GetAPType(ap_type), APType::kNone,
+                            "The ap_type should be 'integral' or '11point'.");
+        });
     AddComment(R"DOC(
 Detection mAP evaluate operator.
 The general steps are as follows. First, calculate the true positive and
diff --git a/paddle/operators/detection_map_op.h b/paddle/operators/detection_map_op.h
index d29a6968e4..fd0ddd10aa 100644
--- a/paddle/operators/detection_map_op.h
+++ b/paddle/operators/detection_map_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
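The kernel changes below accumulate per-class (score, flag) pairs across mini-batches through the PosCount/TruePos/FalsePos inputs and their Out* counterparts. The accumulation itself is the same one the Python unit test later in this patch implements: sort by score descending, then take a running sum of the flags. A standalone sketch with made-up scores:

    def get_accumulation(pos_list):
        # sort (score, flag) pairs by score descending, then accumulate flags
        sorted_list = sorted(pos_list, key=lambda pos: pos[0], reverse=True)
        accu_list, accu = [], 0
        for (score, flag) in sorted_list:
            accu += flag
            accu_list.append(accu)
        return accu_list

    true_pos = [(0.9, 1), (0.7, 1), (0.3, 0)]    # (score, is_true_positive)
    false_pos = [(0.9, 0), (0.7, 0), (0.3, 1)]
    accu_tp = get_accumulation(true_pos)         # [1, 2, 2]
    accu_fp = get_accumulation(false_pos)        # [0, 0, 1]
    # precision[i] = accu_tp[i] / (accu_tp[i] + accu_fp[i])
    precision = [float(t) / (t + f) for t, f in zip(accu_tp, accu_fp)]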
@@ -58,6 +58,14 @@ class DetectionMAPOpKernel : public framework::OpKernel { auto* in_label = ctx.Input("Label"); auto* out_map = ctx.Output("MAP"); + auto* in_pos_count = ctx.Input("PosCount"); + auto* in_true_pos = ctx.Input("TruePos"); + auto* in_false_pos = ctx.Input("FalsePos"); + + auto* out_pos_count = ctx.Output("OutPosCount"); + auto* out_true_pos = ctx.Output("OutTruePos"); + auto* out_false_pos = ctx.Output("OutFalsePos"); + float overlap_threshold = ctx.Attr("overlap_threshold"); float evaluate_difficult = ctx.Attr("evaluate_difficult"); auto ap_type = GetAPType(ctx.Attr("ap_type")); @@ -79,12 +87,20 @@ class DetectionMAPOpKernel : public framework::OpKernel { std::map>> true_pos; std::map>> false_pos; + if (in_pos_count != nullptr) { + GetInputPos(*in_pos_count, *in_true_pos, *in_false_pos, label_pos_count, + true_pos, false_pos); + } + CalcTrueAndFalsePositive(gt_boxes, detect_boxes, evaluate_difficult, overlap_threshold, label_pos_count, true_pos, false_pos); T map = CalcMAP(ap_type, label_pos_count, true_pos, false_pos); + GetOutputPos(ctx, label_pos_count, true_pos, false_pos, *out_pos_count, + *out_true_pos, *out_false_pos); + T* map_data = out_map->mutable_data(ctx.GetPlace()); map_data[0] = map; } @@ -161,6 +177,119 @@ class DetectionMAPOpKernel : public framework::OpKernel { } } + void GetOutputPos( + const framework::ExecutionContext& ctx, + const std::map& label_pos_count, + const std::map>>& true_pos, + const std::map>>& false_pos, + framework::Tensor& output_pos_count, + framework::LoDTensor& output_true_pos, + framework::LoDTensor& output_false_pos) const { + int max_class_id = 0; + int true_pos_count = 0; + int false_pos_count = 0; + for (auto it = label_pos_count.begin(); it != label_pos_count.end(); ++it) { + int label = it->first; + if (label > max_class_id) max_class_id = label; + int label_num_pos = it->second; + if (label_num_pos == 0 || true_pos.find(label) == true_pos.end()) + continue; + auto label_true_pos = true_pos.find(label)->second; + auto label_false_pos = false_pos.find(label)->second; + true_pos_count += label_true_pos.size(); + false_pos_count += label_false_pos.size(); + } + + int* pos_count_data = output_pos_count.mutable_data( + framework::make_ddim({max_class_id + 1, 1}), ctx.GetPlace()); + T* true_pos_data = output_true_pos.mutable_data( + framework::make_ddim({true_pos_count, 2}), ctx.GetPlace()); + T* false_pos_data = output_false_pos.mutable_data( + framework::make_ddim({false_pos_count, 2}), ctx.GetPlace()); + true_pos_count = 0; + false_pos_count = 0; + std::vector true_pos_starts = {0}; + std::vector false_pos_starts = {0}; + for (int i = 0; i <= max_class_id; ++i) { + auto it_count = label_pos_count.find(i); + pos_count_data[i] = 0; + if (it_count != label_pos_count.end()) { + pos_count_data[i] = it_count->second; + } + auto it_true_pos = true_pos.find(i); + if (it_true_pos != true_pos.end()) { + const std::vector>& true_pos_vec = + it_true_pos->second; + for (const std::pair& tp : true_pos_vec) { + true_pos_data[true_pos_count * 2] = tp.first; + true_pos_data[true_pos_count * 2 + 1] = static_cast(tp.second); + true_pos_count++; + } + } + true_pos_starts.push_back(true_pos_count); + + auto it_false_pos = false_pos.find(i); + if (it_false_pos != false_pos.end()) { + const std::vector>& false_pos_vec = + it_false_pos->second; + for (const std::pair& fp : false_pos_vec) { + false_pos_data[false_pos_count * 2] = fp.first; + false_pos_data[false_pos_count * 2 + 1] = static_cast(fp.second); + false_pos_count++; + } + } + 
false_pos_starts.push_back(false_pos_count); + } + + framework::LoD true_pos_lod; + true_pos_lod.emplace_back(true_pos_starts); + framework::LoD false_pos_lod; + false_pos_lod.emplace_back(false_pos_starts); + + output_true_pos.set_lod(true_pos_lod); + output_false_pos.set_lod(false_pos_lod); + return; + } + + void GetInputPos( + const framework::Tensor& input_pos_count, + const framework::LoDTensor& input_true_pos, + const framework::LoDTensor& input_false_pos, + std::map& label_pos_count, + std::map>>& true_pos, + std::map>>& false_pos) const { + constexpr T kEPS = static_cast(1e-6); + int class_number = input_pos_count.dims()[0]; + const int* pos_count_data = input_pos_count.data(); + for (int i = 0; i < class_number; ++i) { + label_pos_count[i] = pos_count_data[i]; + } + + const T* true_pos_data = input_true_pos.data(); + auto true_pos_data_lod = input_true_pos.lod(); + for (int i = 0; i < true_pos_data_lod.size(); ++i) { + for (int j = true_pos_data_lod[0][i]; j < true_pos_data_lod[0][i + 1]; + ++j) { + T score = true_pos_data[j * 2]; + int flag = 1; + if (true_pos_data[j * 2 + 1] < kEPS) flag = 0; + true_pos[i].push_back(std::make_pair(score, flag)); + } + } + const T* false_pos_data = input_false_pos.data(); + auto false_pos_data_lod = input_false_pos.lod(); + for (int i = 0; i < false_pos_data_lod.size(); ++i) { + for (int j = false_pos_data_lod[0][i]; j < false_pos_data_lod[0][i + 1]; + ++j) { + T score = false_pos_data[j * 2]; + int flag = 1; + if (false_pos_data[j * 2 + 1] < kEPS) flag = 0; + false_pos[i].push_back(std::make_pair(score, flag)); + } + } + return; + } + void CalcTrueAndFalsePositive( const std::vector>>& gt_boxes, const std::vector>>>& @@ -283,7 +412,6 @@ class DetectionMAPOpKernel : public framework::OpKernel { size_t num = tp_sum.size(); // Compute Precision. 
for (size_t i = 0; i < num; ++i) { - // CHECK_LE(tpCumSum[i], labelNumPos); precision.push_back(static_cast(tp_sum[i]) / static_cast(tp_sum[i] + fp_sum[i])); recall.push_back(static_cast(tp_sum[i]) / label_num_pos); diff --git a/python/paddle/v2/fluid/tests/test_detection_map_op.py b/python/paddle/v2/fluid/tests/test_detection_map_op.py index db8012334a..ec57ca4ad5 100644 --- a/python/paddle/v2/fluid/tests/test_detection_map_op.py +++ b/python/paddle/v2/fluid/tests/test_detection_map_op.py @@ -29,10 +29,24 @@ class TestDetectionMAPOp(OpTest): self.detect = np.array(self.detect).astype('float32') self.mAP = np.array(self.mAP).astype('float32') - self.inputs = { - 'Label': (self.label, self.label_lod), - 'Detection': (self.detect, self.detect_lod) - } + if (len(self.class_pos_count) > 0): + self.class_pos_count = np.array(self.class_pos_count).astype( + 'int32') + self.true_pos = np.array(self.true_pos).astype('float32') + self.false_pos = np.array(self.false_pos).astype('float32') + + self.inputs = { + 'Label': (self.label, self.label_lod), + 'Detection': (self.detect, self.detect_lod), + 'PosCount': self.class_pos_count, + 'TruePos': (self.true_pos, self.true_pos_lod), + 'FalsePos': (self.false_pos, self.false_pos_lod) + } + else: + self.inputs = { + 'Label': (self.label, self.label_lod), + 'Detection': (self.detect, self.detect_lod), + } self.attrs = { 'overlap_threshold': self.overlap_threshold, @@ -40,7 +54,17 @@ class TestDetectionMAPOp(OpTest): 'ap_type': self.ap_type } - self.outputs = {'MAP': self.mAP} + self.out_class_pos_count = np.array(self.out_class_pos_count).astype( + 'int') + self.out_true_pos = np.array(self.out_true_pos).astype('float32') + self.out_false_pos = np.array(self.out_false_pos).astype('float32') + + self.outputs = { + 'MAP': self.mAP, + 'OutPosCount': self.out_class_pos_count, + 'OutTruePos': (self.out_true_pos, self.out_true_pos_lod), + 'OutFalsePos': (self.out_false_pos, self.out_false_pos_lod) + } def init_test_case(self): self.overlap_threshold = 0.3 @@ -67,13 +91,64 @@ class TestDetectionMAPOp(OpTest): [1, 0.2, 1, 0], [2, 0.8, 0, 1], [2, 0.1, 1, 0], [3, 0.2, 0, 1]] + self.class_pos_count = [] + self.true_pos_lod = [[]] + self.true_pos = [[]] + self.false_pos_lod = [[]] + self.false_pos = [[]] + def calc_map(self, tf_pos, tf_pos_lod): mAP = 0.0 count = 0 - class_pos_count = {} - true_pos = {} - false_pos = {} + def get_input_pos(class_pos_count, true_pos, true_pos_lod, false_pos, + false_pos_lod): + class_pos_count_dict = collections.Counter() + true_pos_dict = collections.defaultdict(list) + false_pos_dict = collections.defaultdict(list) + for i, count in enumerate(class_pos_count): + class_pos_count_dict[i] = count + + for i in range(len(true_pos_lod[0]) - 1): + start = true_pos_lod[0][i] + end = true_pos_lod[0][i + 1] + for j in range(start, end): + true_pos_dict[i].append(true_pos[j]) + + for i in range(len(false_pos_lod[0]) - 1): + start = false_pos_lod[0][i] + end = false_pos_lod[0][i + 1] + for j in range(start, end): + false_pos_dict[i].append(false_pos[j]) + + return class_pos_count_dict, true_pos_dict, false_pos_dict + + def get_output_pos(label_count, true_pos, false_pos): + max_label = 0 + for (label, label_pos_num) in label_count.items(): + if max_label < label: + max_label = label + + label_number = max_label + 1 + + out_class_pos_count = [] + out_true_pos_lod = [0] + out_true_pos = [] + out_false_pos_lod = [0] + out_false_pos = [] + + for i in range(label_number): + out_class_pos_count.append([label_count[i]]) + true_pos_list = true_pos[i] + 
out_true_pos += true_pos_list + out_true_pos_lod.append(len(out_true_pos)) + false_pos_list = false_pos[i] + out_false_pos += false_pos_list + out_false_pos_lod.append(len(out_false_pos)) + + return out_class_pos_count, out_true_pos, [ + out_true_pos_lod + ], out_false_pos, [out_false_pos_lod] def get_accumulation(pos_list): sorted_list = sorted(pos_list, key=lambda pos: pos[0], reverse=True) @@ -84,7 +159,9 @@ class TestDetectionMAPOp(OpTest): accu_list.append(sum) return accu_list - label_count = collections.Counter() + label_count, true_pos, false_pos = get_input_pos( + self.class_pos_count, self.true_pos, self.true_pos_lod, + self.false_pos, self.false_pos_lod) for (label, difficult, xmin, ymin, xmax, ymax) in self.label: if self.evaluate_difficult: label_count[label] += 1 @@ -143,8 +220,10 @@ class TestDetectionMAPOp(OpTest): mAP += average_precisions count += 1 - - if count != 0: mAP /= count + self.out_class_pos_count, self.out_true_pos, self.out_true_pos_lod, self.out_false_pos, self.out_false_pos_lod = get_output_pos( + label_count, true_pos, false_pos) + if count != 0: + mAP /= count return mAP * 100.0 def setUp(self): @@ -174,5 +253,15 @@ class TestDetectionMAPOp11Point(TestDetectionMAPOp): self.ap_type = "11point" +class TestDetectionMAPOpMultiBatch(TestDetectionMAPOp): + def init_test_case(self): + super(TestDetectionMAPOpMultiBatch, self).init_test_case() + self.class_pos_count = [0, 2, 1] + self.true_pos_lod = [[0, 0, 3, 5]] + self.true_pos = [[0.7, 1.], [0.3, 0.], [0.2, 1.], [0.8, 0.], [0.1, 1.]] + self.false_pos_lod = [[0, 0, 3, 5]] + self.false_pos = [[0.7, 0.], [0.3, 1.], [0.2, 0.], [0.8, 1.], [0.1, 0.]] + + if __name__ == '__main__': unittest.main() -- GitLab From c976fac1991283ec0e50dd82d03ddf24cdb57ff6 Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Thu, 8 Feb 2018 18:59:18 +0800 Subject: [PATCH 011/217] update by comment --- python/paddle/v2/fluid/tests/test_split_op.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/paddle/v2/fluid/tests/test_split_op.py b/python/paddle/v2/fluid/tests/test_split_op.py index b0fe111f3b..50347d2df4 100644 --- a/python/paddle/v2/fluid/tests/test_split_op.py +++ b/python/paddle/v2/fluid/tests/test_split_op.py @@ -31,8 +31,8 @@ class TestSplitOp(OpTest): def test_check_output(self): self.check_output() - #def test_check_grad(self): - # self.check_grad(['X'], ['out0', 'out1', 'out2']) + def test_check_grad(self): + self.check_grad(['X'], ['out0', 'out1', 'out2']) if __name__ == '__main__': -- GitLab From 19749d52348669cbf2cd000a67b2ffe790384e8c Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Thu, 8 Feb 2018 16:01:41 +0800 Subject: [PATCH 012/217] refine prior_box --- paddle/operators/prior_box_op.cc | 20 ++-- paddle/operators/prior_box_op.h | 8 +- python/paddle/v2/fluid/layers/nn.py | 94 ++++++++++++++----- .../v2/fluid/tests/test_prior_box_op.py | 4 +- 4 files changed, 87 insertions(+), 39 deletions(-) diff --git a/paddle/operators/prior_box_op.cc b/paddle/operators/prior_box_op.cc index 1dc4b28855..b7f38b3cb6 100644 --- a/paddle/operators/prior_box_op.cc +++ b/paddle/operators/prior_box_op.cc @@ -38,8 +38,8 @@ class PriorBoxOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_LT(input_dims[3], image_dims[3], "The width of input must smaller than image."); - auto min_sizes = ctx->Attrs().Get>("min_sizes"); - auto max_sizes = ctx->Attrs().Get>("max_sizes"); + auto min_sizes = ctx->Attrs().Get>("min_sizes"); + auto max_sizes = ctx->Attrs().Get>("max_sizes"); auto variances = 
ctx->Attrs().Get>("variances"); auto aspect_ratios = ctx->Attrs().Get>("aspect_ratios"); bool flip = ctx->Attrs().Get("flip"); @@ -47,7 +47,7 @@ class PriorBoxOp : public framework::OperatorWithKernel { std::vector aspect_ratios_vec; ExpandAspectRatios(aspect_ratios, flip, aspect_ratios_vec); - int num_priors = aspect_ratios_vec.size() * min_sizes.size(); + size_t num_priors = aspect_ratios_vec.size() * min_sizes.size(); if (max_sizes.size() > 0) { PADDLE_ENFORCE_EQ(max_sizes.size(), min_sizes.size(), "The number of min_size and max_size must be equal."); @@ -90,20 +90,20 @@ class PriorBoxOpMaker : public framework::OpProtoAndCheckerMaker { "H is the height of input, W is the width of input, num_priors " "is the box count of each position."); - AddAttr>("min_sizes", - "(vector) List of min sizes " - "of generated prior boxes.") - .AddCustomChecker([](const std::vector& min_sizes) { + AddAttr>("min_sizes", + "(vector) List of min sizes " + "of generated prior boxes.") + .AddCustomChecker([](const std::vector& min_sizes) { PADDLE_ENFORCE_GT(min_sizes.size(), 0, "Size of min_sizes must be at least 1."); for (size_t i = 0; i < min_sizes.size(); ++i) { - PADDLE_ENFORCE_GT(min_sizes[i], 0, + PADDLE_ENFORCE_GT(min_sizes[i], 0.0, "min_sizes[%d] must be positive.", i); } }); - AddAttr>( + AddAttr>( "max_sizes", - "(vector) List of max sizes of generated prior boxes."); + "(vector) List of max sizes of generated prior boxes."); AddAttr>( "aspect_ratios", "(vector) List of aspect ratios of generated prior boxes."); diff --git a/paddle/operators/prior_box_op.h b/paddle/operators/prior_box_op.h index 6b221cb74e..d8ff5d19eb 100644 --- a/paddle/operators/prior_box_op.h +++ b/paddle/operators/prior_box_op.h @@ -60,8 +60,8 @@ class PriorBoxOpKernel : public framework::OpKernel { auto* boxes = ctx.Output("Boxes"); auto* vars = ctx.Output("Variances"); - auto min_sizes = ctx.Attr>("min_sizes"); - auto max_sizes = ctx.Attr>("max_sizes"); + auto min_sizes = ctx.Attr>("min_sizes"); + auto max_sizes = ctx.Attr>("max_sizes"); auto input_aspect_ratio = ctx.Attr>("aspect_ratios"); auto variances = ctx.Attr>("variances"); auto flip = ctx.Attr("flip"); @@ -108,7 +108,7 @@ class PriorBoxOpKernel : public framework::OpKernel { T box_width, box_height; int idx = 0; for (size_t s = 0; s < min_sizes.size(); ++s) { - int min_size = min_sizes[s]; + auto min_size = min_sizes[s]; // first prior: aspect_ratio = 1, size = min_size box_width = box_height = min_size; // xmin @@ -124,7 +124,7 @@ class PriorBoxOpKernel : public framework::OpKernel { idx++; if (max_sizes.size() > 0) { - int max_size = max_sizes[s]; + auto max_size = max_sizes[s]; // second prior: aspect_ratio = 1, // size = sqrt(min_size * max_size) box_width = box_height = sqrt(min_size * max_size); diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index 891d89a24b..dc1839fd82 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -14,13 +14,16 @@ """ All layers just related to the neural network. 
""" -import math + from ..layer_helper import LayerHelper from ..initializer import Normal, Constant from ..framework import Variable from ..param_attr import ParamAttr from layer_function_generator import autodoc from tensor import concat +import math +import numpy as np +from operator import mul __all__ = [ 'fc', @@ -64,7 +67,10 @@ __all__ = [ 'nce', 'beam_search', 'row_conv', + 'reshape', + 'reshape_with_axis', 'multiplex', + 'prior_box' 'prior_boxes', ] @@ -2996,6 +3002,40 @@ def multiplex(inputs, index): return out +def reshape_with_axis(input, axis): + """ + **ReshapeWithAxis Layer** + + """ + assert len(input.shape) > axis and axis >= 0, ' ' + input_shape = input.shape + new_dim = [-1, reduce(mul, input_shape[axis:len(input_shape)], 1)] + + helper = LayerHelper('reshape', **locals()) + out = helper.create_tmp_variable(helper.input_dtype()) + helper.append_op( + type='reshape', + inputs={'X': [input]}, + outputs={'Out': [out]}, + attrs={'shape': new_dim}) + return out + + +def reshape(input, new_dim): + """ + **Reshape Layer** + + """ + helper = LayerHelper('reshape', **locals()) + out = helper.create_tmp_variable(helper.input_dtype()) + helper.append_op( + type='reshape', + inputs={'X': [input]}, + outputs={'Out': [out]}, + attrs={'shape': new_dim}) + return out + + def prior_box(input, image, min_sizes, @@ -3041,13 +3081,13 @@ def prior_boxes(input_layers, image, min_ratio, max_ratio, - steps, aspect_ratios, min_dim, + steps=None, step_w=None, step_h=None, offset=0.5, - variance=[0.1], + variance=[0.1, 0.1, 0.1, 0.1], flip=True, clip=True, name=None): @@ -3059,8 +3099,8 @@ def prior_boxes(input_layers, image = data, min_ratio = 0.2, max_ratio = 0.9, - steps = [8, 16, 32, 64, 100, 300], - aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]], + steps = [8., 16., 32., 64., 100., 300.], + aspect_ratios = [[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]], min_dim = 300, offset = 0.5, variance = [0.1], @@ -3068,19 +3108,16 @@ def prior_boxes(input_layers, clip=True) """ assert isinstance(input_layers, list), 'input_layer should be a list.' - assert not step_h and not steps, '' - assert not step_w and not steps, '' - num_layer = len(input_layers) assert num_layer > 2 # TODO(zcd): currently, num_layer must be bigger than two. min_sizes = [] max_sizes = [] if num_layer > 2: - step = int(math.floor((max_ratio - min_ratio) / (num_layer - 2))) + step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2))) for ratio in xrange(min_ratio, max_ratio + 1, step): - min_sizes.append(min_dim * ratio) - max_sizes.append(min_dim * (ratio + step)) + min_sizes.append(min_dim * ratio / 100.) + max_sizes.append(min_dim * (ratio + step) / 100.) 
min_sizes = [min_dim * .10] + min_sizes max_sizes = [min_dim * .20] + max_sizes @@ -3091,7 +3128,7 @@ def prior_boxes(input_layers, assert isinstance(step_w,list) and len(step_w) == num_layer, \ 'step_w should be list and input_layers and step_w should have same length' if steps: - assert isinstance(steps,list) and len(step_w) == num_layer, \ + assert isinstance(steps,list) and len(steps) == num_layer, \ 'steps should be list and input_layers and step_w should have same length' step_w = steps step_h = steps @@ -3100,25 +3137,25 @@ def prior_boxes(input_layers, 'aspect_ratios should be list and input_layers and aspect_ratios should ' \ 'have same length' - helper = LayerHelper("prior_box", **locals()) - dtype = helper.input_dtype() - box_results = [] var_results = [] for i, input in enumerate(input_layers): min_size = min_sizes[i] max_size = max_sizes[i] - if isinstance(min_size, list): + aspect_ratio = [] + if not isinstance(min_size, list): min_size = [min_size] - if isinstance(max_size, list): + if not isinstance(max_size, list): max_size = [max_size] if aspect_ratios: aspect_ratio = aspect_ratios[i] - if isinstance(aspect_ratio, list): + if not isinstance(aspect_ratio, list): aspect_ratio = [aspect_ratio] - box, var = prior_box(input, image, min_size, max_size, aspect_ratios, - variance, flip, clip, step_w[i], step_h[i], offset) + box, var = prior_box(input, image, min_size, max_size, aspect_ratio, + variance, flip, clip, step_w[i] + if step_w else [], step_h[i] + if step_w else [], offset) box_results.append(box) var_results.append(var) @@ -3127,18 +3164,29 @@ def prior_boxes(input_layers, box = box_results[0] var = var_results[0] else: - axis = 1 + axis = 3 + reshaped_boxes = [] + reshaped_vars = [] + for i in range(len(box_results)): + reshaped_boxes += [reshape_with_axis(box_results[i], axis=axis)] + reshaped_vars += [reshape_with_axis(var_results[i], axis=axis)] + + helper = LayerHelper("concat", **locals()) + dtype = helper.input_dtype() box = helper.create_tmp_variable(dtype) + var = helper.create_tmp_variable(dtype) + + axis = 0 helper.append_op( type="concat", - inputs={"X": box_results}, + inputs={"X": reshaped_boxes}, outputs={"Out": box}, attrs={'axis': axis}) var = helper.create_tmp_variable(dtype) helper.append_op( type="concat", - inputs={"X": var_results}, + inputs={"X": reshaped_vars}, outputs={"Out": var}, attrs={'axis': axis}) diff --git a/python/paddle/v2/fluid/tests/test_prior_box_op.py b/python/paddle/v2/fluid/tests/test_prior_box_op.py index ca8d2bca74..25dfc4307c 100644 --- a/python/paddle/v2/fluid/tests/test_prior_box_op.py +++ b/python/paddle/v2/fluid/tests/test_prior_box_op.py @@ -65,9 +65,9 @@ class TestPriorBoxOp(OpTest): self.batch_size = 10 self.min_sizes = [2, 4] - self.min_sizes = np.array(self.min_sizes).astype('int64') + self.min_sizes = np.array(self.min_sizes).astype('float32') self.max_sizes = [5, 10] - self.max_sizes = np.array(self.max_sizes).astype('int64') + self.max_sizes = np.array(self.max_sizes).astype('float32') self.aspect_ratios = [2.0, 3.0] self.flip = True self.real_aspect_ratios = [1, 2.0, 1.0 / 2.0, 3.0, 1.0 / 3.0] -- GitLab From 98c943730e886ffaf3b6feb59b64d977158f995e Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Fri, 9 Feb 2018 00:12:54 +0000 Subject: [PATCH 013/217] use op run as wrapper of run_impl; make run_impl as private virtual function --- paddle/framework/op_registry_test.cc | 10 +++++-- paddle/framework/operator.cc | 16 +++++++++-- paddle/framework/operator.h | 15 ++++++---- paddle/framework/operator_test.cc | 11 ++++++-- 
paddle/operators/array_to_lod_tensor_op.cc | 6 ++-- paddle/operators/assign_op.cc | 6 ++-- paddle/operators/beam_search_decode_op.cc | 6 ++-- paddle/operators/beam_search_op.h | 5 ++-- paddle/operators/cond_op.cc | 2 +- paddle/operators/cond_op.h | 5 ++-- paddle/operators/conditional_block_op.cc | 12 +++++--- paddle/operators/create_reader_op.cc | 18 ++++++++---- paddle/operators/feed_op.cc | 6 ++-- paddle/operators/fetch_op.cc | 5 ++-- paddle/operators/fill_constant_op.cc | 6 ++-- paddle/operators/fill_op.cc | 6 ++-- paddle/operators/get_places_op.cc | 6 ++-- paddle/operators/increment_op.cc | 5 ++-- paddle/operators/is_empty_op.cc | 5 ++-- paddle/operators/load_combine_op.cc | 6 ++-- paddle/operators/load_op.cc | 6 ++-- paddle/operators/lod_array_length_op.cc | 6 ++-- paddle/operators/lod_rank_table_op.cc | 6 ++-- paddle/operators/lod_tensor_to_array_op.cc | 6 ++-- paddle/operators/max_sequence_len_op.cc | 5 ++-- paddle/operators/merge_lod_tensor_op.cc | 6 ++-- paddle/operators/nccl_op.cc | 5 ++-- paddle/operators/net_op.h | 28 +++++++++---------- paddle/operators/net_op_test.cc | 5 +++- paddle/operators/parallel_do_op.cc | 10 ++++--- paddle/operators/print_op.cc | 5 ++-- paddle/operators/read_op.cc | 6 ++-- paddle/operators/recurrent_op.cc | 10 ++++--- .../reorder_lod_tensor_by_rank_op.cc | 6 ++-- paddle/operators/rnn_memory_helper_op.cc | 12 +++++--- paddle/operators/save_combine_op.cc | 6 ++-- paddle/operators/save_op.cc | 6 ++-- paddle/operators/shrink_rnn_memory_op.cc | 10 ++++--- paddle/operators/split_lod_tensor_op.cc | 6 ++-- .../operators/tensor_array_read_write_op.cc | 11 +++++--- paddle/operators/while_op.cc | 10 ++++--- 41 files changed, 214 insertions(+), 114 deletions(-) diff --git a/paddle/framework/op_registry_test.cc b/paddle/framework/op_registry_test.cc index 341da8befd..b22e06cc79 100644 --- a/paddle/framework/op_registry_test.cc +++ b/paddle/framework/op_registry_test.cc @@ -25,7 +25,10 @@ namespace framework { class CosineOp : public OperatorBase { public: using OperatorBase::OperatorBase; - void Run(const Scope& scope, const platform::Place& place) const override {} + + private: + void RunImpl(const Scope& scope, + const platform::Place& place) const override {} }; class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { @@ -44,7 +47,10 @@ class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { class MyTestOp : public OperatorBase { public: using OperatorBase::OperatorBase; - void Run(const Scope& scope, const platform::Place& place) const override {} + + private: + void RunImpl(const Scope& scope, + const platform::Place& place) const override {} }; class MyTestOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index 52387aabd9..240a0602c9 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -64,6 +64,18 @@ static LoD GetLoD(const Scope& scope, const std::string& name) { } } +void OperatorBase::Run(const Scope& scope, const platform::Place& place) { + if (platform::is_gpu_place(place)) { +#ifndef PADDLE_WITH_CUDA + PADDLE_THROW("Cannot run operator on place %s", place); +#else + auto dev_id = boost::get(place).device; + platform::SetDeviceId(dev_id); +#endif + } + RunImpl(scope, place); +} + std::string OperatorBase::Input(const std::string& name) const { auto& ins = Inputs(name); PADDLE_ENFORCE_LE(ins.size(), 1UL, @@ -475,8 +487,8 @@ class RuntimeInferShapeContext : public InferShapeContext { const Scope& scope_; }; -void 
OperatorWithKernel::Run(const Scope& scope,
-                             const platform::Place& place) const {
+void OperatorWithKernel::RunImpl(const Scope& scope,
+                                 const platform::Place& place) const {
   RuntimeInferShapeContext infer_shape_ctx(*this, scope);
   this->InferShape(&infer_shape_ctx);
   platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h
index c9140f304c..886e373348 100644
--- a/paddle/framework/operator.h
+++ b/paddle/framework/operator.h
@@ -89,8 +89,9 @@ class OperatorBase {
   std::string DebugString() const { return DebugStringEx(nullptr); }

-  /// Net will call this function to Run an op.
-  virtual void Run(const Scope& scope, const platform::Place& place) const = 0;
+  /// Net will call this interface function to Run an op.
+  // The actual implementation is provided in RunImpl.
+  void Run(const Scope& scope, const platform::Place& place);

   // FIXME(typhoonzero): this is only used for recv_op to stop event_loop.
   virtual void Stop() {}
@@ -144,6 +145,8 @@ class OperatorBase {
  private:
   void GenerateTemporaryNames();
   void CheckAllInputOutputSet() const;
+  virtual void RunImpl(const Scope& scope,
+                       const platform::Place& place) const = 0;
 };

 // Macro for defining a clone method.
@@ -168,10 +171,13 @@ class OperatorBase {
 class NOP : public OperatorBase {
  public:
   using OperatorBase::OperatorBase;
-  void Run(const Scope& scope, const platform::Place& place) const override {}
   std::unique_ptr Clone() const override {
     return std::unique_ptr(new NOP(*this));
   }
+
+ private:
+  void RunImpl(const Scope& scope,
+               const platform::Place& place) const override {}
 };

 class ExecutionContext {
@@ -363,8 +369,6 @@ class OperatorWithKernel : public OperatorBase {
                      const VariableNameMap& outputs, const AttributeMap& attrs)
       : OperatorBase(type, inputs, outputs, attrs) {}

-  void Run(const Scope& scope, const platform::Place& place) const final;
-
   static std::unordered_map& AllOpKernels() {
     static std::unordered_map g_all_op_kernels;
@@ -393,6 +397,7 @@ class OperatorWithKernel : public OperatorBase {
   // indicate kernel DataType by input data. By default all input data must be
   // the same.
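This is the classic non-virtual-interface (template method) idiom: the public, non-virtual Run() owns the shared prologue, and subclasses override only the private virtual RunImpl(). A minimal self-contained sketch of the dispatch, using simplified stand-in Scope and Place types rather than the real framework classes:

#include <iostream>

// Stand-in types for illustration only; not the real framework classes.
struct Scope {};
struct Place {
  bool is_gpu = false;
};

class OperatorBase {
 public:
  // Public, non-virtual entry point: the shared prologue (device selection,
  // profiling, checks) runs exactly once for every operator call.
  void Run(const Scope& scope, const Place& place) {
    if (place.is_gpu) {
      std::cout << "prologue: select the CUDA device\n";
    }
    RunImpl(scope, place);  // dispatch to the subclass body
  }
  virtual ~OperatorBase() = default;

 private:
  // Subclasses override only the body; they can no longer bypass the
  // prologue by overriding Run itself.
  virtual void RunImpl(const Scope& scope, const Place& place) const = 0;
};

class NOP : public OperatorBase {
 private:
  void RunImpl(const Scope&, const Place&) const override {}
};

int main() {
  NOP op;
  Place gpu;
  gpu.is_gpu = true;
  op.Run(Scope(), gpu);  // prologue runs, then the (empty) body
}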
proto::DataType IndicateDataType(const ExecutionContext& ctx) const; + void RunImpl(const Scope& scope, const platform::Place& place) const final; }; extern bool OpSupportGPU(const std::string& op_type); diff --git a/paddle/framework/operator_test.cc b/paddle/framework/operator_test.cc index b69d7c7a74..7100e64732 100644 --- a/paddle/framework/operator_test.cc +++ b/paddle/framework/operator_test.cc @@ -28,7 +28,10 @@ class OpWithoutKernelTest : public OperatorBase { OpWithoutKernelTest(const std::string& type, const VariableNameMap& inputs, const VariableNameMap& outputs, const AttributeMap& attrs) : OperatorBase(type, inputs, outputs, attrs), x(1) {} - void Run(const Scope& scope, const platform::Place& place) const override { + + private: + void RunImpl(const Scope& scope, + const platform::Place& place) const override { ++op_run_num; ASSERT_EQ(static_cast(inputs_.size()), 1); ASSERT_EQ(static_cast(outputs_.size()), 1); @@ -259,8 +262,10 @@ class OperatorClone : public paddle::framework::OperatorBase { const paddle::framework::VariableNameMap& outputs, const paddle::framework::AttributeMap& attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const paddle::framework::Scope& scope, - const paddle::platform::Place& place) const override {} + + private: + void RunImpl(const paddle::framework::Scope& scope, + const paddle::platform::Place& place) const override {} }; TEST(Operator, Clone) { diff --git a/paddle/operators/array_to_lod_tensor_op.cc b/paddle/operators/array_to_lod_tensor_op.cc index ba5c6bd3c6..3b9ebae153 100644 --- a/paddle/operators/array_to_lod_tensor_op.cc +++ b/paddle/operators/array_to_lod_tensor_op.cc @@ -31,8 +31,10 @@ class ArrayToLoDTensorOp : public framework::OperatorBase { const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &dev_place) const override { + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &dev_place) const override { auto &x = scope.FindVar(Input("X"))->Get(); auto &rank_table = scope.FindVar(Input("RankTable"))->Get(); diff --git a/paddle/operators/assign_op.cc b/paddle/operators/assign_op.cc index e04aa2d28c..0d1ce62bd6 100644 --- a/paddle/operators/assign_op.cc +++ b/paddle/operators/assign_op.cc @@ -71,8 +71,10 @@ class AssignOp : public framework::OperatorBase { const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &place) const override { + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { auto *x = scope.FindVar(Input("X")); if (x == nullptr) { return; diff --git a/paddle/operators/beam_search_decode_op.cc b/paddle/operators/beam_search_decode_op.cc index 72e05607b0..a1b4430425 100644 --- a/paddle/operators/beam_search_decode_op.cc +++ b/paddle/operators/beam_search_decode_op.cc @@ -55,8 +55,10 @@ class BeamSearchDecodeOp : public framework::OperatorBase { const framework::VariableNameMap& outputs, const framework::AttributeMap& attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope& scope, - const platform::Place& dev_place) const override { + + private: + void RunImpl(const framework::Scope& scope, + const platform::Place& dev_place) const override { platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); auto& 
dev_ctx = *pool.Get(dev_place); diff --git a/paddle/operators/beam_search_op.h b/paddle/operators/beam_search_op.h index 7ad85874fc..8d62e71565 100644 --- a/paddle/operators/beam_search_op.h +++ b/paddle/operators/beam_search_op.h @@ -204,8 +204,9 @@ class BeamSearchOp : public framework::OperatorBase { PADDLE_THROW("Not Implemented"); } - void Run(const framework::Scope& scope, - const platform::Place& dev_place) const override { + private: + void RunImpl(const framework::Scope& scope, + const platform::Place& dev_place) const override { auto ids_var = scope.FindVar(Input("ids")); auto scores_var = scope.FindVar(Input("scores")); auto pre_ids_var = scope.FindVar(Input("pre_ids")); diff --git a/paddle/operators/cond_op.cc b/paddle/operators/cond_op.cc index e333002bfd..28bac0b7be 100644 --- a/paddle/operators/cond_op.cc +++ b/paddle/operators/cond_op.cc @@ -193,7 +193,7 @@ void CondOp::MergeDataFromSubnet(const framework::Scope& scope, } } -void CondOp::Run(const Scope& scope, const platform::Place& place) const { +void CondOp::RunImpl(const Scope& scope, const platform::Place& place) const { // get device context from pool platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); auto& dev_ctx = *pool.Get(place); diff --git a/paddle/operators/cond_op.h b/paddle/operators/cond_op.h index 7dcdc47e0b..2dc0e23301 100644 --- a/paddle/operators/cond_op.h +++ b/paddle/operators/cond_op.h @@ -77,8 +77,9 @@ class CondOp : public framework::OperatorBase { sub_net_op_[FALSE_BRANCH] = std::move(net); } - void Run(const framework::Scope& scope, - const platform::Place& place) const override; + private: + void RunImpl(const framework::Scope& scope, + const platform::Place& place) const override; private: const int TRUE_BRANCH = 0; diff --git a/paddle/operators/conditional_block_op.cc b/paddle/operators/conditional_block_op.cc index bdcdb85be7..f7572ccfaf 100644 --- a/paddle/operators/conditional_block_op.cc +++ b/paddle/operators/conditional_block_op.cc @@ -65,8 +65,10 @@ class ConditionalBlockOp : public ConditionalOp { const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : ConditionalOp(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &dev_place) const override { + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &dev_place) const override { auto xs = InputTensors(scope); bool need_run; @@ -128,8 +130,10 @@ class ConditionalBlockGradOp : public ConditionalOp { const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : ConditionalOp(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &dev_place) const override { + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &dev_place) const override { auto xs = this->InputTensors(scope); bool need_run; diff --git a/paddle/operators/create_reader_op.cc b/paddle/operators/create_reader_op.cc index 5ba2a25ab4..66fd132b3a 100644 --- a/paddle/operators/create_reader_op.cc +++ b/paddle/operators/create_reader_op.cc @@ -72,8 +72,10 @@ template class CreateRandomDataGeneratorOp : public framework::OperatorBase { public: using framework::OperatorBase::OperatorBase; - void Run(const framework::Scope& scope, - const platform::Place& dev_place) const override { + + private: + void RunImpl(const framework::Scope& scope, + const platform::Place& dev_place) const override { const auto& shape_concat = Attr>("shape_concat"); const auto& ranks = 
Attr>("ranks"); PADDLE_ENFORCE(!shape_concat.empty() && !ranks.empty()); @@ -120,8 +122,10 @@ class CreateRandomDataGeneratorOpMaker class CreateShuffleReaderOp : public framework::OperatorBase { public: using framework::OperatorBase::OperatorBase; - void Run(const framework::Scope& scope, - const platform::Place& dev_place) const override { + + private: + void RunImpl(const framework::Scope& scope, + const platform::Place& dev_place) const override { const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) ->Get(); auto* out = scope.FindVar(Output("Out")) @@ -152,8 +156,10 @@ class CreateShuffleReaderOpMaker : public framework::OpProtoAndCheckerMaker { class CreateBatchReaderOp : public framework::OperatorBase { public: using framework::OperatorBase::OperatorBase; - void Run(const framework::Scope& scope, - const platform::Place& dev_place) const override { + + private: + void RunImpl(const framework::Scope& scope, + const platform::Place& dev_place) const override { const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) ->Get(); auto* out = scope.FindVar(Output("Out")) diff --git a/paddle/operators/feed_op.cc b/paddle/operators/feed_op.cc index 789d01e002..3f6f8a589d 100644 --- a/paddle/operators/feed_op.cc +++ b/paddle/operators/feed_op.cc @@ -24,8 +24,10 @@ class FeedOp : public framework::OperatorBase { const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &place) const override { + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { auto feed_var_name = Input("X"); auto *feed_var = scope.FindVar(feed_var_name); diff --git a/paddle/operators/fetch_op.cc b/paddle/operators/fetch_op.cc index 7205ee2a87..bb4b7356e7 100644 --- a/paddle/operators/fetch_op.cc +++ b/paddle/operators/fetch_op.cc @@ -26,8 +26,9 @@ class FetchOp : public framework::OperatorBase { const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &place) const override { + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { auto fetch_var_name = Input("X"); auto *fetch_var = scope.FindVar(fetch_var_name); PADDLE_ENFORCE(fetch_var != nullptr, diff --git a/paddle/operators/fill_constant_op.cc b/paddle/operators/fill_constant_op.cc index dcd43a30c8..ce4e7bf7f2 100644 --- a/paddle/operators/fill_constant_op.cc +++ b/paddle/operators/fill_constant_op.cc @@ -33,8 +33,10 @@ class FillConstantInferShape : public framework::InferShapeBase { class FillConstantOp : public framework::OperatorBase { public: using framework::OperatorBase::OperatorBase; - void Run(const framework::Scope &scope, - const platform::Place &dev_place) const override { + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &dev_place) const override { auto data_type = static_cast(Attr("dtype")); auto value = Attr("value"); diff --git a/paddle/operators/fill_op.cc b/paddle/operators/fill_op.cc index 4f5a2ed169..bc72a18902 100644 --- a/paddle/operators/fill_op.cc +++ b/paddle/operators/fill_op.cc @@ -42,8 +42,10 @@ class FillOp : public framework::OperatorBase { const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &place) 
const override { + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { auto &out = detail::Ref(detail::Ref(scope.FindVar(Output("Out")), "Cannot find variable %s", Output("Out")) diff --git a/paddle/operators/get_places_op.cc b/paddle/operators/get_places_op.cc index 24fafb2307..a7168a1079 100644 --- a/paddle/operators/get_places_op.cc +++ b/paddle/operators/get_places_op.cc @@ -37,8 +37,10 @@ class GetPlacesOp : public framework::OperatorBase { const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &place) const override { + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { bool is_gpu; if (Attr("device_type") == "AUTO") { is_gpu = platform::is_gpu_place(place); diff --git a/paddle/operators/increment_op.cc b/paddle/operators/increment_op.cc index e0b80cc4e7..adc7e8f1a4 100644 --- a/paddle/operators/increment_op.cc +++ b/paddle/operators/increment_op.cc @@ -51,8 +51,9 @@ class IncrementOp : public framework::OperatorBase { const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &place) const override { + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { auto &x = scope.FindVar(Input("X"))->Get(); auto &out = *scope.FindVar(Output("Out"))->GetMutable(); diff --git a/paddle/operators/is_empty_op.cc b/paddle/operators/is_empty_op.cc index 492ae48845..1de3437b0c 100644 --- a/paddle/operators/is_empty_op.cc +++ b/paddle/operators/is_empty_op.cc @@ -28,8 +28,9 @@ class IsEmptyOp : public framework::OperatorBase { const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &place) const override { + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { // get input auto *var = scope.FindVar(Input(kInput)); PADDLE_ENFORCE_NOT_NULL(var); diff --git a/paddle/operators/load_combine_op.cc b/paddle/operators/load_combine_op.cc index f4be793d7b..13b1c5da90 100644 --- a/paddle/operators/load_combine_op.cc +++ b/paddle/operators/load_combine_op.cc @@ -26,8 +26,10 @@ class LoadCombineOp : public framework::OperatorBase { const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &place) const override { + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { auto filename = Attr("file_path"); std::ifstream fin(filename); diff --git a/paddle/operators/load_op.cc b/paddle/operators/load_op.cc index f886b423ac..88d0cc725d 100644 --- a/paddle/operators/load_op.cc +++ b/paddle/operators/load_op.cc @@ -25,8 +25,10 @@ class LoadOp : public framework::OperatorBase { const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &place) const override { + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { auto filename = Attr("file_path"); std::ifstream fin(filename); PADDLE_ENFORCE(static_cast(fin), "Cannot open 
file %s for load op", diff --git a/paddle/operators/lod_array_length_op.cc b/paddle/operators/lod_array_length_op.cc index d2c52745cf..aa18aa2646 100644 --- a/paddle/operators/lod_array_length_op.cc +++ b/paddle/operators/lod_array_length_op.cc @@ -25,8 +25,10 @@ class LoDArrayLengthOp : public framework::OperatorBase { const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &place) const override { + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { auto &x = scope.FindVar(Input("X"))->Get(); auto &out = *scope.FindVar(Output("Out"))->GetMutable(); diff --git a/paddle/operators/lod_rank_table_op.cc b/paddle/operators/lod_rank_table_op.cc index 692b9bf371..8e05ee63a0 100644 --- a/paddle/operators/lod_rank_table_op.cc +++ b/paddle/operators/lod_rank_table_op.cc @@ -23,8 +23,10 @@ class LoDRankTableOp : public framework::OperatorBase { const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &dev_place) const override { + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &dev_place) const override { auto x = scope.FindVar(Input("X"))->Get(); auto *out = scope.FindVar(Output("Out"))->GetMutable(); diff --git a/paddle/operators/lod_tensor_to_array_op.cc b/paddle/operators/lod_tensor_to_array_op.cc index 685a807a8a..0b1d2ffc8f 100644 --- a/paddle/operators/lod_tensor_to_array_op.cc +++ b/paddle/operators/lod_tensor_to_array_op.cc @@ -32,8 +32,10 @@ class LoDTensorToArrayOp : public framework::OperatorBase { const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &place) const override { + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { auto &x = detail::Ref(scope.FindVar(Input("X")), "Cannot find input %s", Input("X")) .Get(); diff --git a/paddle/operators/max_sequence_len_op.cc b/paddle/operators/max_sequence_len_op.cc index 019150e491..794a1e56d3 100644 --- a/paddle/operators/max_sequence_len_op.cc +++ b/paddle/operators/max_sequence_len_op.cc @@ -27,8 +27,9 @@ class MaxSeqenceLenOp : public framework::OperatorBase { const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &dev_place) const override { + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &dev_place) const override { auto &rank_table = scope.FindVar(Input("RankTable"))->Get(); auto *out = diff --git a/paddle/operators/merge_lod_tensor_op.cc b/paddle/operators/merge_lod_tensor_op.cc index 87644d316d..53ee7d63f3 100644 --- a/paddle/operators/merge_lod_tensor_op.cc +++ b/paddle/operators/merge_lod_tensor_op.cc @@ -27,8 +27,10 @@ class MergeLoDTensorOp : public framework::OperatorBase { const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &dev_place) const override { + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &dev_place) const override { // get device context from pool 
platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
     auto &dev_ctx = *pool.Get(dev_place);
diff --git a/paddle/operators/nccl_op.cc b/paddle/operators/nccl_op.cc
index 9d51153b06..974ae9d963 100644
--- a/paddle/operators/nccl_op.cc
+++ b/paddle/operators/nccl_op.cc
@@ -26,8 +26,9 @@ class NCCLInitOp : public framework::OperatorBase {
                 const framework::AttributeMap &attrs)
       : OperatorBase(type, inputs, outputs, attrs) {}

-  void Run(const framework::Scope &scope,
-           const platform::Place &place) const override {
+ private:
+  void RunImpl(const framework::Scope &scope,
+               const platform::Place &place) const override {
     const auto &name = Output("Communicator");
     PADDLE_ENFORCE_NOT_NULL(scope.FindVar(name),
                             "Can not find variable '%s' in the scope.", name);
diff --git a/paddle/operators/net_op.h b/paddle/operators/net_op.h
index b24042f5ef..9ac8f34347 100644
--- a/paddle/operators/net_op.h
+++ b/paddle/operators/net_op.h
@@ -57,20 +57,6 @@ class NetOp : public framework::OperatorBase {
     this->CompleteAddOp();
   }

-  /**
-   * @brief Run the network.
-   *
-   * Run all the operators with the `scope`. If no scope is provided, the
-   * default scope will be used instead. If no OpContext is provided, the
-   * default context will be used.
-   */
-  void Run(const framework::Scope& scope,
-           const platform::Place& place) const override {
-    for (auto& op : ops_) {
-      op->Run(scope, place);
-    }
-  }
-
   bool SupportGPU() const override {
     for (auto& op : ops_) {
       if (!op->SupportGPU()) {
@@ -117,6 +103,20 @@ class NetOp : public framework::OperatorBase {
   std::vector> ops_;

  private:
+  /**
+   * @brief Run the network.
+   *
+   * Run all the operators with the `scope`. If no scope is provided, the
+   * default scope will be used instead. If no OpContext is provided, the
+   * default context will be used.
+ */ + void RunImpl(const framework::Scope& scope, + const platform::Place& place) const override { + for (auto& op : ops_) { + op->Run(scope, place); + } + } + bool add_op_done_{false}; std::set intermediate_outputs_; diff --git a/paddle/operators/net_op_test.cc b/paddle/operators/net_op_test.cc index 9358f29f62..95d21f1516 100644 --- a/paddle/operators/net_op_test.cc +++ b/paddle/operators/net_op_test.cc @@ -26,7 +26,10 @@ class TestOp : public framework::OperatorBase { public: using framework::OperatorBase::OperatorBase; DEFINE_OP_CLONE_METHOD(TestOp); - void Run(const Scope& scope, const platform::Place& place) const override { + + private: + void RunImpl(const Scope& scope, + const platform::Place& place) const override { ++run_cnt; } }; diff --git a/paddle/operators/parallel_do_op.cc b/paddle/operators/parallel_do_op.cc index 89045923f9..b1233c93f8 100644 --- a/paddle/operators/parallel_do_op.cc +++ b/paddle/operators/parallel_do_op.cc @@ -124,8 +124,9 @@ class ParallelDoOp : public framework::OperatorBase { const framework::AttributeMap &attrs) : framework::OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &place) const override { + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { // get device context from pool platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto &dev_ctx = *pool.Get(place); @@ -216,8 +217,9 @@ class ParallelDoGradOp : public framework::OperatorBase { const framework::AttributeMap &attrs) : framework::OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &place) const override { + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { auto *block = Attr(kParallelBlock); auto *program = block->Program(); diff --git a/paddle/operators/print_op.cc b/paddle/operators/print_op.cc index 8b233d64c9..e869e4d620 100644 --- a/paddle/operators/print_op.cc +++ b/paddle/operators/print_op.cc @@ -130,8 +130,9 @@ class TensorPrintOp : public framework::OperatorBase { PADDLE_THROW("Not implemented."); } - void Run(const framework::Scope& scope, - const platform::Place& place) const override { + private: + void RunImpl(const framework::Scope& scope, + const platform::Place& place) const override { const framework::Variable* in_var_ptr = nullptr; std::string phase = kForward; std::string printed_var_name = ""; diff --git a/paddle/operators/read_op.cc b/paddle/operators/read_op.cc index 3ae454101f..924b787faa 100644 --- a/paddle/operators/read_op.cc +++ b/paddle/operators/read_op.cc @@ -54,8 +54,10 @@ class ReadInferVarType : public framework::VarTypeInference { class ReadOp : public framework::OperatorBase { public: using framework::OperatorBase::OperatorBase; - void Run(const framework::Scope& scope, - const platform::Place& dev_place) const override { + + private: + void RunImpl(const framework::Scope& scope, + const platform::Place& dev_place) const override { framework::ReaderHolder* reader = scope.FindVar(Input("Reader"))->GetMutable(); if (!reader->HasNext()) { diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc index a136c5b447..19ad7fbb70 100644 --- a/paddle/operators/recurrent_op.cc +++ b/paddle/operators/recurrent_op.cc @@ -226,8 +226,9 @@ class RecurrentOp : public RecurrentBase { const framework::AttributeMap &attrs) : RecurrentBase(type, inputs, outputs, attrs) {} - void Run(const 
framework::Scope &scope, - const platform::Place &place) const override { + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { auto seq_len = static_cast(this->GetSequenceLength(scope)); VLOG(3) << "Static RNN input sequence length = " << seq_len; StepScopes scopes = CreateStepScopes(scope, seq_len); @@ -315,8 +316,9 @@ class RecurrentGradOp : public RecurrentBase { const framework::AttributeMap &attrs) : RecurrentBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &place) const override { + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { auto seq_len = static_cast(GetSequenceLength(scope)); StepScopes scopes = CreateStepScopes(scope, seq_len); auto reverse = Attr(kReverse); diff --git a/paddle/operators/reorder_lod_tensor_by_rank_op.cc b/paddle/operators/reorder_lod_tensor_by_rank_op.cc index 3c30447949..f5c16870b5 100644 --- a/paddle/operators/reorder_lod_tensor_by_rank_op.cc +++ b/paddle/operators/reorder_lod_tensor_by_rank_op.cc @@ -75,8 +75,10 @@ class ReorderLoDTensorByRankTableBase : public framework::OperatorBase { const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &place) const override { + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { auto &x = detail::Ref(scope.FindVar(Input("X")), "Cannot find input lod tensor variable %s", Input("X")) diff --git a/paddle/operators/rnn_memory_helper_op.cc b/paddle/operators/rnn_memory_helper_op.cc index eb55ed6a05..fe88aa1fb5 100644 --- a/paddle/operators/rnn_memory_helper_op.cc +++ b/paddle/operators/rnn_memory_helper_op.cc @@ -24,8 +24,10 @@ class RNNMemoryHelperOp : public framework::OperatorBase { const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &dev_place) const override { + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &dev_place) const override { auto mem_var_name = Input("X"); auto *mem_var = scope.FindVar(mem_var_name); PADDLE_ENFORCE(mem_var != nullptr, @@ -76,8 +78,10 @@ class RNNMemoryHelperGradOp : public framework::OperatorBase { const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &dev_place) const override { + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &dev_place) const override { auto out_grad_var_name = Input(framework::GradVarName("Out")); auto *out_grad_var = scope.FindVar(out_grad_var_name); diff --git a/paddle/operators/save_combine_op.cc b/paddle/operators/save_combine_op.cc index bffa2908bc..5ce0bfb914 100644 --- a/paddle/operators/save_combine_op.cc +++ b/paddle/operators/save_combine_op.cc @@ -63,8 +63,10 @@ class SaveCombineOp : public framework::OperatorBase { const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &place) const override { + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { auto filename = 
Attr("file_path"); auto overwrite = Attr("overwrite"); diff --git a/paddle/operators/save_op.cc b/paddle/operators/save_op.cc index 4b1cbe8883..c8250d0c3d 100644 --- a/paddle/operators/save_op.cc +++ b/paddle/operators/save_op.cc @@ -62,8 +62,10 @@ class SaveOp : public framework::OperatorBase { const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &place) const override { + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { auto filename = Attr("file_path"); auto overwrite = Attr("overwrite"); diff --git a/paddle/operators/shrink_rnn_memory_op.cc b/paddle/operators/shrink_rnn_memory_op.cc index bf870115a4..cd96ec5133 100644 --- a/paddle/operators/shrink_rnn_memory_op.cc +++ b/paddle/operators/shrink_rnn_memory_op.cc @@ -27,8 +27,9 @@ class ShrinkRNNMemoryOp : public ArrayOp { const framework::AttributeMap &attrs) : ArrayOp(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &place) const override { + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { auto *x_var = scope.FindVar(Input("X")); PADDLE_ENFORCE(x_var != nullptr, "Input X must be set"); auto &x_tensor = x_var->Get(); @@ -108,8 +109,9 @@ class ShrinkRNNMemoryGradOp : public ArrayOp { const framework::AttributeMap &attrs) : ArrayOp(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &place) const override { + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { auto *dout_var = scope.FindVar(Input(framework::GradVarName("Out"))); auto *dx_var = scope.FindVar(Output(framework::GradVarName("X"))); PADDLE_ENFORCE(dx_var != nullptr, "Input Gradient should not be nullptr"); diff --git a/paddle/operators/split_lod_tensor_op.cc b/paddle/operators/split_lod_tensor_op.cc index bd93c49201..cd833889ed 100644 --- a/paddle/operators/split_lod_tensor_op.cc +++ b/paddle/operators/split_lod_tensor_op.cc @@ -33,8 +33,10 @@ class SplitLoDTensorOp : public framework::OperatorBase { const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &dev_place) const override { + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &dev_place) const override { auto &x = scope.FindVar(Input("X"))->Get(); auto &mask = scope.FindVar(Input("Mask"))->Get(); auto *out_true = diff --git a/paddle/operators/tensor_array_read_write_op.cc b/paddle/operators/tensor_array_read_write_op.cc index a70be8b875..af3d9b7cc3 100644 --- a/paddle/operators/tensor_array_read_write_op.cc +++ b/paddle/operators/tensor_array_read_write_op.cc @@ -24,8 +24,9 @@ class WriteToArrayOp : public ArrayOp { const framework::AttributeMap &attrs) : ArrayOp(type, inputs, outputs, attrs) {} - void Run(const framework::Scope &scope, - const platform::Place &place) const override { + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { auto *x = scope.FindVar(Input("X")); if (x == nullptr) return; auto &x_tensor = x->Get(); @@ -122,8 +123,10 @@ class ReadFromArrayOp : public ArrayOp { const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : ArrayOp(type, inputs, outputs, attrs) 
{}
-  void Run(const framework::Scope &scope,
-           const platform::Place &place) const override {
+
+ private:
+  void RunImpl(const framework::Scope &scope,
+               const platform::Place &place) const override {
     auto *x = scope.FindVar(Input("X"));
     PADDLE_ENFORCE(x != nullptr, "X must be set");
     auto &x_array = x->Get();
diff --git a/paddle/operators/while_op.cc b/paddle/operators/while_op.cc
index a744ebd615..06b0c77485 100644
--- a/paddle/operators/while_op.cc
+++ b/paddle/operators/while_op.cc
@@ -39,8 +39,9 @@ class WhileOp : public framework::OperatorBase {
           const framework::AttributeMap &attrs)
       : framework::OperatorBase(type, inputs, outputs, attrs) {}

-  void Run(const framework::Scope &scope,
-           const platform::Place &dev_place) const override {
+ private:
+  void RunImpl(const framework::Scope &scope,
+               const platform::Place &dev_place) const override {
     PADDLE_ENFORCE_NOT_NULL(scope.FindVar(Input(kCondition)));
     auto &cond = scope.FindVar(Input(kCondition))->Get();
     PADDLE_ENFORCE_EQ(cond.dims(), paddle::framework::make_ddim({1}));
@@ -99,8 +100,9 @@ class WhileGradOp : public framework::OperatorBase {
               const framework::AttributeMap &attrs)
       : framework::OperatorBase(type, inputs, outputs, attrs) {}

-  void Run(const framework::Scope &scope,
-           const platform::Place &dev_place) const override {
+ private:
+  void RunImpl(const framework::Scope &scope,
+               const platform::Place &dev_place) const override {
     // get device context from pool
     platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
     auto &dev_ctx = *pool.Get(dev_place);
--
GitLab

From 3cdb419b15b13cdf29803aef9e5b4fd28cca930e Mon Sep 17 00:00:00 2001
From: chengduoZH
Date: Fri, 9 Feb 2018 16:44:06 +0800
Subject: [PATCH 014/217] add doc for prior box

---
 python/paddle/v2/fluid/layers/nn.py | 158 ++++++++++++++++++++++++----
 1 file changed, 137 insertions(+), 21 deletions(-)

diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py
index dc1839fd82..0d944c332b 100644
--- a/python/paddle/v2/fluid/layers/nn.py
+++ b/python/paddle/v2/fluid/layers/nn.py
@@ -22,7 +22,6 @@ from ..param_attr import ParamAttr
 from layer_function_generator import autodoc
 from tensor import concat
 import math
-import numpy as np
 from operator import mul

 __all__ = [
@@ -3006,10 +3005,43 @@ def reshape_with_axis(input, axis):
     """
     **ReshapeWithAxis Layer**

-    """
-    assert len(input.shape) > axis and axis >= 0, ' '
+    Merge adjacent dimensions of the input according to axis. axis must be an
+    increasing list of dimension indices; the dimensions between two adjacent
+    indices are merged into one, and everything before the first index is
+    folded into the leading -1 dimension.
+
+    Args:
+       input(variable): The input tensor.
+       axis(list): The increasing dimension indices at which adjacent
+                   dimensions are merged.
+
+    Returns:
+        Variable: A tensor variable.
+
+    Examples:
+        .. code-block:: python

+          x = fluid.layers.data(name="data", shape=[3, 32, 32], dtype="float32")
+          reshaped = fluid.layers.reshape_with_axis(input=x, axis=[2])
+          reshaped.shape
+           >> [-1, 1024]
+          reshaped = fluid.layers.reshape_with_axis(input=x, axis=[1,3])
+          reshaped.shape
+           >> [-1, 96, 32]
+    """
+    assert isinstance(axis, list), "axis should be a list."
+    assert len(input.shape) > len(
+        axis), "the length of axis should be less than the length of input.shape."
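    # As a concrete illustration of the merge performed below (mirroring the
    # docstring example above): for x = fluid.layers.data(shape=[3, 32, 32]),
    # input.shape is [-1, 3, 32, 32]; with axis = [1, 3] the loop computes
    # new_shape = [-1, 3 * 32, 32] = [-1, 96, 32], merging dims 1 and 2 and
    # keeping dim 3.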
    input_shape = input.shape
-    new_dim = [-1, reduce(mul, input_shape[axis:len(input_shape)], 1)]
+    temp = 0
+    for ax in axis:
+        assert ax < len(input.shape) and ax > 0, \
+            'each element of axis should be in range [1, len(input.shape))'
+        assert ax > temp, 'axis should be an increasing sequence'
+        temp = ax
+    axis += [len(input.shape)]
+
+    new_shape = []
+    for i in range(len(axis) - 1):
+        new_shape += [reduce(mul, input_shape[axis[i]:axis[i + 1]], 1)]
+    new_shape = [-1] + new_shape

     helper = LayerHelper('reshape', **locals())
     out = helper.create_tmp_variable(helper.input_dtype())
@@ -3017,14 +3049,28 @@ def reshape_with_axis(input, axis):
         type='reshape',
         inputs={'X': [input]},
         outputs={'Out': [out]},
-        attrs={'shape': new_dim})
+        attrs={'shape': new_shape})
     return out


-def reshape(input, new_dim):
+def reshape(input, new_shape):
     """
     **Reshape Layer**

+    Reshape the input according to new_shape.
+
+    Args:
+       input(variable): The input tensor.
+       new_shape(list): The new shape of the input.
+
+    Returns:
+        Variable: A tensor variable.
+
+    Examples:
+        .. code-block:: python
+
+          x = fluid.layers.data(name="data", shape=[3, 32, 32], dtype="float32")
+          reshaped = fluid.layers.reshape(input=x, new_shape=[-1, 1024])
     """
     helper = LayerHelper('reshape', **locals())
     out = helper.create_tmp_variable(helper.input_dtype())
@@ -3051,6 +3097,44 @@ def prior_box(input,
     """
     **Prior_box**

+    Generate prior boxes for the SSD (Single Shot MultiBox Detector) algorithm.
+    Each position of the input produces N prior boxes, where N is determined by
+    the count of min_sizes, max_sizes and aspect_ratios. The size of each
+    box lies in the interval (min_size, max_size), and the boxes are generated
+    in sequence according to the aspect_ratios.
+
+    Args:
+       input(variable): The input feature data of PriorBox, the layout is NCHW.
+       image(variable): The input image data of PriorBoxOp, the layout is NCHW.
+       min_sizes(list): the min sizes of generated prior boxes.
+       max_sizes(list): the max sizes of generated prior boxes.
+       aspect_ratios(list): the aspect ratios of generated prior boxes.
+       variance(list): the variances to be encoded in prior boxes.
+       flip(bool): Whether to flip aspect ratios.
+       clip(bool): Whether to clip out-of-boundary boxes.
+       step_w(list): Prior boxes step across width, 0 for auto calculation.
+       step_h(list): Prior boxes step across height, 0 for auto calculation.
+       offset(float): Prior boxes center offset.
+       name(str): Name of the prior box layer.
+
+    Returns:
+        boxes(variable): the output prior boxes of PriorBoxOp. The layout is
+             [H, W, num_priors, 4]. H is the height of input, W is the width
+             of input, num_priors is the box count of each position.
+        variances(variable): the expanded variances of PriorBoxOp. The layout
+             is [H, W, num_priors, 4]. H is the height of input, W is the width
+             of input, num_priors is the box count of each position.
+
+    Examples:
+        .. code-block:: python
+
+          data = fluid.layers.data(name="data", shape=[3, 32, 32], dtype="float32")
+          conv2d = fluid.layers.conv2d(
+              input=data, num_filters=2, filter_size=3)
+          box, var = fluid.layers.prior_box(conv2d, data,
+                                            min_size, max_size, aspect_ratio,
+                                            variance, flip, clip,
+                                            step_w, step_h, offset)
     """
     helper = LayerHelper("prior_box", **locals())
     dtype = helper.input_dtype()
@@ -3093,19 +3177,51 @@ def prior_boxes(input_layers,
                 name=None):
     """
     **Prior_boxes**

-    e.g.
-    prior_boxes(
-        input_layers = [conv1, conv2, conv3, conv4, conv5, conv6],
-        image = data,
-        min_ratio = 0.2,
-        max_ratio = 0.9,
-        steps = [8., 16., 32., 64., 100., 300.],
-        aspect_ratios = [[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
-        min_dim = 300,
-        offset = 0.5,
-        variance = [0.1],
-        flip=True,
-        clip=True)
+
+    Generate prior boxes for the SSD (Single Shot MultiBox Detector) algorithm.
+    Each position of the input produces N prior boxes, where N is determined by
+    the count of min_sizes, max_sizes and aspect_ratios. The size of each
+    box lies in the interval (min_size, max_size), and the boxes are generated
+    in sequence according to the aspect_ratios.
+
+    Args:
+       input_layers(list): The list of input variables; the layout of all
+            variables is NCHW.
+       image(variable): The input image data of PriorBoxOp, the layout is NCHW.
+       min_ratio(float): the min ratio used to derive the min sizes of the
+            generated prior boxes.
+       max_ratio(float): the max ratio used to derive the max sizes of the
+            generated prior boxes.
+       aspect_ratios(list): the aspect ratios of generated prior boxes.
+       min_dim(int): the size of the base input image; it is combined with
+            min_ratio and max_ratio to compute the box sizes for each layer.
+       step_w(list): Prior boxes step across width, 0 for auto calculation.
+       step_h(list): Prior boxes step across height, 0 for auto calculation.
+       offset(float): Prior boxes center offset.
+       variance(list): the variances to be encoded in prior boxes.
+       flip(bool): Whether to flip aspect ratios.
+       clip(bool): Whether to clip out-of-boundary boxes.
+       name(str): Name of the prior box layer.
+
+    Returns:
+        boxes(variable): the output prior boxes of PriorBoxOp. The layout is
+             [num_priors, 4]. num_priors is the total box count of each
+             position of input_layers.
+        variances(variable): the expanded variances of PriorBoxOp. The layout
+             is [num_priors, 4]. num_priors is the total box count of each
+             position of input_layers.
+
+    Examples:
+        .. code-block:: python
+
+          prior_boxes(
+             input_layers = [conv1, conv2, conv3, conv4, conv5, conv6],
+             image = data,
+             min_ratio = 0.2,
+             max_ratio = 0.9,
+             steps = [8., 16., 32., 64., 100., 300.],
+             aspect_ratios = [[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
+             min_dim = 300,
+             offset = 0.5,
+             variance = [0.1,0.1,0.1,0.1],
+             flip=True,
+             clip=True)
     """
     assert isinstance(input_layers, list), 'input_layer should be a list.'
     num_layer = len(input_layers)
@@ -3168,8 +3284,8 @@ def prior_boxes(input_layers,
         reshaped_boxes = []
         reshaped_vars = []
         for i in range(len(box_results)):
-            reshaped_boxes += [reshape_with_axis(box_results[i], axis=axis)]
-            reshaped_vars += [reshape_with_axis(var_results[i], axis=axis)]
+            reshaped_boxes += [reshape_with_axis(box_results[i], axis=[axis])]
+            reshaped_vars += [reshape_with_axis(var_results[i], axis=[axis])]

         helper = LayerHelper("concat", **locals())
         dtype = helper.input_dtype()
--
GitLab

From e0e545344a8212f62ab21f771d0693d3bcc80e45 Mon Sep 17 00:00:00 2001
From: Yancey1989
Date: Fri, 9 Feb 2018 20:16:55 +0800
Subject: [PATCH 015/217] refine the code

---
 paddle/operators/concat_op.h | 56 ++++--------------------------
 paddle/operators/split_op.h | 29 +++-------------
 paddle/operators/strided_memcpy.h | 57 +++++++++++++++++++++++++++++++
 3 files changed, 68 insertions(+), 74 deletions(-)

diff --git a/paddle/operators/concat_op.h b/paddle/operators/concat_op.h
index 92ee8d3b18..2ee9912a3b 100644
--- a/paddle/operators/concat_op.h
+++ b/paddle/operators/concat_op.h
@@ -17,6 +17,7 @@ limitations under the License.
*/ #include #include "paddle/framework/ddim.h" #include "paddle/framework/op_registry.h" +#include "paddle/operators/strided_memcpy.h" namespace paddle { namespace operators { @@ -32,34 +33,13 @@ class ConcatKernel : public framework::OpKernel { out->mutable_data(place); auto out_stride = framework::stride_numel(out->dims()); - int64_t before = out_stride[0] / out_stride[axis]; - int64_t out_after = out_stride[axis]; size_t output_offset = 0; for (auto* in : ins) { auto in_stride = framework::stride_numel(in->dims()); - int64_t in_after = in_stride[axis]; - for (int64_t i = 0; i < before; ++i) { - if (platform::is_cpu_place(place)) { - auto& cpu_place = boost::get(place); - memory::Copy( - cpu_place, out->data() + output_offset + i * out_after, - cpu_place, in->data() + i * in_after, sizeof(T) * in_after); - } else { -#ifdef PADDLE_WITH_CUDA - auto& gpu_place = boost::get(place); - auto& cuda_ctx = - reinterpret_cast(dev_ctx); - memory::Copy(gpu_place, out->data() + - output_offset + i * out_after, - gpu_place, in->data() + i * in_after, - sizeof(T) * in_after, cuda_ctx.stream())); -#else - PADDLE_THROW("Paddle is not compiled with GPU"); -#endif - } - } - output_offset += in_after; + StridedNumelCopyWithAxis(ctx, axis, out->data() + output_offset, + out_stride, in->data(), in_stride); + output_offset += in_stride[axis]; } } }; @@ -73,35 +53,13 @@ class ConcatGradKernel : public framework::OpKernel { int64_t axis = static_cast(ctx.Attr("axis")); size_t input_offset = 0; auto in_stride = framework::stride_numel(in->dims()); - auto place = ctx.GetPlace(); - // numel before the specified axis - int64_t before = in_stride[0] / in_stride[axis]; - int64_t in_after = in_stride[axis]; for (auto& out : outs) { out->mutable_data(ctx.GetPlace()); auto out_stride = framework::stride_numel(out->dims()); - int64_t out_after = out_stride[axis]; - for (int64_t i = 0; i < before; ++i) { - if (platform::is_cpu_place(place)) { - auto& cpu_place = boost::get(place); - memory::Copy(cpu_place, out->data() + i * out_after, cpu_place, - in->data() + input_offset + i * in_after, - sizeof(T) * out_after); - } else { -#ifdef PADDLE_WITH_CUDA - auto& gpu_place = boost::get(place); - auto& cuda_ctx = - reinterpret_cast(dev_ctx); - memory::Copy(gpu_place, out->data() + i * out_after, gpu_place, - in->data() + input_offset + i * in_after, - sizeof(T) * out_after, cuda_ctx.stream()); -#else - PADDLE_THROW("Paddle is not compiled with GPU"); -#endif - } - } - input_offset += out_after; + StridedNumelCopyWithAxis(ctx, axis, out->data(), out_stride, + in->data() + input_offset, in_stride); + input_offset += out_stride[axis]; } } }; diff --git a/paddle/operators/split_op.h b/paddle/operators/split_op.h index 7fe9357eb5..e239c9cf30 100644 --- a/paddle/operators/split_op.h +++ b/paddle/operators/split_op.h @@ -18,6 +18,7 @@ limitations under the License. 
*/

#include
#include "paddle/framework/ddim.h"
#include "paddle/framework/op_registry.h"
+#include "paddle/operators/strided_memcpy.h"

 namespace paddle {
 namespace operators {

@@ -26,41 +27,19 @@ template
 class SplitOpKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    // auto start = std::chrono::steady_clock::now();
     auto* in = ctx.Input("X");
     auto outs = ctx.MultiOutput("Out");
     auto in_stride = framework::stride_numel(in->dims());
     int64_t axis = static_cast(ctx.Attr("axis"));
     auto place = ctx.GetPlace();

-    // numel before the specified axis
-    int64_t before = in_stride[0] / in_stride[axis];
-    int64_t in_after = in_stride[axis];
     size_t input_offset = 0;
     for (auto& out : outs) {
       out->mutable_data(ctx.GetPlace());
       auto out_stride = framework::stride_numel(out->dims());
-      int64_t out_after = out_stride[axis];
-      for (int64_t i = 0; i < before; ++i) {
-        if (platform::is_cpu_place(place)) {
-          auto& cpu_place = boost::get(place);
-          memory::Copy(cpu_place, out->data() + i * out_after, cpu_place,
-                       in->data() + input_offset + i * in_after,
-                       sizeof(T) * out_after);
-        } else {
-#ifdef PADDLE_WITH_CUDA
-          auto& gpu_place = boost::get(place);
-          auto& cuda_ctx =
-              reinterpret_cast(dev_ctx);
-          memory::Copy(gpu_place, out->data() + i * out_after, gpu_place,
-                       in->data() + input_offset + i * in_after,
-                       sizeof(T) * out_after, cuda_ctx.stream());
-#else
-          PADDLE_THROW("Paddle is not compiled with GPU");
-#endif
-        }
-      }
-      input_offset += out_after;
+      StridedNumelCopyWithAxis(ctx, axis, out->data(), out_stride,
+                               in->data() + input_offset, in_stride);
+      input_offset += out_stride[axis];
     }
   }
 };
diff --git a/paddle/operators/strided_memcpy.h b/paddle/operators/strided_memcpy.h
index 735cabcd97..49795db91d 100644
--- a/paddle/operators/strided_memcpy.h
+++ b/paddle/operators/strided_memcpy.h
@@ -41,5 +41,62 @@ inline void StridedMemcpy(const platform::DeviceContext& dev_ctx, const T* src,
   StridedCopyDimVisitor func(dev_ctx, src, src_stride, dst_stride, dst);
   boost::apply_visitor(func, dst_dim);
 }
+
+// Strided numel memory copy from src to dst by the specified axis
+//
+// For example, for a tensor with dims [4, 20, 100], the strided numel is
+// [8000, 2000, 100]
+//
+// NOTE: The src and dst tensors should have the same shape
+// except along the specified axis.
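To make the stride-numel bookkeeping concrete before the implementation below, here is a small CPU-only sketch in plain C++ (StrideNumel and StridedNumelCopy are hand-rolled stand-ins for framework::stride_numel and the function that follows, not the real API):

#include <cstdint>
#include <cstring>
#include <vector>

// For dims {d0, d1, ..., dn-1}, stride_numel[i] = di * d(i+1) * ... * d(n-1).
// E.g. dims {4, 20, 100} -> stride numel {8000, 2000, 100}.
std::vector<int64_t> StrideNumel(const std::vector<int64_t>& dims) {
  std::vector<int64_t> s(dims.size());
  int64_t acc = 1;
  for (int i = static_cast<int>(dims.size()) - 1; i >= 0; --i) {
    acc *= dims[i];
    s[i] = acc;
  }
  return s;
}

// Both tensors are viewed as `before` rows; every row contributes
// `src_stride[axis]` contiguous elements, placed at the same row of dst
// (dst may be wider along `axis`, as in concat).
void StridedNumelCopy(int64_t axis, float* dst,
                      const std::vector<int64_t>& dst_stride,
                      const float* src,
                      const std::vector<int64_t>& src_stride) {
  int64_t before = dst_stride[0] / dst_stride[axis];
  int64_t src_after = src_stride[axis];
  int64_t dst_after = dst_stride[axis];
  for (int64_t i = 0; i < before; ++i) {
    std::memcpy(dst + i * dst_after, src + i * src_after,
                sizeof(float) * src_after);
  }
}

int main() {
  // Concat two [4, 10, 100] tensors into one [4, 20, 100] along axis 1.
  std::vector<float> a(4 * 10 * 100, 1.f), b(4 * 10 * 100, 2.f);
  std::vector<float> out(4 * 20 * 100);
  auto in_s = StrideNumel({4, 10, 100});
  auto out_s = StrideNumel({4, 20, 100});
  StridedNumelCopy(1, out.data(), out_s, a.data(), in_s);
  // The second input starts at offset in_s[1], mirroring
  // `output_offset += in_stride[axis]` in the concat kernel above.
  StridedNumelCopy(1, out.data() + in_s[1], out_s, b.data(), in_s);
}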
+template
+inline void StridedNumelCopyWithAxis(const framework::ExecutionContext& ctx,
+                                     int64_t axis, T* dst,
+                                     const framework::DDim& dst_stride_numel,
+                                     const T* src,
+                                     const framework::DDim& src_stride_numel) {
+  int64_t before = dst_stride_numel[0] / dst_stride_numel[axis];
+  int64_t src_after = src_stride_numel[axis];
+  int64_t dst_after = dst_stride_numel[axis];
+  auto place = ctx.GetPlace();
+
+  PADDLE_ENFORCE_EQ(src_stride_numel.size(), dst_stride_numel.size(),
+                    "src and dst tensor should have the same dims size.");
+
+  for (int64_t i = 0; i < src_stride_numel.size(); ++i) {
+    if (i < axis) {
+      PADDLE_ENFORCE_EQ(src_stride_numel[i] / src_stride_numel[axis],
+                        dst_stride_numel[i] / dst_stride_numel[axis],
+                        "src and dst should have the same shape, "
+                        "except along the specified axis.");
+    } else if (i == axis) {
+      continue;
+    } else {
+      PADDLE_ENFORCE_EQ(src_stride_numel[i], dst_stride_numel[i],
+                        "src and dst should have the same shape, "
+                        "except along the specified axis.");
+    }
+  }
+
+  for (int64_t i = 0; i < before; ++i) {
+    if (platform::is_cpu_place(place)) {
+      auto& cpu_place = boost::get(place);
+      memory::Copy(cpu_place, dst + i * dst_after, cpu_place,
+                   src + i * src_after, sizeof(T) * src_after);
+    } else {
+#ifdef PADDLE_WITH_CUDA
+      auto& gpu_place = boost::get(place);
+      auto& cuda_ctx =
+          reinterpret_cast(ctx);
+      memory::Copy(gpu_place, dst + i * dst_after, gpu_place,
+                   src + i * src_after, sizeof(T) * src_after,
+                   cuda_ctx.stream());
+#else
+      PADDLE_THROW("Paddle is not compiled with GPU");
+#endif
+    }
+  }
+}
+
 } // namespace operators
 } // namespace paddle
--
GitLab

From b368f13ef4cbbec0699cb9f2c44b10339039146e Mon Sep 17 00:00:00 2001
From: wanghaoshuang
Date: Fri, 9 Feb 2018 20:41:49 +0800
Subject: [PATCH 016/217] Fix output dims of sequence expand op

---
 paddle/operators/sequence_expand_op.cc | 4 +++-
 .../paddle/v2/fluid/tests/test_sequence_expand.py | 15 +++++++++++++++
 2 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/paddle/operators/sequence_expand_op.cc b/paddle/operators/sequence_expand_op.cc
index d34dbd35b6..d2a386ffbe 100644
--- a/paddle/operators/sequence_expand_op.cc
+++ b/paddle/operators/sequence_expand_op.cc
@@ -29,7 +29,9 @@ class SequenceExpandOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE(ctx->HasOutput("Out"));
     PADDLE_ENFORCE(ctx->HasInput("Y"));
     framework::DDim out_dim;
-    out_dim = ctx->GetInputDim("Y");
+    auto y_dim = ctx->GetInputDim("Y");
+    out_dim = ctx->GetInputDim("X");
+    out_dim[0] = y_dim[0];
     ctx->ShareLoD("Y", "Out");
     ctx->SetOutputDim("Out", out_dim);
   }
diff --git a/python/paddle/v2/fluid/tests/test_sequence_expand.py b/python/paddle/v2/fluid/tests/test_sequence_expand.py
index 6fc045125f..0d37751de4 100644
--- a/python/paddle/v2/fluid/tests/test_sequence_expand.py
+++ b/python/paddle/v2/fluid/tests/test_sequence_expand.py
@@ -73,5 +73,20 @@ class TestSequenceExpandCase3(TestSequenceExpand):
         self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)}


+class TestSequenceExpandCase4(TestSequenceExpand):
+    def set_data(self):
+        x_data = np.array(
+            [0.1, 0.3, 0.2, 0.15, 0.25, 0.2, 0.15, 0.25, 0.1, 0.3]).reshape(
+                [2, 5]).astype('float32')
+        x_lod = [[
+            0,
+            1,
+            2,
+        ]]
+        y_data = np.random.uniform(0.1, 1, [2, 1]).astype('float32')
+        y_lod = [[0, 1, 2], [0, 1, 2]]
+        self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)}
+
+
 if __name__ == '__main__':
     unittest.main()
--
GitLab

From 1c91574bbdf5b7a46ad5cba35908354b178bf0e4 Mon Sep 17 00:00:00 2001
From: Yang Yang
Date: Fri, 9 Feb 2018 21:35:14 +0000
Subject: [PATCH 017/217] backward
 insert callback pass compile

---
 python/paddle/v2/fluid/backward.py | 46 ++++++++++++++++++++++++++++--
 1 file changed, 44 insertions(+), 2 deletions(-)

diff --git a/python/paddle/v2/fluid/backward.py b/python/paddle/v2/fluid/backward.py
index 29243c90e8..2946ef1967 100644
--- a/python/paddle/v2/fluid/backward.py
+++ b/python/paddle/v2/fluid/backward.py
@@ -199,6 +199,47 @@ def _remove_no_grad_branch_(op_descs, no_grad_set):
     return op_descs


+def _callback_lookup_(op):
+    """
+    Only used in _append_backward_ops_. Builds and returns a callback
+    function for a certain op. For example
+
+    parallel_do:    AllReduce
+
+    :param op: the forward op to build a callback for
+    :return: callback function
+    """
+    print(op.type)
+    if op.type == 'parallel_do':
+        param_names = set(op.input('parameters'))
+        param_grad_names = [n + "@GRAD" for n in param_names]
+
+        class ParallelDoCallBack(object):
+            def __init__(self, param_grad_names):
+                self.has_inserted_nccl_init = False
+                self.param_grad_names = param_grad_names
+
+            def __call__(self, block, context):
+                # TODO(tonyyang-svail): insert nccl init
+
+                for o_param in context.output_names():
+                    for o_argu in context.output(o_param):
+                        if o_argu in self.param_grad_names:
+                            print("reduce", o_argu)
+                            op_desc = block.desc.append_op()
+                            framework.Operator(
+                                block,
+                                type='fill_constant',
+                                desc=op_desc,
+                                inputs={},
+                                attrs={'shape': [1], },
+                                outputs={'Out': [block.create_var()]})
+
+        return ParallelDoCallBack(param_grad_names)
+    else:
+        return None
+
+
 def _append_backward_ops_(block,
                           ops,
                           target_block,
@@ -239,7 +280,8 @@ def _append_backward_ops_(block,
             sub_block = program.block(op.block_attr("sub_block"))
             grad_sub_block = program.create_block(parent_idx=sub_block.idx)
             _append_backward_ops_(sub_block, sub_block.ops, grad_sub_block,
-                                  no_grad_dict, grad_to_var)
+                                  no_grad_dict, grad_to_var,
+                                  _callback_lookup_(op))
             grad_sub_block_list.append(grad_sub_block.desc)

         # Getting op's corresponding grad_op
@@ -258,7 +300,7 @@ def _append_backward_ops_(block,
         for op_desc in grad_op_descs:
             new_op_desc = target_block.desc.append_op()
             new_op_desc.copy_from(op_desc)
-            callback(block=target_block, context=grad_to_var)
+            callback(block=target_block, context=new_op_desc)


 def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map):
--
GitLab

From 672cdc21a0e95a59590be1b6be376f73ad9ba116 Mon Sep 17 00:00:00 2001
From: Yang Yang
Date: Fri, 9 Feb 2018 21:52:48 +0000
Subject: [PATCH 018/217] add nccl

---
 CMakeLists.txt | 1 -
 paddle/framework/executor.cc | 6 ++++--
 paddle/framework/framework.proto | 1 +
 paddle/operators/nccl_op.cc | 16 ++++++++--------
 paddle/platform/CMakeLists.txt | 2 +-
 paddle/platform/dynload/CMakeLists.txt | 2 +-
 paddle/pybind/protobuf.cc | 3 ++-
 paddle/scripts/docker/build.sh | 5 ++++-
 8 files changed, 21 insertions(+), 15 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3a21574b85..37556a37a0 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -141,7 +141,6 @@ include(external/boost) # download boost
 include(external/any) # download libn::any
 include(external/eigen) # download eigen3
 include(external/pybind11) # download pybind11
-include(external/nccl)
 include(external/cares)
 include(external/grpc)

diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc
index 2a88e5a929..c604fdcc7b 100644
--- a/paddle/framework/executor.cc
+++ b/paddle/framework/executor.cc
@@ -23,6 +23,7 @@ limitations under the License.
*/ #include "paddle/framework/lod_tensor_array.h" #include "paddle/framework/op_registry.h" #include "paddle/framework/reader.h" +#include "paddle/operators/nccl/nccl_gpu_common.h" // platform::Communicator #include "paddle/platform/place.h" #include "paddle/platform/profiler.h" @@ -53,6 +54,8 @@ static void CreateTensor(Variable* var, proto::VarDesc::VarType var_type) { var->GetMutable(); } else if (var_type == proto::VarDesc::PLACE_LIST) { var->GetMutable(); + } else if (var_type == proto::VarDesc::NCCL_COM) { + var->GetMutable(); } else if (var_type == proto::VarDesc::READER) { var->GetMutable(); } else { @@ -118,13 +121,12 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id, for (auto& op_desc : block.AllOps()) { auto op = paddle::framework::OpRegistry::CreateOp(*op_desc); - VLOG(4) << op->DebugStringEx(local_scope); + VLOG(3) << op->DebugStringEx(local_scope); platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); platform::RecordEvent record_event(op->Type(), pool.Get(place_)); op->Run(*local_scope, place_); - VLOG(3) << op->DebugStringEx(local_scope); if (FLAGS_benchmark) { VLOG(2) << "Memory used after operator " + op->Type() + " running: " << memory::memory_usage(place_); diff --git a/paddle/framework/framework.proto b/paddle/framework/framework.proto index d7be1a7352..1e3db1a3ba 100644 --- a/paddle/framework/framework.proto +++ b/paddle/framework/framework.proto @@ -129,6 +129,7 @@ message VarDesc { LOD_TENSOR_ARRAY = 7; PLACE_LIST = 8; READER = 9; + NCCL_COM = 10; } required string name = 1; required VarType type = 2; diff --git a/paddle/operators/nccl_op.cc b/paddle/operators/nccl_op.cc index 9d51153b06..83ac67f353 100644 --- a/paddle/operators/nccl_op.cc +++ b/paddle/operators/nccl_op.cc @@ -31,8 +31,13 @@ class NCCLInitOp : public framework::OperatorBase { const auto &name = Output("Communicator"); PADDLE_ENFORCE_NOT_NULL(scope.FindVar(name), "Can not find variable '%s' in the scope.", name); - std::vector gpus = Attr>("gpus"); - PADDLE_ENFORCE(!gpus.empty(), "Attr(gpus) should not be empty."); + + int count = platform::GetCUDADeviceCount(); + std::vector gpus(count); + for (int i = 0; i < count; ++i) { + gpus[i] = i; + } + PADDLE_ENFORCE(!gpus.empty(), "NCCL init with 0 gpus."); if (scope.FindVar(name) == nullptr) { PADDLE_THROW("Output(Communicator) is needed for ncclInit operator."); @@ -50,11 +55,6 @@ class NCCLInitOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddOutput("Communicator", "Create Communicator for communicating between gpus"); - AddAttr>("gpus", "(vector) GPU id lists"); - AddAttr("dtype", - "(int, default 5 (FP32)) " - "Output data type") - .SetDefault(framework::proto::DataType::FP32); AddComment(R"DOC( NCCLInit Operator. 
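Dropping the gpus attribute changes who decides the communicator's extent: it is now always the full visible device set. A rough pure-Python restatement of the new enumeration logic, offered as a sketch only (device_count stands in for platform::GetCUDADeviceCount, which is not callable from here):

    def build_gpu_list(device_count):
        # gpus = [0, 1, ..., count - 1], exactly as the C++ loop fills it.
        gpus = list(range(device_count))
        if not gpus:
            # Mirrors PADDLE_ENFORCE(!gpus.empty(), "NCCL init with 0 gpus.")
            raise RuntimeError("NCCL init with 0 gpus.")
        return gpus

    assert build_gpu_list(4) == [0, 1, 2, 3]

The removed dtype attribute appears to have gone unused by the kernel, so the op maker shrinks to the single Communicator output.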
@@ -77,7 +77,7 @@ class NCCLAllReduceOp : public framework::OperatorWithKernel { ctx->HasInput("Communicator"), " Input(Communicator) of AllReduce op input should not be NULL"); PADDLE_ENFORCE(ctx->HasOutput("Out"), - " Input(X) of AllReduce op input should not be NULL"); + " Output(Out) of AllReduce op output should not be NULL"); auto x_dims = ctx->GetInputsDim("X"); diff --git a/paddle/platform/CMakeLists.txt b/paddle/platform/CMakeLists.txt index 5ce4b3de39..b91fd4cf54 100644 --- a/paddle/platform/CMakeLists.txt +++ b/paddle/platform/CMakeLists.txt @@ -1,5 +1,5 @@ if(WITH_GPU) - cc_library(enforce SRCS enforce.cc DEPS nccl) + cc_library(enforce SRCS enforce.cc DEPS) else() cc_library(enforce SRCS enforce.cc) endif() diff --git a/paddle/platform/dynload/CMakeLists.txt b/paddle/platform/dynload/CMakeLists.txt index cf2081b434..264b4ebf2c 100644 --- a/paddle/platform/dynload/CMakeLists.txt +++ b/paddle/platform/dynload/CMakeLists.txt @@ -1,4 +1,4 @@ cc_library(dynamic_loader SRCS dynamic_loader.cc DEPS glog gflags enforce) nv_library(dynload_cuda SRCS cublas.cc cudnn.cc curand.cc nccl.cc - DEPS dynamic_loader nccl) + DEPS dynamic_loader) cc_library(dynload_warpctc SRCS warpctc.cc DEPS dynamic_loader warpctc) diff --git a/paddle/pybind/protobuf.cc b/paddle/pybind/protobuf.cc index 0a92e10927..02aeae8b3d 100644 --- a/paddle/pybind/protobuf.cc +++ b/paddle/pybind/protobuf.cc @@ -241,7 +241,8 @@ void BindVarDsec(py::module &m) { .value("LOD_RANK_TABLE", proto::VarDesc::LOD_RANK_TABLE) .value("LOD_TENSOR_ARRAY", proto::VarDesc::LOD_TENSOR_ARRAY) .value("PLACE_LIST", proto::VarDesc::PLACE_LIST) - .value("READER", proto::VarDesc::READER); + .value("READER", proto::VarDesc::READER) + .value("NCCL_COM", proto::VarDesc::NCCL_COM); } void BindOpDesc(py::module &m) { diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index ba496db5f8..26ecb128eb 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -34,6 +34,7 @@ function cmake_gen() { Configuring cmake in /paddle/build ... -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-Release} ${PYTHON_FLAGS} + -DWITH_DSO=ON -DWITH_DOC=OFF -DWITH_GPU=${WITH_GPU:-OFF} -DWITH_DISTRIBUTE=${WITH_DISTRIBUTE:-OFF} @@ -57,6 +58,7 @@ EOF cmake .. 
\ -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-Release} \ ${PYTHON_FLAGS} \ + -DWITH_DSO=ON \ -DWITH_DOC=OFF \ -DWITH_GPU=${WITH_GPU:-OFF} \ -DWITH_DISTRIBUTE=${WITH_DISTRIBUTE:-OFF} \ @@ -173,7 +175,7 @@ EOF if [[ ${WITH_GPU} == "ON" ]]; then NCCL_DEPS="apt-get install -y libnccl-dev &&" else - NCCL_DEPS="" + NCCL_DEPS="" fi cat >> /paddle/build/Dockerfile < Date: Fri, 9 Feb 2018 21:53:45 +0000 Subject: [PATCH 019/217] disable ncclInit infer shape & var type --- python/paddle/v2/fluid/framework.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/v2/fluid/framework.py index a12427258e..36f20d9155 100644 --- a/python/paddle/v2/fluid/framework.py +++ b/python/paddle/v2/fluid/framework.py @@ -490,7 +490,7 @@ class Operator(object): 'feed', 'fetch', 'save', 'load', 'recurrent', 'rnn_memory_helper_grad', 'conditional_block', 'while', 'send', 'recv', 'listen_and_serv', 'parallel_do', 'save_combine', - 'load_combine' + 'load_combine', 'ncclInit' } if type not in no_kernel_op_set: self.desc.infer_var_type(self.block.desc) -- GitLab From 3498434bccaf65fdb8cf59ccc07d2a6900d38188 Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Sat, 10 Feb 2018 10:17:40 +0800 Subject: [PATCH 020/217] fix ci --- paddle/operators/concat_op.h | 10 ++++++---- paddle/operators/split_op.h | 5 +++-- paddle/operators/strided_memcpy.h | 4 ++-- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/paddle/operators/concat_op.h b/paddle/operators/concat_op.h index 2ee9912a3b..ae10b81ea7 100644 --- a/paddle/operators/concat_op.h +++ b/paddle/operators/concat_op.h @@ -37,8 +37,9 @@ class ConcatKernel : public framework::OpKernel { size_t output_offset = 0; for (auto* in : ins) { auto in_stride = framework::stride_numel(in->dims()); - StridedNumelCopyWithAxis(ctx, axis, out->data() + output_offset, - out_stride, in->data(), in_stride); + StridedNumelCopyWithAxis(ctx.device_context(), axis, + out->data() + output_offset, out_stride, + in->data(), in_stride); output_offset += in_stride[axis]; } } @@ -57,8 +58,9 @@ class ConcatGradKernel : public framework::OpKernel { for (auto& out : outs) { out->mutable_data(ctx.GetPlace()); auto out_stride = framework::stride_numel(out->dims()); - StridedNumelCopyWithAxis(ctx, axis, out->data(), out_stride, - in->data() + input_offset, in_stride); + StridedNumelCopyWithAxis(ctx.device_context(), axis, out->data(), + out_stride, in->data() + input_offset, + in_stride); input_offset += out_stride[axis]; } } diff --git a/paddle/operators/split_op.h b/paddle/operators/split_op.h index e239c9cf30..b956808ef9 100644 --- a/paddle/operators/split_op.h +++ b/paddle/operators/split_op.h @@ -37,8 +37,9 @@ class SplitOpKernel : public framework::OpKernel { for (auto& out : outs) { out->mutable_data(ctx.GetPlace()); auto out_stride = framework::stride_numel(out->dims()); - StridedNumelCopyWithAxis(ctx, axis, out->data(), out_stride, - in->data() + input_offset, in_stride); + StridedNumelCopyWithAxis(ctx.device_context(), axis, out->data(), + out_stride, in->data() + input_offset, + in_stride); input_offset += out_stride[axis]; } } diff --git a/paddle/operators/strided_memcpy.h b/paddle/operators/strided_memcpy.h index 49795db91d..ddecfd76dd 100644 --- a/paddle/operators/strided_memcpy.h +++ b/paddle/operators/strided_memcpy.h @@ -50,7 +50,7 @@ inline void StridedMemcpy(const platform::DeviceContext& dev_ctx, const T* src, // NOTE: The src and dst tensor should have the same elements // except the specified axis. 
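Concretely, for concatenating two [2, 3] tensors along axis 1 into a [2, 6] destination, before = 12 / 6 = 2, src_after = 3 and dst_after = 6, and each of the two slices is one contiguous memcpy. A NumPy re-derivation of that addressing, offered as a sketch only (the flat arrays imitate the raw T* buffers):

    import numpy as np

    def strided_numel_copy(dst, dst_after, src, src_after, before, offset):
        # One contiguous copy per "before" slice, like the C++ loop.
        for i in range(before):
            d = i * dst_after + offset
            s = i * src_after
            dst[d:d + src_after] = src[s:s + src_after]

    a = np.arange(6.0)        # flat [2, 3] tensor
    b = np.arange(6.0) + 10   # flat [2, 3] tensor
    out = np.zeros(12)        # flat [2, 6] destination
    strided_numel_copy(out, 6, a, 3, before=2, offset=0)
    strided_numel_copy(out, 6, b, 3, before=2, offset=3)
    expected = np.concatenate([a.reshape(2, 3), b.reshape(2, 3)], axis=1)
    assert (out.reshape(2, 6) == expected).all()

The offset argument plays the role of the accumulated output_offset in ConcatKernel above; SplitOpKernel walks the same addressing in the opposite direction with its input_offset.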
template -inline void StridedNumelCopyWithAxis(const framework::ExecutionContext& ctx, +inline void StridedNumelCopyWithAxis(const platform::DeviceContext& ctx, int64_t axis, T* dst, const framework::DDim& dst_stride_numel, const T* src, @@ -88,7 +88,7 @@ inline void StridedNumelCopyWithAxis(const framework::ExecutionContext& ctx, auto& gpu_place = boost::get(place); auto& cuda_ctx = reinterpret_cast(ctx); - memory::Copy(cpu_place, dst + i * dst_after, cpu_place, + memory::Copy(gpu_place, dst + i * dst_after, gpu_place, src + i * src_after, sizeof(T) * src_after, cuda_ctx.stream()); #else -- GitLab From f2129b193e91094b5e2a9faf8207599d3f2abd41 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Sat, 10 Feb 2018 02:35:45 +0000 Subject: [PATCH 021/217] pass run time --- python/paddle/v2/fluid/backward.py | 57 ++++++++++++++++++++++-------- 1 file changed, 43 insertions(+), 14 deletions(-) diff --git a/python/paddle/v2/fluid/backward.py b/python/paddle/v2/fluid/backward.py index 2946ef1967..34383827fd 100644 --- a/python/paddle/v2/fluid/backward.py +++ b/python/paddle/v2/fluid/backward.py @@ -199,6 +199,15 @@ def _remove_no_grad_branch_(op_descs, no_grad_set): return op_descs +import proto.framework_pb2 as framework_pb2 + + +def serialize_op_decs(op_desc): + protostr = op_desc.serialize_to_string() + proto = framework_pb2.OpDesc.FromString(str(protostr)) + return proto.__str__() + + def _callback_lookup_(op): """ Only used in _append_backward_ops_ @@ -209,7 +218,6 @@ def _callback_lookup_(op): :param op: :return: callback function """ - print(op.type) if op.type == 'parallel_do': param_names = set(op.input('parameters')) param_grad_names = [n + "@GRAD" for n in param_names] @@ -220,20 +228,38 @@ def _callback_lookup_(op): self.param_grad_names = param_grad_names def __call__(self, block, context): - # TODO(tonyyang-svail): insert nccl init - - for o_param in context.output_names(): - for o_argu in context.output(o_param): + # move to parallel_do.py + # # TODO(tonyyang-svail): insert nccl init + if not self.has_inserted_nccl_init: + global_block = block.program.global_block() + op_desc = global_block.desc.append_op() + var_desc = global_block.desc.var('nccl_com') + var_desc.set_type(core.VarDesc.VarType.NCCL_COM) + self.nccl_com = global_block.create_var( + name='nccl_com', type=core.VarDesc.VarType.NCCL_COM) + framework.Operator( + global_block, + type='ncclInit', + desc=op_desc, + inputs={}, + outputs={'Communicator': [self.nccl_com]}) + self.has_inserted_nccl_init = True + + current_op_desc = context["__current_op_desc__"] + # print(serialize_op_decs(context)) + for o_param in current_op_desc.output_names(): + for o_argu in current_op_desc.output(o_param): if o_argu in self.param_grad_names: - print("reduce", o_argu) + # print("reduce", o_argu) op_desc = block.desc.append_op() - framework.Operator( - block, - type='fill_constant', - desc=op_desc, - inputs={}, - attrs={'shape': [1], }, - outputs={'Out': [block.create_var()]}) + op_desc.set_type("ncclAllReduce") + op_desc.set_input("X", [o_argu]) + # FIXME(tonyyang-svail): + # Looks like nccl_com has been changed to nccl_com_0 + op_desc.set_input("Communicator", ['nccl_com_0']) + out_var = block.create_var() + op_desc.set_output("Out", [out_var.name]) + op_desc.set_attr("reduction", "ncclSum") return ParallelDoCallBack(param_grad_names) else: @@ -300,7 +326,8 @@ def _append_backward_ops_(block, for op_desc in grad_op_descs: new_op_desc = target_block.desc.append_op() new_op_desc.copy_from(op_desc) - callback(block=target_block, 
context=new_op_desc) + grad_to_var["__current_op_desc__"] = new_op_desc + callback(block=target_block, context=grad_to_var) def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map): @@ -336,6 +363,8 @@ def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map): continue grad_info_map[grad_to_var[grad_var_name]] = (grad_var_name, block) # infer_shape and infer_type + if op_desc.type() == 'ncclInit': + continue op_desc.infer_var_type(block.desc) op_desc.infer_shape(block.desc) for arg in op_desc.output_arg_names(): -- GitLab From 0815c0f141d1df2088ed3c5a5391662bb4484e3d Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Sat, 10 Feb 2018 03:16:02 +0000 Subject: [PATCH 022/217] add assign op --- python/paddle/v2/fluid/backward.py | 36 ++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/python/paddle/v2/fluid/backward.py b/python/paddle/v2/fluid/backward.py index 34383827fd..40c54bf220 100644 --- a/python/paddle/v2/fluid/backward.py +++ b/python/paddle/v2/fluid/backward.py @@ -228,8 +228,6 @@ def _callback_lookup_(op): self.param_grad_names = param_grad_names def __call__(self, block, context): - # move to parallel_do.py - # # TODO(tonyyang-svail): insert nccl init if not self.has_inserted_nccl_init: global_block = block.program.global_block() op_desc = global_block.desc.append_op() @@ -250,16 +248,30 @@ def _callback_lookup_(op): for o_param in current_op_desc.output_names(): for o_argu in current_op_desc.output(o_param): if o_argu in self.param_grad_names: - # print("reduce", o_argu) - op_desc = block.desc.append_op() - op_desc.set_type("ncclAllReduce") - op_desc.set_input("X", [o_argu]) - # FIXME(tonyyang-svail): - # Looks like nccl_com has been changed to nccl_com_0 - op_desc.set_input("Communicator", ['nccl_com_0']) - out_var = block.create_var() - op_desc.set_output("Out", [out_var.name]) - op_desc.set_attr("reduction", "ncclSum") + # # print("reduce", o_argu) + # op_desc = block.desc.append_op() + # op_desc.set_type("ncclAllReduce") + # op_desc.set_input("X", [o_argu]) + # + # # FIXME(tonyyang-svail): + # # Looks like nccl_com has been changed to nccl_com_0 + # op_desc.set_input("Communicator", ['nccl_com_0']) + # out_var = block.create_var() + # op_desc.set_output("Out", [out_var.name]) + # op_desc.set_attr("reduction", "ncclSum") + allreduce_out_name = o_argu + "__nccl_all_reduce__" + op_desc = _create_op_desc_( + "ncclAllReduce", { + "X": [o_argu], + "Communicator": ['nccl_com_0'] + }, {"Out": [allreduce_out_name]}, + {"reduction": "ncclSum"}) + block.desc.append_op().copy_from(op_desc) + + op_desc = _create_op_desc_( + "assign", {"X": [allreduce_out_name]}, + {"Out": [o_argu]}, {}) + block.desc.append_op().copy_from(op_desc) return ParallelDoCallBack(param_grad_names) else: -- GitLab From 4a8559c0ccf237ec09c6434accc6e2b76a5e4d06 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Fri, 9 Feb 2018 20:26:25 +0800 Subject: [PATCH 023/217] follow comments and code refine --- paddle/operators/prior_box_op.cc | 8 +- python/paddle/v2/fluid/layers/nn.py | 153 +++++++++++++--------------- 2 files changed, 72 insertions(+), 89 deletions(-) diff --git a/paddle/operators/prior_box_op.cc b/paddle/operators/prior_box_op.cc index 82b4eb1528..064543c2b4 100644 --- a/paddle/operators/prior_box_op.cc +++ b/paddle/operators/prior_box_op.cc @@ -51,11 +51,11 @@ class PriorBoxOp : public framework::OperatorWithKernel { if (max_sizes.size() > 0) { PADDLE_ENFORCE_EQ(max_sizes.size(), min_sizes.size(), "The number of min_size and max_size 
must be equal."); - for (size_t i = 0; i < min_sizes.size(); ++i) { + num_priors += max_sizes.size(); + for (size_t i = 0; i < max_sizes.size(); ++i) { PADDLE_ENFORCE_GT(max_sizes[i], min_sizes[i], "max_size[%d] must be greater than min_size[%d].", i, i); - num_priors += 1; } } @@ -125,13 +125,13 @@ class PriorBoxOpMaker : public framework::OpProtoAndCheckerMaker { .SetDefault(true); AddAttr("step_w", - "Prior boxes step across width, 0 for auto calculation.") + "Prior boxes step across width, 0.0 for auto calculation.") .SetDefault(0.0) .AddCustomChecker([](const float& step_w) { PADDLE_ENFORCE_GE(step_w, 0.0, "step_w should be larger than 0."); }); AddAttr("step_h", - "Prior boxes step across height, 0 for auto calculation.") + "Prior boxes step across height, 0.0 for auto calculation.") .SetDefault(0.0) .AddCustomChecker([](const float& step_h) { PADDLE_ENFORCE_GE(step_h, 0.0, "step_h should be larger than 0."); diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index b1b3da46b9..f0bcddaf9a 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -66,7 +66,6 @@ __all__ = [ 'nce', 'beam_search', 'row_conv', - 'reshape', 'reshape_with_axis', 'multiplex', 'prior_box', @@ -3103,12 +3102,11 @@ def reshape_with_axis(input, axis): """ **ReshapeWithAxis Layer** - According to the axis to merge the adjacent dim of input. Currently, the axis of - reshape_with_axis must be a scalar. + ReshapeWithAxis is used to merge adjacent dimensions according to axis. Args: input(variable): The input tensor. - axis(list): According to the axis to merge the adjacent dim. + axis(list): The axis which is used to merge the adjacent dimensions. Returns: Variable: A tensor variable. @@ -3117,7 +3115,7 @@ def reshape_with_axis(input, axis): .. code-block:: python x = fluid.layers.data(name="data", shape=[3, 32, 32], dtype="float32") - reshaped = fluid.layers.reshape_with_axis(input=x, axis=2) + reshaped = fluid.layers.reshape_with_axis(input=x, axis=[2]) reshaped.shape >> [-1, 1024] reshaped = fluid.layers.reshape_with_axis(input=x, axis=[1,3]) @@ -3151,46 +3149,17 @@ def reshape_with_axis(input, axis): return out -def reshape(input, new_shape): - """ - **Reshape Layer** - - Reshape the shape of input according to new_dim. - - Args: - input(variable): The input tensor. - new_shape(list): The new shape of input. - - Returns: - Variable: A tensor variable. - - Examples: - .. code-block:: python - - x = fluid.layers.data(name="data", shape=[3, 32, 32], dtype="float32") - reshaped = fluid.layers.reshape(input=x, new_shape=[-1, 1024]) - """ - helper = LayerHelper('reshape', **locals()) - out = helper.create_tmp_variable(helper.input_dtype()) - helper.append_op( - type='reshape', - inputs={'X': [input]}, - outputs={'Out': [out]}, - attrs={'shape': new_dim}) - return out - - def prior_box(input, image, min_sizes, max_sizes, aspect_ratios, variance, - flip, - clip, - step_w, - step_h, - offset, + flip=False, + clip=False, + step_w=0.0, + step_h=0.0, + offset=0.5, name=None): """ **Prior_box** @@ -3202,27 +3171,33 @@ def prior_box(input, sequence according to the aspect_ratios. Args: - input(variable): The input feature data of PriorBox, the layout is NCHW. - image(variable): The input image data of PriorBoxOp, the layout is NCHW. + input(variable): The input feature data of PriorBox, + the layout is NCHW. + image(variable): The input image data of PriorBox, the + layout is NCHW. min_sizes(list): the min sizes of generated prior boxes. 
max_sizes(list): the max sizes of generated prior boxes. aspect_ratios(list): the aspect ratios of generated prior boxes. variance(list): the variances to be encoded in prior boxes. - flip(bool): Whether to flip aspect ratios. - clip(bool): Whether to clip out-of-boundary boxes. - step_w(list): Prior boxes step across width, 0 for auto calculation. - step_h(list): Prior boxes step across height, 0 for auto calculation. - offset(float): Prior boxes center offset. - name(str): Name of the prior box layer. + flip(bool, optional, default=False): Whether to flip aspect ratios. + clip(bool, optional, default=False)): Whether to clip + out-of-boundary boxes. + step_w(int, optional, default=0.0): Prior boxes step across + width, 0.0 for auto calculation. + step_h(int, optional, default=0.0): Prior boxes step across + height, 0.0 for auto calculation. + offset(float, optional, default=0.5): Prior boxes center offset. + name(str, optional, default=None): Name of the prior box layer. Returns: boxes(variable): the output prior boxes of PriorBoxOp. The layout is [H, W, num_priors, 4]. H is the height of input, W is the width - of input, num_priors is the box count of each position. + of input, num_priors is the box count of each position. Where num_priors = + len(aspect_ratios) * len(min_sizes) + len(max_sizes) Variances(variable): the expanded variances of PriorBoxOp. The layout is [H, W, num_priors, 4]. H is the height of input, W is the width - of input, num_priors is the box count of each position. - + of input, num_priors is the box count of each position. Where num_priors = + len(aspect_ratios) * len(min_sizes) + len(max_sizes) Examples: .. code-block:: python @@ -3259,70 +3234,78 @@ def prior_box(input, return box, var -def prior_boxes(input_layers, +def prior_boxes(inputs, image, min_ratio, max_ratio, aspect_ratios, - min_dim, + base_size, steps=None, step_w=None, step_h=None, offset=0.5, variance=[0.1, 0.1, 0.1, 0.1], - flip=True, - clip=True, + flip=False, + clip=False, name=None): """ **Prior_boxes** Generate prior boxes for SSD(Single Shot MultiBox Detector) algorithm. - Each position of the input produce N prior boxes, N is determined by - the count of min_sizes, max_sizes and aspect_ratios, The size of the - box is in range(min_size, max_size) interval, which is generated in + Each position of the inputs produces many prior boxes respectly, the number + of prior boxes which is produced by inputs respectly is determined by + the count of min_ratio, max_ratio and aspect_ratios, The size of the + box is in range(min_ratio, max_ratio) interval, which is generated in sequence according to the aspect_ratios. Args: - input(list): The list of input variables, the format of all variables is NCHW. + inputs(list): The list of input variables, the format of all variables is NCHW. image(variable): The input image data of PriorBoxOp, the layout is NCHW. - min_ratio(list): the min sizes of generated prior boxes. - max_ratio(list): the max sizes of generated prior boxes. + min_ratio(int): the min ratio of generated prior boxes. + max_ratio(int): the max ratio of generated prior boxes. aspect_ratios(list): the aspect ratios of generated prior boxes. - min_dim(int): - step_w(list): Prior boxes step across width, 0 for auto calculation. - step_h(list): Prior boxes step across height, 0 for auto calculation. - offset(float): Prior boxes center offset. - variance(list): the variances to be encoded in prior boxes. - flip(bool): Whether to flip aspect ratios. - clip(bool): Whether to clip out-of-boundary boxes. 
- name(str): Name of the prior box layer. + The length of input and aspect_ratios must be equal. + base_size(int): the base_size is used to get min_size and max_size + according to min_ratio and max_ratio. + step_w(list, optional, default=None): Prior boxes step across width. + If step_w[i] == 0.0, the prior boxes step across width of the inputs[i] + will be automatically calculated. + step_h(list, optional, default=None): Prior boxes step across height, + If step_h[i] == 0.0, the prior boxes step across height of the inputs[i] + will be automatically calculated. + offset(float, optional, default=0.5): Prior boxes center offset. + variance(list, optional, default=[0.1, 0.1, 0.1, 0.1]): the variances + to be encoded in prior boxes. + flip(bool, optional, default=False): Whether to flip aspect ratios. + clip(bool, optional, default=False): Whether to clip out-of-boundary boxes. + name(str, optional, None): Name of the prior box layer. Returns: boxes(variable): the output prior boxes of PriorBoxOp. The layout is [num_priors, 4]. num_priors is the total box count of each - position of input_layers. + position of inputs. Variances(variable): the expanded variances of PriorBoxOp. The layout is [num_priors, 4]. num_priors is the total box count of each - position of input_layers + position of inputs Examples: .. code-block:: python prior_boxes( - input_layers = [conv1, conv2, conv3, conv4, conv5, conv6], + inputs = [conv1, conv2, conv3, conv4, conv5, conv6], image = data, - min_ratio = 0.2, - max_ratio = 0.9, + min_ratio = 20, # 0.20 + max_ratio = 90, # 0.90 steps = [8., 16., 32., 64., 100., 300.], aspect_ratios = [[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]], - min_dim = 300, + base_size = 300, offset = 0.5, variance = [0.1,0.1,0.1,0.1], flip=True, clip=True) """ - assert isinstance(input_layers, list), 'input_layer should be a list.' - num_layer = len(input_layers) + assert isinstance(inputs, list), 'inputs should be a list.' + num_layer = len(inputs) assert num_layer > 2 # TODO(zcd): currently, num_layer must be bigger than two. min_sizes = [] @@ -3330,30 +3313,30 @@ def prior_boxes(input_layers, if num_layer > 2: step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2))) for ratio in xrange(min_ratio, max_ratio + 1, step): - min_sizes.append(min_dim * ratio / 100.) - max_sizes.append(min_dim * (ratio + step) / 100.) - min_sizes = [min_dim * .10] + min_sizes - max_sizes = [min_dim * .20] + max_sizes + min_sizes.append(base_size * ratio / 100.) + max_sizes.append(base_size * (ratio + step) / 100.) 
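+        # Worked example (illustration, not part of the original commit):
+        # with base_size = 300, min_ratio = 20, max_ratio = 90 and
+        # num_layer = 6, step = floor((90 - 20) / (6 - 2)) = 17, so the
+        # loop above visits ratios 20, 37, 54, 71, 88 and yields
+        #     min_sizes = [60.0, 111.0, 162.0, 213.0, 264.0]
+        #     max_sizes = [111.0, 162.0, 213.0, 264.0, 315.0]
+        # The two lines below then prepend the dedicated first-layer sizes
+        # 300 * .10 = 30 and 300 * .20 = 60.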
+ min_sizes = [base_size * .10] + min_sizes + max_sizes = [base_size * .20] + max_sizes if step_h: assert isinstance(step_h,list) and len(step_h) == num_layer, \ - 'step_h should be list and input_layers and step_h should have same length' + 'step_h should be list and inputs and step_h should have same length' if step_w: assert isinstance(step_w,list) and len(step_w) == num_layer, \ - 'step_w should be list and input_layers and step_w should have same length' + 'step_w should be list and inputs and step_w should have same length' if steps: assert isinstance(steps,list) and len(steps) == num_layer, \ - 'steps should be list and input_layers and step_w should have same length' + 'steps should be list and inputs and step_w should have same length' step_w = steps step_h = steps if aspect_ratios: assert isinstance(aspect_ratios, list) and len(aspect_ratios) == num_layer, \ - 'aspect_ratios should be list and input_layers and aspect_ratios should ' \ + 'aspect_ratios should be list and inputs and aspect_ratios should ' \ 'have same length' box_results = [] var_results = [] - for i, input in enumerate(input_layers): + for i, input in enumerate(inputs): min_size = min_sizes[i] max_size = max_sizes[i] aspect_ratio = [] -- GitLab From df7c29e5165847f9958903f8c492f566b3df63fc Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sat, 10 Feb 2018 20:54:50 +0800 Subject: [PATCH 024/217] override comparison operators in Python for Variable --- python/paddle/v2/fluid/layers/math_op_patch.py | 6 +++++- python/paddle/v2/fluid/learning_rate_decay.py | 4 ++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/python/paddle/v2/fluid/layers/math_op_patch.py b/python/paddle/v2/fluid/layers/math_op_patch.py index 9b5f22759c..4cf995ec85 100644 --- a/python/paddle/v2/fluid/layers/math_op_patch.py +++ b/python/paddle/v2/fluid/layers/math_op_patch.py @@ -151,7 +151,11 @@ def monkey_patch_variable(): ("__div__", "elementwise_div", False), ("__rdiv__", "elementwise_div", True), ("__pow__", "elementwise_pow", False), - ("__rpow__", "elementwise_pow", True)): + ("__rpow__", "elementwise_pow", True), + # for logical compare + ("__eq__", "equal", False), + ("__lt__", "less_then", False), + ("__le__", "less_equal", False), ): setattr(Variable, method_name, _elemwise_method_creator_(method_name, op_type, reverse)) diff --git a/python/paddle/v2/fluid/learning_rate_decay.py b/python/paddle/v2/fluid/learning_rate_decay.py index 2a2a29fd9c..0826d3da79 100644 --- a/python/paddle/v2/fluid/learning_rate_decay.py +++ b/python/paddle/v2/fluid/learning_rate_decay.py @@ -179,7 +179,7 @@ def polynomial_decay(learning_rate, shape=[1], dtype='float32', value=1.0) with layers.Switch() as switch: - with switch.case(layers.equal(x=global_step, y=zero_var)): + with switch.case(global_step == zero_var): layers.assign(input=one_var, output=div_res) decay_steps = decay_steps * div_res else: @@ -229,7 +229,7 @@ def piecewise_decay(global_step, boundaries, values): shape=[1], dtype='float32', value=float(boundaries[i])) value_var = layers.fill_constant( shape=[1], dtype='float32', value=float(values[i])) - with switch.case(layers.less_than(global_step, boundary_val)): + with switch.case(global_step < boundary_val): layers.assign(value_var, lr) last_value_var = layers.fill_constant( shape=[1], -- GitLab From 6ed545b0d8386c0a4ed0d978bb15cf9acd29c42f Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sat, 10 Feb 2018 21:09:53 +0800 Subject: [PATCH 025/217] fix typo --- python/paddle/v2/fluid/layers/math_op_patch.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/python/paddle/v2/fluid/layers/math_op_patch.py b/python/paddle/v2/fluid/layers/math_op_patch.py index 4cf995ec85..5301c3d1de 100644 --- a/python/paddle/v2/fluid/layers/math_op_patch.py +++ b/python/paddle/v2/fluid/layers/math_op_patch.py @@ -154,7 +154,7 @@ def monkey_patch_variable(): ("__rpow__", "elementwise_pow", True), # for logical compare ("__eq__", "equal", False), - ("__lt__", "less_then", False), + ("__lt__", "less_than", False), ("__le__", "less_equal", False), ): setattr(Variable, method_name, _elemwise_method_creator_(method_name, op_type, reverse)) -- GitLab From 4b3fadc1cdcd3d2cbc4c7cf63d4a96d7db45fce6 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sat, 10 Feb 2018 22:16:27 +0800 Subject: [PATCH 026/217] init test_python_operator_overriding.py --- .../tests/test_python_operator_overriding.py | 54 +++++++++++++++++++ 1 file changed, 54 insertions(+) create mode 100644 python/paddle/v2/fluid/tests/test_python_operator_overriding.py diff --git a/python/paddle/v2/fluid/tests/test_python_operator_overriding.py b/python/paddle/v2/fluid/tests/test_python_operator_overriding.py new file mode 100644 index 0000000000..b985ae3e29 --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_python_operator_overriding.py @@ -0,0 +1,54 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
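This new test exercises the comparison table that math_op_patch installs. The mechanism itself is small enough to restate standalone; a sketch with plain Python objects rather than fluid Variables (FakeVar and its method names are invented for illustration):

    class FakeVar(object):
        # Comparison returns an op record instead of a bool, mimicking
        # how the monkey patch emits one compare operator per dunder.
        def __init__(self, name):
            self.name = name

        def _binary(self, op_type, other):
            return ("op", op_type, self.name, other.name)

        def __eq__(self, other):
            return self._binary("equal", other)

        def __lt__(self, other):
            return self._binary("less_than", other)

        def __le__(self, other):
            return self._binary("less_equal", other)

    a, b = FakeVar("x"), FakeVar("y")
    assert (a < b) == ("op", "less_than", "x", "y")

This is also why the less_then -> less_than fix above matters: the dunder table names real operator types, and a misspelled entry only fails once someone actually writes `a < b`.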
+ +import unittest + +import numpy +import paddle.v2.fluid.framework as framework +import paddle.v2.fluid as fluid + + +class TestPythonOperatorOverride(unittest.TestCase): + def check_result(self, fn, place, dtype='float32'): + shape = [9, 10] + + x_data = numpy.random.random(size=shape).astype(dtype) + y_data = numpy.random.random(size=shape).astype(dtype) + python_out = fn(x_data, y_data) + + x_var = fluid.layers.data(name='x', shape=shape, dtype=dtype) + y_var = fluid.layers.data(name='y', shape=shape, dtype=dtype) + out = fn(x_var, y_var) + + exe = fluid.Executor(place) + feeder = fluid.DataFeeder(feed_list=[x_var, y_var], place=place) + + exe.run(fluid.default_startup_program()) + fluid_out = exe.run(fluid.default_main_program(), + feed=feeder.feed([x_data, y_data]), + fetch_list=[out]) + + print(python_out) + self.assertAlmostEqual(python_out, fluid_out[0]) + + def test_override(self): + main_program = framework.Program() + startup_program = framework.Program() + with framework.program_guard(main_program, startup_program): + place = fluid.CPUPlace() + self.check_result(lambda _a, _b: _a == _b, place) + + +if __name__ == '__main__': + unittest.main() -- GitLab From d89e1449b7701d759b6e3180f12ea430320db18d Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sat, 10 Feb 2018 22:41:54 +0800 Subject: [PATCH 027/217] optimize test --- .../tests/test_python_operator_overriding.py | 36 ++++++++++++------- 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/python/paddle/v2/fluid/tests/test_python_operator_overriding.py b/python/paddle/v2/fluid/tests/test_python_operator_overriding.py index b985ae3e29..94f3fc958e 100644 --- a/python/paddle/v2/fluid/tests/test_python_operator_overriding.py +++ b/python/paddle/v2/fluid/tests/test_python_operator_overriding.py @@ -14,40 +14,52 @@ import unittest -import numpy +import numpy as np + +import paddle.v2.fluid.layers as layers import paddle.v2.fluid.framework as framework import paddle.v2.fluid as fluid class TestPythonOperatorOverride(unittest.TestCase): - def check_result(self, fn, place, dtype='float32'): + def check_result(self, fn, x_val, y_val, place, dtype): shape = [9, 10] - x_data = numpy.random.random(size=shape).astype(dtype) - y_data = numpy.random.random(size=shape).astype(dtype) + x_data = np.full(shape, x_val).astype(dtype) + y_data = np.full(shape, y_val).astype(dtype) python_out = fn(x_data, y_data) - x_var = fluid.layers.data(name='x', shape=shape, dtype=dtype) - y_var = fluid.layers.data(name='y', shape=shape, dtype=dtype) + x_var = layers.create_global_var( + shape=shape, value=x_val, dtype=dtype, persistable=True) + y_var = layers.create_global_var( + shape=shape, value=y_val, dtype=dtype, persistable=True) out = fn(x_var, y_var) exe = fluid.Executor(place) - feeder = fluid.DataFeeder(feed_list=[x_var, y_var], place=place) exe.run(fluid.default_startup_program()) fluid_out = exe.run(fluid.default_main_program(), - feed=feeder.feed([x_data, y_data]), + feed=[], fetch_list=[out]) - print(python_out) - self.assertAlmostEqual(python_out, fluid_out[0]) + np.testing.assert_array_equal(python_out, fluid_out[0]) def test_override(self): + cpu_place = fluid.CPUPlace() + test_data = [(lambda _a, _b: _a == _b, 0.1, 1.1, cpu_place, 'float32'), + (lambda _a, _b: _a == _b, 1.2, 1.1, cpu_place, 'float32'), + (lambda _a, _b: _a < _b, 0.1, 1.1, cpu_place, 'float32'), + (lambda _a, _b: _a < _b, 2.1, 1.1, cpu_place, 'float32'), + (lambda _a, _b: _a <= _b, 0.1, 1.1, cpu_place, 'float32'), + (lambda _a, _b: _a <= _b, 1.1, 1.1, cpu_place, 
'float32'), + (lambda _a, _b: _a >= _b, 1.1, 1.1, cpu_place, 'float32')] + main_program = framework.Program() startup_program = framework.Program() + with framework.program_guard(main_program, startup_program): - place = fluid.CPUPlace() - self.check_result(lambda _a, _b: _a == _b, place) + for fn, x_val, y_val, place, dtype in test_data: + self.check_result(fn, x_val, y_val, place, dtype) if __name__ == '__main__': -- GitLab From 0d57ca46ea06257447cc2a82839d64d94fc5e421 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Sat, 10 Feb 2018 23:31:12 +0000 Subject: [PATCH 028/217] nccl pass parallel_do test --- paddle/operators/nccl_op.cc | 21 +++++++++- paddle/operators/nccl_op.cu.cc | 8 ++++ paddle/operators/parallel_do_op.cc | 24 ++++++++++- python/paddle/v2/fluid/backward.py | 41 +++++++++++-------- python/paddle/v2/fluid/layers/control_flow.py | 6 ++- .../paddle/v2/fluid/tests/test_parallel_op.py | 33 +++++++++------ 6 files changed, 99 insertions(+), 34 deletions(-) diff --git a/paddle/operators/nccl_op.cc b/paddle/operators/nccl_op.cc index 83ac67f353..a906223f38 100644 --- a/paddle/operators/nccl_op.cc +++ b/paddle/operators/nccl_op.cc @@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include #include "paddle/framework/op_registry.h" #include "paddle/operators/nccl/nccl_gpu_common.h" @@ -49,6 +50,22 @@ class NCCLInitOp : public framework::OperatorBase { } }; +class NCCLInitOpVarTypeInference : public framework::VarTypeInference { + public: + void operator()(const framework::OpDesc &op_desc, + framework::BlockDesc *block) const override { + auto out_var_name = op_desc.Output("Communicator").front(); + auto &out_var = block->FindRecursiveOrCreateVar(out_var_name); + auto var_type = framework::proto::VarDesc::NCCL_COM; + out_var.SetType(var_type); + } +}; + +class NCCLInitOpShapeInference : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext *ctx) const override {} +}; + class NCCLInitOpMaker : public framework::OpProtoAndCheckerMaker { public: NCCLInitOpMaker(OpProto *proto, OpAttrChecker *op_checker) @@ -214,7 +231,9 @@ Bcast the tensors. namespace ops = paddle::operators; REGISTER_OPERATOR(ncclInit, ops::NCCLInitOp, - paddle::framework::EmptyGradOpMaker, ops::NCCLInitOpMaker); + paddle::framework::EmptyGradOpMaker, ops::NCCLInitOpMaker, + ops::NCCLInitOpVarTypeInference, + ops::NCCLInitOpShapeInference); REGISTER_OP_WITHOUT_GRADIENT(ncclAllReduce, ops::NCCLAllReduceOp, ops::NCCLAllReduceOpMaker); diff --git a/paddle/operators/nccl_op.cu.cc b/paddle/operators/nccl_op.cu.cc index 1b986a1365..b6db63ac6a 100644 --- a/paddle/operators/nccl_op.cu.cc +++ b/paddle/operators/nccl_op.cu.cc @@ -47,8 +47,11 @@ class NCCLAllReduceKernel : public framework::OpKernel { auto ins = ctx.MultiInput("X"); auto outs = ctx.MultiOutput("Out"); + LOG(INFO) << "------------------"; std::string reduction = ctx.Attr("reduction"); + LOG(INFO) << "------------------"; ncclRedOp_t reduction_op_ = ncclSum; + LOG(INFO) << "------------------"; if (reduction == "ncclMin") { reduction_op_ = ncclMin; @@ -62,14 +65,19 @@ class NCCLAllReduceKernel : public framework::OpKernel { PADDLE_THROW("Invalid reduction. 
default ncclSum."); } + LOG(INFO) << "------------------"; auto* comm = ctx.Input("Communicator"); + LOG(INFO) << "------------------"; auto stream = ctx.cuda_device_context().stream(); + LOG(INFO) << "------------------"; // device id int gpu_id = boost::get(ctx.GetPlace()).GetDeviceId(); + LOG(INFO) << "------------------"; int idx = comm->GetCommId(gpu_id); + LOG(INFO) << "------------------"; for (size_t i = 0; i < ins.size(); ++i) { VLOG(1) << "gpu : " << " invoke allreduce. send " << ins[i]->numel() << " recv " diff --git a/paddle/operators/parallel_do_op.cc b/paddle/operators/parallel_do_op.cc index 89045923f9..950a95ae36 100644 --- a/paddle/operators/parallel_do_op.cc +++ b/paddle/operators/parallel_do_op.cc @@ -30,6 +30,7 @@ static constexpr char kOutputs[] = "outputs"; static constexpr char kParallelScopes[] = "parallel_scopes"; static constexpr char kParallelBlock[] = "sub_block"; +static constexpr char kUseNCCL[] = "use_nccl"; using LoDTensor = framework::LoDTensor; using SelectedRows = framework::SelectedRows; @@ -159,6 +160,7 @@ class ParallelDoOp : public framework::OperatorBase { } WaitOnPlaces(places); + // PADDLE_ENFORCE_EQ(places.size(), sub_scopes.size()); std::vector> workers; workers.reserve(places.size()); for (size_t place_idx = 0; place_idx < sub_scopes.size(); ++place_idx) { @@ -202,6 +204,8 @@ class ParallelDoOpProtoMaker : public framework::OpProtoAndCheckerMaker { AddOutput(kOutputs, "").AsDuplicable(); AddOutput(kParallelScopes, ""); AddAttr(kParallelBlock, ""); + AddAttr(kUseNCCL, "true if we use nccl on backward") + .SetDefault(false); AddComment(R"DOC( ParallelDo Operator. )DOC"); @@ -223,20 +227,22 @@ class ParallelDoGradOp : public framework::OperatorBase { auto &sub_scopes = scope.FindVar(Input(kParallelScopes)) ->Get>(); - auto &places = scope.FindVar(Input(kPlaces))->Get(); + // PADDLE_ENFORCE_EQ(places.size(), sub_scopes.size()); // feed output@grad SplitTensorAndMoveTensorToScopes( scope, const_cast *>(&sub_scopes), places, Inputs(framework::GradVarName(kOutputs))); WaitOnPlaces(places); + LOG(INFO) << "places " << places.size(); // exe run std::vector> workers; for (size_t i = 0; i < sub_scopes.size(); ++i) { auto &place = places[i]; auto *cur_scope = sub_scopes[i]; + LOG(INFO) << place; // execute workers.emplace_back(framework::Async([program, cur_scope, place, block] { @@ -245,12 +251,26 @@ class ParallelDoGradOp : public framework::OperatorBase { false /*create_local_scope*/); })); } + LOG(INFO) << "places " << places.size(); for (auto &worker : workers) { worker.wait(); } WaitOnPlaces(places); - AccumulateGrad(scope, place, sub_scopes, places); + // NCCL allreduce op will be added by backward, + // so no need to explicitly accumulate grad + if (!(Attr(kUseNCCL))) { + AccumulateGrad(scope, place, sub_scopes, places); + } else { + for (auto &place : places) { + PADDLE_ENFORCE(platform::is_gpu_place(place), + "NCCL only supports cuda place"); + } + } + for (auto &s : Outputs(framework::GradVarName(kParameters))) { + CopyOrShare(*sub_scopes[0]->FindVar(s), place, scope.FindVar(s)); + } + WaitOnPlaces(places); } void AccumulateGrad(const framework::Scope &scope, diff --git a/python/paddle/v2/fluid/backward.py b/python/paddle/v2/fluid/backward.py index 40c54bf220..28768ef07f 100644 --- a/python/paddle/v2/fluid/backward.py +++ b/python/paddle/v2/fluid/backward.py @@ -218,7 +218,7 @@ def _callback_lookup_(op): :param op: :return: callback function """ - if op.type == 'parallel_do': + if op.type == 'parallel_do' and op.attr('use_nccl'): param_names = 
set(op.input('parameters')) param_grad_names = [n + "@GRAD" for n in param_names] @@ -229,18 +229,25 @@ def _callback_lookup_(op): def __call__(self, block, context): if not self.has_inserted_nccl_init: - global_block = block.program.global_block() - op_desc = global_block.desc.append_op() - var_desc = global_block.desc.var('nccl_com') - var_desc.set_type(core.VarDesc.VarType.NCCL_COM) - self.nccl_com = global_block.create_var( - name='nccl_com', type=core.VarDesc.VarType.NCCL_COM) - framework.Operator( - global_block, - type='ncclInit', - desc=op_desc, - inputs={}, - outputs={'Communicator': [self.nccl_com]}) + # global_block = block.program.global_block() + # op_desc = global_block.desc.append_op() + # var_desc = global_block.desc.var('nccl_com__do_not_change_') + # var_desc.set_type(core.VarDesc.VarType.NCCL_COM) + # self.nccl_com = global_block.create_var( + # name='nccl_com', type=core.VarDesc.VarType.NCCL_COM) + # framework.Operator( + # global_block, + # type='ncclInit', + # desc=op_desc, + # inputs={}, + # outputs={'Communicator': [self.nccl_com]}) + op_desc = _create_op_desc_( + "ncclInit", {}, + {"Communicator": ['nccl_com__do_not_change_']}, {}) + # block.desc.append_op().copy_from(op_desc) + print(serialize_op_decs(op_desc)) + block.program.global_block().desc.append_op().copy_from( + op_desc) self.has_inserted_nccl_init = True current_op_desc = context["__current_op_desc__"] @@ -263,7 +270,8 @@ def _callback_lookup_(op): op_desc = _create_op_desc_( "ncclAllReduce", { "X": [o_argu], - "Communicator": ['nccl_com_0'] + "Communicator": + ['nccl_com__do_not_change_'] }, {"Out": [allreduce_out_name]}, {"reduction": "ncclSum"}) block.desc.append_op().copy_from(op_desc) @@ -375,10 +383,11 @@ def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map): continue grad_info_map[grad_to_var[grad_var_name]] = (grad_var_name, block) # infer_shape and infer_type - if op_desc.type() == 'ncclInit': - continue op_desc.infer_var_type(block.desc) op_desc.infer_shape(block.desc) + # ncclInit dones't need to set data_type + if op_desc.type() == 'ncclInit': + continue for arg in op_desc.output_arg_names(): if arg in new_vars: _infer_var_data_type_(arg, block) diff --git a/python/paddle/v2/fluid/layers/control_flow.py b/python/paddle/v2/fluid/layers/control_flow.py index 71a9459d55..5c9c247066 100644 --- a/python/paddle/v2/fluid/layers/control_flow.py +++ b/python/paddle/v2/fluid/layers/control_flow.py @@ -237,12 +237,13 @@ class ParallelDo(object): ParallelDo class is used to create a ParallelDo. 
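        When constructed with use_nccl=True, gradient aggregation is
        delegated to the inserted ncclAllReduce ops instead of the default
        sum over sub-scopes. A sketch of the intended usage (layer sizes
        and variable names are illustrative only):

        .. code-block:: python

            places = fluid.layers.get_places()
            pd = fluid.layers.ParallelDo(places, use_nccl=True)
            with pd.do():
                img = pd.read_input(images)
                hidden = fluid.layers.fc(input=img, size=200)
                pd.write_output(fluid.layers.mean(x=hidden))
            avg_loss = pd()

        Note that the backward pass enforces CUDA places when use_nccl is
        set, as the parallel_do_op change above shows.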
""" - def __init__(self, places, name=None): + def __init__(self, places, use_nccl=False, name=None): self.helper = LayerHelper("parallel_do", name=name) self.inputs = [] self.places = places self.outputs = [] self.status = StaticRNN.BEFORE_RNN_BLOCK + self.use_nccl = use_nccl def do(self): return BlockGuardWithCompletion(self) @@ -325,7 +326,8 @@ class ParallelDo(object): }, outputs={'outputs': outputs, 'parallel_scopes': [step_scope]}, - attrs={'sub_block': current_block}) + attrs={'sub_block': current_block, + 'use_nccl': self.use_nccl}) class BlockGuardWithCompletion(BlockGuard): diff --git a/python/paddle/v2/fluid/tests/test_parallel_op.py b/python/paddle/v2/fluid/tests/test_parallel_op.py index 367cc8b1aa..8452d6835f 100644 --- a/python/paddle/v2/fluid/tests/test_parallel_op.py +++ b/python/paddle/v2/fluid/tests/test_parallel_op.py @@ -67,12 +67,25 @@ class BaseParallelForTest(unittest.TestCase): fetch=fetch, place=gpu, use_parallel=True) + result_gpu_nccl = self._run_test_impl_( + callback=callback, + feed=feed, + fetch=fetch, + place=gpu, + use_parallel=True, + use_nccl=True) self._assert_same_(fetch, result_cpu, result_cpu_parallel, - result_gpu, result_gpu_parallel) + result_gpu, result_gpu_parallel, result_gpu_nccl) else: self._assert_same_(fetch, result_cpu, result_cpu_parallel) - def _run_test_impl_(self, callback, feed, fetch, place, use_parallel=False): + def _run_test_impl_(self, + callback, + feed, + fetch, + place, + use_parallel=False, + use_nccl=False): """ Run a single test, returns the fetch values Args: @@ -96,7 +109,7 @@ class BaseParallelForTest(unittest.TestCase): # Automatically insert parallel do if use_parallel = True if use_parallel: places = fluid.layers.get_places() - pd = fluid.layers.ParallelDo(places) + pd = fluid.layers.ParallelDo(places, use_nccl=use_nccl) data = next(generator) if isinstance(data, fluid.Variable): @@ -137,7 +150,9 @@ class BaseParallelForTest(unittest.TestCase): """ def _impl_(a, b, fetch_id, item_id): - item_str = ['CPU', 'ParallelCPU', 'GPU', 'ParallelGPU'] + item_str = [ + 'CPU', 'ParallelCPU', 'GPU', 'ParallelGPU', 'ParallelGPUNCCL' + ] flag = numpy.allclose(a, b, rtol=0.1) self.assertTrue(flag, "The {0} are different in {1}".format( fetch[fetch_id], item_str[item_id])) @@ -157,18 +172,10 @@ class ParallelOpTest(BaseParallelForTest): loss = fluid.layers.mean(x=hidden) yield loss - def test_simple_fc(self): - self.run_test( - callback=self.__network__, - feed={ - 'img': numpy.random.random(size=(51, 784)).astype('float32') - }, - fetch=['fc1.w@GRAD']) - def test_fc_with_tiny_data(self): self.run_test( callback=self.__network__, - feed={'img': numpy.random.random(size=(1, 784)).astype('float32')}, + feed={'img': numpy.random.random(size=(8, 784)).astype('float32')}, fetch=['fc1.w@GRAD']) -- GitLab From bb3ae20664a1bba5ce1e6d45e3afff274095e9e1 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Sat, 10 Feb 2018 23:34:36 +0000 Subject: [PATCH 029/217] nccl pass parallel_do test --- python/paddle/v2/fluid/tests/test_parallel_op.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/python/paddle/v2/fluid/tests/test_parallel_op.py b/python/paddle/v2/fluid/tests/test_parallel_op.py index 8452d6835f..dc8c806074 100644 --- a/python/paddle/v2/fluid/tests/test_parallel_op.py +++ b/python/paddle/v2/fluid/tests/test_parallel_op.py @@ -172,12 +172,18 @@ class ParallelOpTest(BaseParallelForTest): loss = fluid.layers.mean(x=hidden) yield loss - def test_fc_with_tiny_data(self): + def test_simple_fc(self): self.run_test( 
callback=self.__network__, feed={'img': numpy.random.random(size=(8, 784)).astype('float32')}, fetch=['fc1.w@GRAD']) + def test_fc_with_tiny_data(self): + self.run_test( + callback=self.__network__, + feed={'img': numpy.random.random(size=(1, 784)).astype('float32')}, + fetch=['fc1.w@GRAD']) + class ParallelOpTestMultipleInput(BaseParallelForTest): @staticmethod -- GitLab From 4bb492e76c09c1bd11953d3893d559f88b9ea219 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Sun, 11 Feb 2018 00:11:26 +0000 Subject: [PATCH 030/217] pass tiny data --- paddle/operators/nccl_op.cc | 24 ++++++++++++++++++++---- python/paddle/v2/fluid/backward.py | 9 ++++++--- 2 files changed, 26 insertions(+), 7 deletions(-) diff --git a/paddle/operators/nccl_op.cc b/paddle/operators/nccl_op.cc index a906223f38..8e4edb78bb 100644 --- a/paddle/operators/nccl_op.cc +++ b/paddle/operators/nccl_op.cc @@ -19,6 +19,8 @@ limitations under the License. */ namespace paddle { namespace operators { +static constexpr char kParallelScopes[] = "parallel_scopes"; + // NCCLinitOp class NCCLInitOp : public framework::OperatorBase { public: @@ -29,24 +31,37 @@ class NCCLInitOp : public framework::OperatorBase { void Run(const framework::Scope &scope, const platform::Place &place) const override { + PADDLE_ENFORCE_NOT_NULL(scope.FindVar(Input(kParallelScopes)), + "Can not find variable '%s' in the scope.", + kParallelScopes); const auto &name = Output("Communicator"); PADDLE_ENFORCE_NOT_NULL(scope.FindVar(name), "Can not find variable '%s' in the scope.", name); - - int count = platform::GetCUDADeviceCount(); - std::vector gpus(count); - for (int i = 0; i < count; ++i) { + // A parallel do may not use all the gpus. For example, the batch size is 7 + // in the last batch while we have 8 gpu. In this case, parallel_do will + // create 7 parallel scopes, so should ncclInitOp create 7 gpu peers + LOG(INFO) << "---------------"; + auto ¶llel_scopes = scope.FindVar(Input(kParallelScopes)) + ->Get>(); + LOG(INFO) << "---------------"; + std::vector gpus(parallel_scopes.size()); + for (int i = 0; i < static_cast(parallel_scopes.size()); ++i) { gpus[i] = i; } + LOG(INFO) << "---------------"; PADDLE_ENFORCE(!gpus.empty(), "NCCL init with 0 gpus."); + LOG(INFO) << "---------------"; if (scope.FindVar(name) == nullptr) { PADDLE_THROW("Output(Communicator) is needed for ncclInit operator."); } + LOG(INFO) << "---------------"; platform::Communicator *comm = scope.FindVar(name)->GetMutable(); + LOG(INFO) << "---------------"; comm->InitAll(gpus); + LOG(INFO) << "---------------"; } }; @@ -70,6 +85,7 @@ class NCCLInitOpMaker : public framework::OpProtoAndCheckerMaker { public: NCCLInitOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput(kParallelScopes, "The working place of parallel do."); AddOutput("Communicator", "Create Communicator for communicating between gpus"); AddComment(R"DOC( diff --git a/python/paddle/v2/fluid/backward.py b/python/paddle/v2/fluid/backward.py index 28768ef07f..8ec9db81b3 100644 --- a/python/paddle/v2/fluid/backward.py +++ b/python/paddle/v2/fluid/backward.py @@ -223,9 +223,10 @@ def _callback_lookup_(op): param_grad_names = [n + "@GRAD" for n in param_names] class ParallelDoCallBack(object): - def __init__(self, param_grad_names): + def __init__(self, param_grad_names, parallel_scopes_name): self.has_inserted_nccl_init = False self.param_grad_names = param_grad_names + self.parallel_scopes_name = parallel_scopes_name def __call__(self, block, context): if not 
self.has_inserted_nccl_init: @@ -242,7 +243,8 @@ def _callback_lookup_(op): # inputs={}, # outputs={'Communicator': [self.nccl_com]}) op_desc = _create_op_desc_( - "ncclInit", {}, + "ncclInit", + {"parallel_scopes": self.parallel_scopes_name}, {"Communicator": ['nccl_com__do_not_change_']}, {}) # block.desc.append_op().copy_from(op_desc) print(serialize_op_decs(op_desc)) @@ -281,7 +283,8 @@ def _callback_lookup_(op): {"Out": [o_argu]}, {}) block.desc.append_op().copy_from(op_desc) - return ParallelDoCallBack(param_grad_names) + return ParallelDoCallBack(param_grad_names, + op.output("parallel_scopes")) else: return None -- GitLab From bfa78cacdfdf7988159419256432d5550a59c730 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Sun, 11 Feb 2018 00:11:56 +0000 Subject: [PATCH 031/217] clean up log(info) --- paddle/operators/nccl_op.cc | 7 ------- 1 file changed, 7 deletions(-) diff --git a/paddle/operators/nccl_op.cc b/paddle/operators/nccl_op.cc index 8e4edb78bb..ae912d7f36 100644 --- a/paddle/operators/nccl_op.cc +++ b/paddle/operators/nccl_op.cc @@ -40,28 +40,21 @@ class NCCLInitOp : public framework::OperatorBase { // A parallel do may not use all the gpus. For example, the batch size is 7 // in the last batch while we have 8 gpu. In this case, parallel_do will // create 7 parallel scopes, so should ncclInitOp create 7 gpu peers - LOG(INFO) << "---------------"; auto ¶llel_scopes = scope.FindVar(Input(kParallelScopes)) ->Get>(); - LOG(INFO) << "---------------"; std::vector gpus(parallel_scopes.size()); for (int i = 0; i < static_cast(parallel_scopes.size()); ++i) { gpus[i] = i; } - LOG(INFO) << "---------------"; PADDLE_ENFORCE(!gpus.empty(), "NCCL init with 0 gpus."); - LOG(INFO) << "---------------"; if (scope.FindVar(name) == nullptr) { PADDLE_THROW("Output(Communicator) is needed for ncclInit operator."); } - LOG(INFO) << "---------------"; platform::Communicator *comm = scope.FindVar(name)->GetMutable(); - LOG(INFO) << "---------------"; comm->InitAll(gpus); - LOG(INFO) << "---------------"; } }; -- GitLab From 3067114f3a08f39e44cd1e828381e06b633a7a48 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Sun, 11 Feb 2018 00:22:17 +0000 Subject: [PATCH 032/217] clean up --- paddle/fluid/operators/nccl_op.cu.cc | 8 ------ paddle/fluid/operators/parallel_do_op.cc | 5 ---- python/paddle/v2/fluid/backward.py | 25 ------------------- .../paddle/v2/fluid/tests/test_parallel_op.py | 4 ++- 4 files changed, 3 insertions(+), 39 deletions(-) diff --git a/paddle/fluid/operators/nccl_op.cu.cc b/paddle/fluid/operators/nccl_op.cu.cc index 7637c7ed16..333aed2903 100644 --- a/paddle/fluid/operators/nccl_op.cu.cc +++ b/paddle/fluid/operators/nccl_op.cu.cc @@ -47,11 +47,8 @@ class NCCLAllReduceKernel : public framework::OpKernel { auto ins = ctx.MultiInput("X"); auto outs = ctx.MultiOutput("Out"); - LOG(INFO) << "------------------"; std::string reduction = ctx.Attr("reduction"); - LOG(INFO) << "------------------"; ncclRedOp_t reduction_op_ = ncclSum; - LOG(INFO) << "------------------"; if (reduction == "ncclMin") { reduction_op_ = ncclMin; @@ -65,19 +62,14 @@ class NCCLAllReduceKernel : public framework::OpKernel { PADDLE_THROW("Invalid reduction. 
default ncclSum."); } - LOG(INFO) << "------------------"; auto* comm = ctx.Input("Communicator"); - LOG(INFO) << "------------------"; auto stream = ctx.cuda_device_context().stream(); - LOG(INFO) << "------------------"; // device id int gpu_id = boost::get(ctx.GetPlace()).GetDeviceId(); - LOG(INFO) << "------------------"; int idx = comm->GetCommId(gpu_id); - LOG(INFO) << "------------------"; for (size_t i = 0; i < ins.size(); ++i) { VLOG(1) << "gpu : " << " invoke allreduce. send " << ins[i]->numel() << " recv " diff --git a/paddle/fluid/operators/parallel_do_op.cc b/paddle/fluid/operators/parallel_do_op.cc index ff5730bfe7..f808c71306 100644 --- a/paddle/fluid/operators/parallel_do_op.cc +++ b/paddle/fluid/operators/parallel_do_op.cc @@ -151,7 +151,6 @@ class ParallelDoOp : public framework::OperatorBase { } WaitOnPlaces(places); - // PADDLE_ENFORCE_EQ(places.size(), sub_scopes.size()); std::vector> workers; workers.reserve(places.size()); for (size_t place_idx = 0; place_idx < sub_scopes.size(); ++place_idx) { @@ -219,21 +218,18 @@ class ParallelDoGradOp : public framework::OperatorBase { auto &sub_scopes = scope.FindVar(Input(kParallelScopes)) ->Get>(); auto &places = scope.FindVar(Input(kPlaces))->Get(); - // PADDLE_ENFORCE_EQ(places.size(), sub_scopes.size()); // feed output@grad SplitTensorAndMoveTensorToScopes( scope, const_cast *>(&sub_scopes), places, Inputs(framework::GradVarName(kOutputs))); WaitOnPlaces(places); - LOG(INFO) << "places " << places.size(); // exe run std::vector> workers; for (size_t i = 0; i < sub_scopes.size(); ++i) { auto &place = places[i]; auto *cur_scope = sub_scopes[i]; - LOG(INFO) << place; // execute workers.emplace_back(framework::Async([program, cur_scope, place, block] { @@ -242,7 +238,6 @@ class ParallelDoGradOp : public framework::OperatorBase { false /*create_local_scope*/); })); } - LOG(INFO) << "places " << places.size(); for (auto &worker : workers) { worker.wait(); } diff --git a/python/paddle/v2/fluid/backward.py b/python/paddle/v2/fluid/backward.py index 8ec9db81b3..6da4325c64 100644 --- a/python/paddle/v2/fluid/backward.py +++ b/python/paddle/v2/fluid/backward.py @@ -230,44 +230,19 @@ def _callback_lookup_(op): def __call__(self, block, context): if not self.has_inserted_nccl_init: - # global_block = block.program.global_block() - # op_desc = global_block.desc.append_op() - # var_desc = global_block.desc.var('nccl_com__do_not_change_') - # var_desc.set_type(core.VarDesc.VarType.NCCL_COM) - # self.nccl_com = global_block.create_var( - # name='nccl_com', type=core.VarDesc.VarType.NCCL_COM) - # framework.Operator( - # global_block, - # type='ncclInit', - # desc=op_desc, - # inputs={}, - # outputs={'Communicator': [self.nccl_com]}) op_desc = _create_op_desc_( "ncclInit", {"parallel_scopes": self.parallel_scopes_name}, {"Communicator": ['nccl_com__do_not_change_']}, {}) - # block.desc.append_op().copy_from(op_desc) print(serialize_op_decs(op_desc)) block.program.global_block().desc.append_op().copy_from( op_desc) self.has_inserted_nccl_init = True current_op_desc = context["__current_op_desc__"] - # print(serialize_op_decs(context)) for o_param in current_op_desc.output_names(): for o_argu in current_op_desc.output(o_param): if o_argu in self.param_grad_names: - # # print("reduce", o_argu) - # op_desc = block.desc.append_op() - # op_desc.set_type("ncclAllReduce") - # op_desc.set_input("X", [o_argu]) - # - # # FIXME(tonyyang-svail): - # # Looks like nccl_com has been changed to nccl_com_0 - # op_desc.set_input("Communicator", ['nccl_com_0']) 
- # out_var = block.create_var() - # op_desc.set_output("Out", [out_var.name]) - # op_desc.set_attr("reduction", "ncclSum") allreduce_out_name = o_argu + "__nccl_all_reduce__" op_desc = _create_op_desc_( "ncclAllReduce", { diff --git a/python/paddle/v2/fluid/tests/test_parallel_op.py b/python/paddle/v2/fluid/tests/test_parallel_op.py index 2914c8dbaa..66bb6442af 100644 --- a/python/paddle/v2/fluid/tests/test_parallel_op.py +++ b/python/paddle/v2/fluid/tests/test_parallel_op.py @@ -175,7 +175,9 @@ class ParallelOpTest(BaseParallelForTest): def test_simple_fc(self): self.run_test( callback=self.__network__, - feed={'img': numpy.random.random(size=(8, 784)).astype('float32')}, + feed={ + 'img': numpy.random.random(size=(51, 784)).astype('float32') + }, fetch=['fc1.w@GRAD']) def test_fc_with_tiny_data(self): -- GitLab From de469d58380dd4376d905165678ad05eee9e3e17 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sun, 11 Feb 2018 10:17:23 +0800 Subject: [PATCH 033/217] optimize test --- .../tests/test_python_operator_overriding.py | 53 +++++++++++-------- 1 file changed, 32 insertions(+), 21 deletions(-) diff --git a/python/paddle/v2/fluid/tests/test_python_operator_overriding.py b/python/paddle/v2/fluid/tests/test_python_operator_overriding.py index 94f3fc958e..b9e2623bdd 100644 --- a/python/paddle/v2/fluid/tests/test_python_operator_overriding.py +++ b/python/paddle/v2/fluid/tests/test_python_operator_overriding.py @@ -22,44 +22,55 @@ import paddle.v2.fluid as fluid class TestPythonOperatorOverride(unittest.TestCase): - def check_result(self, fn, x_val, y_val, place, dtype): + def check_result(self, fn, place, dtype): shape = [9, 10] - x_data = np.full(shape, x_val).astype(dtype) - y_data = np.full(shape, y_val).astype(dtype) + x_data = np.random.random(size=shape).astype(dtype) + y_data = np.random.random(size=shape).astype(dtype) python_out = fn(x_data, y_data) x_var = layers.create_global_var( - shape=shape, value=x_val, dtype=dtype, persistable=True) + name='x', shape=shape, value=0.0, dtype=dtype, persistable=True) y_var = layers.create_global_var( - shape=shape, value=y_val, dtype=dtype, persistable=True) + name='y', shape=shape, value=0.0, dtype=dtype, persistable=True) out = fn(x_var, y_var) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) fluid_out = exe.run(fluid.default_main_program(), - feed=[], + feed={'x': x_data, + 'y': y_data}, fetch_list=[out]) np.testing.assert_array_equal(python_out, fluid_out[0]) def test_override(self): - cpu_place = fluid.CPUPlace() - test_data = [(lambda _a, _b: _a == _b, 0.1, 1.1, cpu_place, 'float32'), - (lambda _a, _b: _a == _b, 1.2, 1.1, cpu_place, 'float32'), - (lambda _a, _b: _a < _b, 0.1, 1.1, cpu_place, 'float32'), - (lambda _a, _b: _a < _b, 2.1, 1.1, cpu_place, 'float32'), - (lambda _a, _b: _a <= _b, 0.1, 1.1, cpu_place, 'float32'), - (lambda _a, _b: _a <= _b, 1.1, 1.1, cpu_place, 'float32'), - (lambda _a, _b: _a >= _b, 1.1, 1.1, cpu_place, 'float32')] - - main_program = framework.Program() - startup_program = framework.Program() - - with framework.program_guard(main_program, startup_program): - for fn, x_val, y_val, place, dtype in test_data: - self.check_result(fn, x_val, y_val, place, dtype) + # compare func to check + compare_fns = [ + lambda _a, _b: _a == _b, + lambda _a, _b: _a == _b, + lambda _a, _b: _a < _b, + lambda _a, _b: _a < _b, + lambda _a, _b: _a <= _b, + lambda _a, _b: _a <= _b, + lambda _a, _b: _a >= _b, + ] + + # places to check + places = [fluid.CPUPlace()] + if fluid.core.is_compiled_with_cuda(): + 
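# The nested loops below sweep the full test matrix: every
# (place, dtype, compare_fn) combination runs inside a fresh pair of
# Programs via framework.program_guard, so no operator state can leak
# from one comparison into the next.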
places.append(fluid.CUDAPlace(0)) + + # dtypes to check + dtypes = ['int32', 'float32'] + + for place in places: + for dtype in dtypes: + for compare_fn in compare_fns: + with framework.program_guard(framework.Program(), + gframework.Program()): + self.check_result(compare_fn, place, dtype) if __name__ == '__main__': -- GitLab From 23ba79b16b7135503a5ec804071de5ba22f57ce2 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sun, 11 Feb 2018 10:19:38 +0800 Subject: [PATCH 034/217] fix typo --- .../v2/fluid/tests/test_python_operator_overriding.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/python/paddle/v2/fluid/tests/test_python_operator_overriding.py b/python/paddle/v2/fluid/tests/test_python_operator_overriding.py index b9e2623bdd..aecae3332b 100644 --- a/python/paddle/v2/fluid/tests/test_python_operator_overriding.py +++ b/python/paddle/v2/fluid/tests/test_python_operator_overriding.py @@ -49,10 +49,8 @@ class TestPythonOperatorOverride(unittest.TestCase): # compare func to check compare_fns = [ lambda _a, _b: _a == _b, - lambda _a, _b: _a == _b, - lambda _a, _b: _a < _b, lambda _a, _b: _a < _b, - lambda _a, _b: _a <= _b, + lambda _a, _b: _a > _b, lambda _a, _b: _a <= _b, lambda _a, _b: _a >= _b, ] @@ -69,7 +67,7 @@ class TestPythonOperatorOverride(unittest.TestCase): for dtype in dtypes: for compare_fn in compare_fns: with framework.program_guard(framework.Program(), - gframework.Program()): + framework.Program()): self.check_result(compare_fn, place, dtype) -- GitLab From 6f78cb996912d056c7df131838d2c0a79a018e19 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sun, 11 Feb 2018 10:34:24 +0800 Subject: [PATCH 035/217] add not_equal --- paddle/fluid/operators/compare_op.cc | 2 ++ paddle/fluid/operators/compare_op.cu | 1 + paddle/fluid/operators/compare_op.h | 8 ++++++++ python/paddle/v2/fluid/layers/math_op_patch.py | 3 ++- .../v2/fluid/tests/test_python_operator_overriding.py | 1 + 5 files changed, 14 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/operators/compare_op.cc b/paddle/fluid/operators/compare_op.cc index f3414c33b5..b1f09fb002 100644 --- a/paddle/fluid/operators/compare_op.cc +++ b/paddle/fluid/operators/compare_op.cc @@ -102,3 +102,5 @@ REGISTER_LOGICAL_OP(less_equal, "Out = X <= Y"); REGISTER_LOGICAL_KERNEL(less_equal, CPU, paddle::operators::LessEqualFunctor); REGISTER_LOGICAL_OP(equal, "Out = X == Y"); REGISTER_LOGICAL_KERNEL(equal, CPU, paddle::operators::EqualFunctor); +REGISTER_LOGICAL_OP(not_equal, "Out = X != Y"); +REGISTER_LOGICAL_KERNEL(not_equal, CPU, paddle::operators::NotEqualFunctor); diff --git a/paddle/fluid/operators/compare_op.cu b/paddle/fluid/operators/compare_op.cu index 3507af2ae3..00263a2ade 100644 --- a/paddle/fluid/operators/compare_op.cu +++ b/paddle/fluid/operators/compare_op.cu @@ -17,3 +17,4 @@ limitations under the License. 
*/ REGISTER_LOGICAL_KERNEL(less_than, CUDA, paddle::operators::LessThanFunctor); REGISTER_LOGICAL_KERNEL(less_equal, CUDA, paddle::operators::LessEqualFunctor); REGISTER_LOGICAL_KERNEL(equal, CUDA, paddle::operators::EqualFunctor); +REGISTER_LOGICAL_KERNEL(not_equal, CUDA, paddle::operators::NotEqualFunctor); diff --git a/paddle/fluid/operators/compare_op.h b/paddle/fluid/operators/compare_op.h index 4b2ee5a9d6..c651335268 100644 --- a/paddle/fluid/operators/compare_op.h +++ b/paddle/fluid/operators/compare_op.h @@ -48,6 +48,14 @@ struct EqualFunctor { } }; +template +struct NotEqualFunctor { + using ELEM_TYPE = T; + HOSTDEVICE bool operator()(const T& a, const T& b) const { + return !EqualFunctor()(a, b); + } +}; + template class CompareOpKernel : public framework::OpKernel { diff --git a/python/paddle/v2/fluid/layers/math_op_patch.py b/python/paddle/v2/fluid/layers/math_op_patch.py index 5301c3d1de..8208629af7 100644 --- a/python/paddle/v2/fluid/layers/math_op_patch.py +++ b/python/paddle/v2/fluid/layers/math_op_patch.py @@ -154,8 +154,9 @@ def monkey_patch_variable(): ("__rpow__", "elementwise_pow", True), # for logical compare ("__eq__", "equal", False), + ("__ne__", "not_equal", False), ("__lt__", "less_than", False), - ("__le__", "less_equal", False), ): + ("__le__", "less_equal", False)): setattr(Variable, method_name, _elemwise_method_creator_(method_name, op_type, reverse)) diff --git a/python/paddle/v2/fluid/tests/test_python_operator_overriding.py b/python/paddle/v2/fluid/tests/test_python_operator_overriding.py index aecae3332b..5ef0097388 100644 --- a/python/paddle/v2/fluid/tests/test_python_operator_overriding.py +++ b/python/paddle/v2/fluid/tests/test_python_operator_overriding.py @@ -53,6 +53,7 @@ class TestPythonOperatorOverride(unittest.TestCase): lambda _a, _b: _a > _b, lambda _a, _b: _a <= _b, lambda _a, _b: _a >= _b, + lambda _a, _b: _a != _b, ] # places to check -- GitLab From b19ef3f05e81a9564d1b26dde474f44a6f1bc7be Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sun, 11 Feb 2018 10:38:02 +0800 Subject: [PATCH 036/217] optimize code --- .../paddle/v2/fluid/tests/test_python_operator_overriding.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/paddle/v2/fluid/tests/test_python_operator_overriding.py b/python/paddle/v2/fluid/tests/test_python_operator_overriding.py index 5ef0097388..e5198ec17d 100644 --- a/python/paddle/v2/fluid/tests/test_python_operator_overriding.py +++ b/python/paddle/v2/fluid/tests/test_python_operator_overriding.py @@ -49,11 +49,11 @@ class TestPythonOperatorOverride(unittest.TestCase): # compare func to check compare_fns = [ lambda _a, _b: _a == _b, + lambda _a, _b: _a != _b, lambda _a, _b: _a < _b, - lambda _a, _b: _a > _b, lambda _a, _b: _a <= _b, + lambda _a, _b: _a > _b, lambda _a, _b: _a >= _b, - lambda _a, _b: _a != _b, ] # places to check -- GitLab From 0e8568679d57b4a12b2283a19e4e2efc0f10c70d Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Sun, 11 Feb 2018 10:40:18 +0800 Subject: [PATCH 037/217] wip --- paddle/framework/block_desc.cc | 44 ++++++------ paddle/framework/block_desc.h | 2 +- paddle/pybind/protobuf.cc | 10 +-- python/paddle/v2/dataset/common.py | 2 + .../paddle/v2/fluid/distribute_transpiler.py | 13 +++- python/paddle/v2/fluid/framework.py | 68 ++++++++++++++++++- 6 files changed, 111 insertions(+), 28 deletions(-) diff --git a/paddle/framework/block_desc.cc b/paddle/framework/block_desc.cc index 945f1d8f3e..4bdd9ec04a 100644 --- a/paddle/framework/block_desc.cc +++ 
b/paddle/framework/block_desc.cc @@ -42,28 +42,30 @@ bool BlockDesc::HasVar(const std::string &name) const { return vars_.find(name) != vars_.end(); } -void BlockDesc::RenameVar(const std::string &old_name, - const std::string &new_name) { - if (this->HasVar(old_name)) { - auto *var = this->Var(old_name); - var->SetName(new_name); - vars_[new_name].reset(var); - vars_.erase(old_name); - // rename inputs and outputs - for (const auto &op : ops_) { - auto *it = op.get(); - for (auto in_name : it->InputArgumentNames()) { - if (in_name == old_name) { - it->RenameInput(old_name, new_name); - } - } - for (auto out_name : it->OutputArgumentNames()) { - if (out_name == old_name) { - it->RenameOutput(old_name, new_name); - } - } - } +VarDesc *BlockDesc::RenameVar(const std::string &old_name, + const std::string &new_name) { + if (!this->HasVar(old_name)) { + return nullptr; + } + need_update_ = true; + auto *var = this->Var(old_name); + VarDesc *new_var = new VarDesc(*(var->Proto())); + new_var->SetName(new_name); + // new_var->SetShape(var->GetShape()); + // new_var->SetType(var->GetType()); + // new_var->SetDataType(var->GetDataType()); + // new_var->SetLoDLevel(var->GetLoDLevel()); + // new_var->SetPersistable(var->Persistable()); + + vars_[new_name].reset(new_var); + + // rename inputs and outputs + for (const auto &op : ops_) { + auto *it = op.get(); + it->Rename(old_name, new_name); } + vars_.erase(old_name); + return new_var; } VarDesc *BlockDesc::FindVarRecursive(const std::string &name) const { diff --git a/paddle/framework/block_desc.h b/paddle/framework/block_desc.h index e87a543909..41bc7c1ad2 100644 --- a/paddle/framework/block_desc.h +++ b/paddle/framework/block_desc.h @@ -55,7 +55,7 @@ class BlockDesc { bool HasVar(const std::string &var_name) const; - void RenameVar(const std::string &old_name, const std::string &new_name); + VarDesc *RenameVar(const std::string &old_name, const std::string &new_name); VarDesc *FindVarRecursive(const std::string &name_bytes) const; diff --git a/paddle/pybind/protobuf.cc b/paddle/pybind/protobuf.cc index b1f621adca..3f19ae7e50 100644 --- a/paddle/pybind/protobuf.cc +++ b/paddle/pybind/protobuf.cc @@ -170,12 +170,14 @@ void BindBlockDesc(py::module &m) { [](BlockDesc &self, py::bytes byte_name) { std::string name = byte_name; return self.HasVar(name); - }) + }, + py::return_value_policy::reference) .def("rename_var", - [](BlockDesc &self, py::bytes byte_name, py::bytes byte_name_new) { + [](BlockDesc &self, const py::bytes &byte_name, + const py::bytes &byte_name_new) { std::string name = byte_name; std::string new_name = byte_name_new; - return self.RenameVar(name, new_name); + self.RenameVar(name, new_name); }) .def("has_var_recursive", [](BlockDesc &self, py::bytes byte_name) { @@ -213,7 +215,7 @@ void BindVarDsec(py::module &m) { py::class_ var_desc(m, "VarDesc", ""); var_desc .def("name", - [](const VarDesc &self) { + [](VarDesc &self) { py::bytes name = self.Name(); return name; }, diff --git a/python/paddle/v2/dataset/common.py b/python/paddle/v2/dataset/common.py index 9aba35a648..c6ff09a1d1 100644 --- a/python/paddle/v2/dataset/common.py +++ b/python/paddle/v2/dataset/common.py @@ -74,6 +74,8 @@ def download(url, module_name, md5sum, save_name=None): retry = 0 retry_limit = 3 while not (os.path.exists(filename) and md5file(filename) == md5sum): + if os.path.exists(filename): + print "file md5", md5file(filename), md5sum if retry < retry_limit: retry += 1 else: diff --git a/python/paddle/v2/fluid/distribute_transpiler.py 
b/python/paddle/v2/fluid/distribute_transpiler.py index 603dbcf707..f0834d0663 100644 --- a/python/paddle/v2/fluid/distribute_transpiler.py +++ b/python/paddle/v2/fluid/distribute_transpiler.py @@ -175,6 +175,7 @@ class DistributeTranspiler: shape=[0]) # create send_op + print("send inputs: ", send_inputs) send_op = program.global_block().append_op( type="send", inputs={"X": send_inputs}, @@ -204,12 +205,12 @@ class DistributeTranspiler: block_map[varname].append((long(offset), long(size))) for varname, splited in block_map.iteritems(): orig_var = program.global_block().var(varname) - if len(splited) == 1: # rename var to the trainer_id var new_var_name = "%s.trainer_%d" % \ (orig_var.name, self.trainer_id) program.global_block().rename_var(varname, new_var_name) + print("renaming OK...", varname, new_var_name) var_mapping[varname] = \ [program.global_block().var(new_var_name)] continue @@ -375,7 +376,10 @@ class DistributeTranspiler: new_inputs = dict() # update param/grad shape first, then other inputs like # moment can use the updated shape + print("mark1") for key in opt_op.input_names: + # print("opt type: ", opt_op.type) + # print("opt op input: ", key) if key == "Grad": grad_block = None for g in self.param_grad_ep_mapping[endpoint]["grads"]: @@ -422,6 +426,7 @@ class DistributeTranspiler: new_inputs[key] = tmpvar + print("mark2") for key in opt_op.input_names: if key in ["Param", "Grad"]: continue @@ -453,6 +458,7 @@ class DistributeTranspiler: inputs=new_inputs, outputs=outputs, attrs=opt_op.attrs) + print("mark3") def _append_pserver_non_opt_ops(self, program, pserver_program, opt_op): # Append the ops for parameters that do not need to be optimized/updated @@ -523,6 +529,11 @@ class DistributeTranspiler: optimize_sub_program = Program() # Iterate through the ops and append ops as needed for idx, opt_op in enumerate(self.optimize_ops): + print("mark0") + print(opt_op.inputs.keys()) + for v in opt_op.inputs.values(): + print(v.name) + print(v.shape) is_op_on_pserver = self._is_op_on_pserver(endpoint, self.optimize_ops, idx) if not is_op_on_pserver: diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/v2/fluid/framework.py index 553b3f3b91..417fcb4fd3 100644 --- a/python/paddle/v2/fluid/framework.py +++ b/python/paddle/v2/fluid/framework.py @@ -741,9 +741,75 @@ class Block(object): """ if not self.has_var(name): raise ValueError("var %s is not in current" % name) + v = self.var(name) + stop_gradient = None + trainable = None + optimize_attr = None + regularizer = None + gradient_clip_attr = None + error_clip = None + if type(v) == Parameter: + stop_gradient = v.stop_gradient + trainable = v.trainable + optimize_attr = v.optimize_attr + regularizer = v.regularizer + gradient_clip_attr = v.gradient_clip_attr + error_clip = v.error_clip + elif type(v) == Variable: + error_clip = v.error_clip + stop_gradient = v.stop_gradient + else: + raise ValueError("unsupported var type: %s", type(v)) + + def _clear_op_io_for_var(name): + for op in self.ops: + for k in op.inputs.keys(): + + if op.inputs[k].name == name: + op.inputs[k] = None + for k in op.outputs.keys(): + if op.outputs[k].name == name: + op.outputs[k] = None + + _clear_op_io_for_var(name) self.desc.rename_var(name, new_name) + d = self.desc.find_var(new_name) + var = None + if type(v) == Parameter: + var = Parameter( + self, + d.shape(), + d.dtype(), + name=new_name, + stop_gradient=stop_gradient, + trainable=trainable, + optimize_attr=optimize_attr, + regularizer=regularizer, + gradient_clip_attr=gradient_clip_attr, + 
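# (Rebuilding the Parameter from the renamed desc keeps the Python-side
#  wrapper in sync with the C++ VarDesc; plain Variables get the same
#  treatment below, after which the op inputs/outputs that were cleared to
#  None are re-pointed at the new object and sync_with_cpp() reconciles
#  whatever remains.)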
error_clip=error_clip) + elif type(v) == Variable: + var = Variable( + self, + name=new_name, + error_clip=error_clip, + stop_gradient=stop_gradient) + + # rename the python side, sync_with_cpp will only add + # new vars/ops to python side. + self.vars[new_name] = var + for op in self.ops: + print("### rename op i/o ", name, op.inputs) + if op.inputs: + for k in op.inputs.keys(): + if op.inputs[k] == None: + print("rename input: ", name, var) + op.inputs[k] = var + if op.outputs: + for k in op.outputs.keys(): + if op.outputs[k] == None: + op.outputs[k] = var + del self.vars[name] self.sync_with_cpp() - print("renamed var: ", self.var(new_name)) def create_parameter(self, *args, **kwargs): global_block = self.program.global_block() -- GitLab From 82c33c61d9e7584060916c604478b67f59fbdfc0 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sun, 11 Feb 2018 10:46:25 +0800 Subject: [PATCH 038/217] Fix constructor bug in mixed_vector --- paddle/fluid/framework/mixed_vector.h | 5 ++--- paddle/fluid/framework/mixed_vector_test.cu | 8 ++++++++ 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/paddle/fluid/framework/mixed_vector.h b/paddle/fluid/framework/mixed_vector.h index 9756754260..902dedd48e 100644 --- a/paddle/fluid/framework/mixed_vector.h +++ b/paddle/fluid/framework/mixed_vector.h @@ -37,9 +37,8 @@ class Vector { // Fill vector with value. The vector size is `count`. explicit Vector(size_t count, const T& value = T()) { - if (count == 0) { - InitEmpty(); - } else { + InitEmpty(); + if (count != 0) { resize(count); T* ptr = begin(); for (size_t i = 0; i < count; ++i) { diff --git a/paddle/fluid/framework/mixed_vector_test.cu b/paddle/fluid/framework/mixed_vector_test.cu index a890645256..20b79d60c1 100644 --- a/paddle/fluid/framework/mixed_vector_test.cu +++ b/paddle/fluid/framework/mixed_vector_test.cu @@ -15,6 +15,7 @@ #include "glog/logging.h" #include "gtest/gtest.h" +#include "mixed_vector.h" #include "paddle/fluid/framework/mixed_vector.h" #include "paddle/fluid/platform/gpu_info.h" @@ -91,3 +92,10 @@ TEST(mixed_vector, MultiGPU) { ASSERT_EQ(tmp[i], i * 100); } } + +TEST(mixed_vector, InitWithCount) { + paddle::framework::Vector vec(10, 10); + for (int i = 0; i < 10; ++i) { + ASSERT_EQ(vec[i], 10); + } +} -- GitLab From 816fa8f32e951f2c6bef9c7b59bd2cb8dd4d9f96 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sun, 11 Feb 2018 10:59:56 +0800 Subject: [PATCH 039/217] Fix warnings --- paddle/fluid/framework/mixed_vector.h | 7 ++++++- paddle/fluid/framework/mixed_vector_test.cu | 8 ++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/paddle/fluid/framework/mixed_vector.h b/paddle/fluid/framework/mixed_vector.h index 902dedd48e..26f160e509 100644 --- a/paddle/fluid/framework/mixed_vector.h +++ b/paddle/fluid/framework/mixed_vector.h @@ -121,6 +121,10 @@ class Vector { const T* begin() const { return &this->operator[](0); } const T* end() const { return &this->operator[](size()); } + const T* cbegin() const { return begin(); } + + const T* cend() const { return end(); } + const T& back() const { auto it = end(); --it; @@ -243,7 +247,8 @@ class Vector { bool operator==(const Vector& other) const { if (size() != other.size()) return false; - for (auto it1 = begin(), it2 = other.begin(); it1 < end(); ++it1, ++it2) { + for (const T *it1 = cbegin(), it2 = other.cbegin(); it1 < cend(); + ++it1, ++it2) { if (*it1 != *it2) { return false; } diff --git a/paddle/fluid/framework/mixed_vector_test.cu b/paddle/fluid/framework/mixed_vector_test.cu index 20b79d60c1..83694a590f 100644 
--- a/paddle/fluid/framework/mixed_vector_test.cu +++ b/paddle/fluid/framework/mixed_vector_test.cu @@ -27,10 +27,10 @@ TEST(mixed_vector, CPU_VECTOR) { for (int i = 0; i < 10; ++i) { tmp.push_back(i); } - ASSERT_EQ(tmp.size(), 10); + ASSERT_EQ(tmp.size(), 10UL); vec tmp2; tmp2 = tmp; - ASSERT_EQ(tmp2.size(), 10); + ASSERT_EQ(tmp2.size(), 10UL); for (int i = 0; i < 10; ++i) { ASSERT_EQ(tmp2[i], i); ASSERT_EQ(tmp2[i], tmp[i]); @@ -59,7 +59,7 @@ TEST(mixed_vector, GPU_VECTOR) { for (int i = 0; i < 10; ++i) { tmp.push_back(i); } - ASSERT_EQ(tmp.size(), 10); + ASSERT_EQ(tmp.size(), 10UL); paddle::platform::CUDAPlace gpu(0); multiply_10<<<1, 1, 0, GetCUDAStream(gpu)>>>(tmp.MutableData(gpu)); @@ -80,7 +80,7 @@ TEST(mixed_vector, MultiGPU) { for (int i = 0; i < 10; ++i) { tmp.push_back(i); } - ASSERT_EQ(tmp.size(), 10); + ASSERT_EQ(tmp.size(), 10UL); paddle::platform::CUDAPlace gpu0(0); paddle::platform::SetDeviceId(0); multiply_10<<<1, 1, 0, GetCUDAStream(gpu0)>>>(tmp.MutableData(gpu0)); -- GitLab From 72bcf72c6683242b8da88a488da09eebc1b85175 Mon Sep 17 00:00:00 2001 From: qingqing01 Date: Sun, 11 Feb 2018 11:01:03 +0800 Subject: [PATCH 040/217] Refine target_assign_op to unify the classification and regression targets assigning. (#8326) * Refine target_assign_op to unify the classification and regression targets assignment. * Fix the unit testing. * Fix conflicts. --- paddle/fluid/operators/target_assign_op.cc | 193 +++++++----------- paddle/fluid/operators/target_assign_op.cu | 42 ++-- paddle/fluid/operators/target_assign_op.h | 169 +++++++-------- .../v2/fluid/tests/test_target_assign_op.py | 75 +++++-- 4 files changed, 225 insertions(+), 254 deletions(-) diff --git a/paddle/fluid/operators/target_assign_op.cc b/paddle/fluid/operators/target_assign_op.cc index 24f1b72523..bafb830df9 100644 --- a/paddle/fluid/operators/target_assign_op.cc +++ b/paddle/fluid/operators/target_assign_op.cc @@ -22,69 +22,43 @@ class TargetAssignOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - // checkout inputs - PADDLE_ENFORCE(ctx->HasInput("EncodedGTBBox"), - "Input(EncodedGTBBox) of TargetAssignOp should not be null"); - PADDLE_ENFORCE(ctx->HasInput("GTScoreLabel"), - "Input(GTScoreLabel) of TargetAssignOp should not be null"); + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of TargetAssignOp should not be null"); PADDLE_ENFORCE(ctx->HasInput("MatchIndices"), "Input(MatchIndices) of TargetAssignOp should not be null"); - PADDLE_ENFORCE(ctx->HasInput("NegIndices"), - "Input(NegIndices) of TargetAssignOp should not be null"); - - // checkout outputs - PADDLE_ENFORCE( - ctx->HasOutput("PredBBoxLabel"), - "Output(PredBBoxLabel) of TargetAssignOp should not be null."); - PADDLE_ENFORCE( - ctx->HasOutput("PredBBoxWeight"), - "Output(PredBBoxWeight) of TargetAssignOp should not be null."); - PADDLE_ENFORCE( - ctx->HasOutput("PredScoreLabel"), - "Output(PredScoreLabel) of TargetAssignOp should not be null."); - PADDLE_ENFORCE( - ctx->HasOutput("PredScoreWeight"), - "Output(PredScoreWeight) of TargetAssignOp should not be null."); - - auto blabel_dims = ctx->GetInputDim("EncodedGTBBox"); - auto slabel_dims = ctx->GetInputDim("GTScoreLabel"); + + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of TargetAssignOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("OutWeight"), + "Output(OutWeight) of TargetAssignOp should not be null."); + + auto in_dims = ctx->GetInputDim("X"); auto mi_dims = 
ctx->GetInputDim("MatchIndices"); - auto neg_dims = ctx->GetInputDim("NegIndices"); - PADDLE_ENFORCE_EQ(blabel_dims.size(), 3UL, - "The rank of Input(EncodedGTBBox) must be 3."); - PADDLE_ENFORCE_EQ(slabel_dims.size(), 2UL, - "The rank of Input(GTScoreLabel) must be 2."); - PADDLE_ENFORCE_EQ(mi_dims.size(), 2UL, + PADDLE_ENFORCE_EQ(in_dims.size(), 3, "The rank of Input(X) must be 3."); + PADDLE_ENFORCE_EQ(mi_dims.size(), 2, "The rank of Input(MatchIndices) must be 2."); - PADDLE_ENFORCE_EQ(neg_dims.size(), 2UL, - "The rank of Input(NegIndices) must be 2."); - - PADDLE_ENFORCE_EQ(blabel_dims[0], slabel_dims[0], - "The 1st dimension (means the total number of " - "ground-truth bounding boxes) of Input(EncodedGTBBox) " - "and Input(GTScoreLabel) must be the same."); - PADDLE_ENFORCE_EQ(blabel_dims[1], mi_dims[1], - "The 2nd dimension (means the number of priod boxes) " - "of Input(EncodedGTBBox) and " - "Input(MatchIndices) must be the same."); - PADDLE_ENFORCE_EQ(blabel_dims[2], 4, - "The 3rd dimension of Input(EncodedGTBBox) must be 4."); + + if (ctx->HasInput("NegIndices")) { + auto neg_dims = ctx->GetInputDim("NegIndices"); + PADDLE_ENFORCE_EQ(neg_dims.size(), 2, + "The rank of Input(NegIndices) must be 2."); + PADDLE_ENFORCE_EQ(neg_dims[1], 1, + "The last dimenstion of Out(NegIndices) must be 1."); + } auto n = mi_dims[0]; - auto np = mi_dims[1]; - ctx->SetOutputDim("PredBBoxLabel", {n, np, 4}); - ctx->SetOutputDim("PredBBoxWeight", {n, np, 1}); - ctx->SetOutputDim("PredScoreLabel", {n, np, 1}); - ctx->SetOutputDim("PredScoreWeight", {n, np, 1}); + auto m = mi_dims[1]; + auto k = in_dims[in_dims.size() - 1]; + ctx->SetOutputDim("Out", {n, m, k}); + ctx->SetOutputDim("OutWeight", {n, m, 1}); } protected: framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( - framework::ToDataType( - ctx.Input("EncodedGTBBox")->type()), + framework::ToDataType(ctx.Input("X")->type()), ctx.device_context()); } }; @@ -93,102 +67,87 @@ class TargetAssignOpMaker : public framework::OpProtoAndCheckerMaker { public: TargetAssignOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("EncodedGTBBox", - "(LoDTensor), The encoded ground-truth bounding boxes with shape " - "[Ng, Np, 4], where Ng is the total number of ground-truth boxes " - "in this mini-batch, Np the number of predictions, 4 is the " - "number of coordinate in [xmin, ymin, xmax, ymax] layout."); - AddInput("GTScoreLabel", - "(LoDTensor, default LoDTensor), The input ground-truth " - "labels with shape [Ng, 1], where the Ng is the same as it in " - "the input of EncodedGTBBox."); + AddInput("X", + "(LoDTensor), This input is a 3D LoDTensor with shape [M, P, K]. " + "Some elements in X will be assigned to Out based on the " + "MatchIndices and NegIndices."); AddInput("MatchIndices", "(Tensor, default Tensor), The input matched indices " - "with shape [N, Np], where N is the batch size, Np is the same " - "as it in the input of EncodedGTBBox. 
If MatchIndices[i][j] " - "is -1, the j-th prior box is not matched to any ground-truh " - "box in i-th instance."); + "with shape [N, P], If MatchIndices[i][j] is -1, the j-th entity " + "of column is not matched to any entity of row in i-th instance."); AddInput("NegIndices", "(LoDTensor, default LoDTensor), The input negative example " - "indices with shape [Neg, 1], where is the total number of " - "negative example indices."); - AddAttr("background_label", - "(int, default 0), Label index of background class.") + "indices are an optional input with shape [Neg, 1], where Neg is " + "the total number of negative example indices.") + .AsDispensable(); + AddAttr("mismatch_value", + "(int, default 0), Fill this value to the " + "mismatched location.") .SetDefault(0); - AddOutput("PredBBoxLabel", - "(Tensor), The output encoded ground-truth labels " - "with shape [N, Np, 4], N is the batch size and Np, 4 is the " - "same as they in input of EncodedGTBBox. If MatchIndices[i][j] " - "is -1, the PredBBoxLabel[i][j][:] is the encoded ground-truth " - "box for background_label in i-th instance."); - AddOutput("PredBBoxWeight", - "(Tensor), The weight for PredBBoxLabel with the shape " - "of [N, Np, 1]"); - AddOutput("PredScoreLabel", - "(Tensor, default Tensor), The output score labels for " - "each predictions with shape [N, Np, 1]. If MatchIndices[i][j] " - "is -1, PredScoreLabel[i][j] = background_label."); - AddOutput("PredScoreWeight", - "(Tensor), The weight for PredScoreLabel with the shape " - "of [N, Np, 1]"); + AddOutput("Out", + "(Tensor), The output is a 3D Tensor with shape [N, P, K], " + "N and P is the same as they are in NegIndices, K is the " + "same as it in input of X. If MatchIndices[i][j] " + "is -1, the Out[i][j][0 : K] is the mismatch_value."); + AddOutput("OutWeight", + "(Tensor), The weight for output with the shape of [N, P, 1]"); AddComment(R"DOC( -This operator is, for given the encoded boxes between prior boxes and -ground-truth boxes and ground-truth class labels, to assign classification -and regression targets to each prior box as well as weights to each -prior box. The weights is used to specify which prior box would not contribute -to training loss. - -For each instance, the output `PredBBoxLabel`, `PredBBoxWeight`, -`PredScoreLabel` and `PredScoreWeight` are assigned based on `MatchIndices`. -Assumed that the row offset for each instance in `EncodedGTBBox` is called lod, -this operato assigns classification/regression targets by performing the +This operator can be, for given the target bounding boxes or labels, +to assign classification and regression targets to each prediction as well as +weights to prediction. The weights is used to specify which prediction would +not contribute to training loss. + +For each instance, the output `Out` and`OutWeight` are assigned based on +`MatchIndices` and `NegIndices`. +Assumed that the row offset for each instance in `X` is called lod, +this operator assigns classification/regression targets by performing the following steps: 1. Assigning all outpts based on `MatchIndices`: If id = MatchIndices[i][j] > 0, - PredBBoxLabel[i][j] = EncodedGTBBox[lod[i] + id][j] - PredBBoxWeight[i][j] = 1. - PredScoreLabel[i][j] = GTScoreLabel[lod[i] + id] - PredScoreWeight[i][j] = 1. + Out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K] + OutWeight[i][j] = 1. Otherwise, - PredBBoxLabel[j][j] = [0., 0., 0., 0.] - PredBBoxWeight[i][j] = 0. - PredScoreLabel[i][j] = background_label - PredScoreWeight[i][j] = 0. 
+ Out[j][j][0 : K] = {mismatch_value, mismatch_value, ...} + OutWeight[i][j] = 0. -2. Assigning PredScoreWeight based on `NegIndices`: +2. Assigning OutWeight based on `NegIndices` if `NegIndices` is provided: -Assumed that the row offset for each instance in `NegIndices` is caleed neg_lod, -for i-th instance and all ids of NegIndices in this instance: +Assumed that the row offset for each instance in `NegIndices` is called neg_lod, +for i-th instance and each `id` of NegIndices in this instance: - PredScoreLabel[i][id] = background_label - PredScoreWeight[i][id] = 1.0 + Out[i][id][0 : K] = {mismatch_value, mismatch_value, ...} + OutWeight[i][id] = 1.0 )DOC"); } }; -template -struct NegTargetAssignFunctor { +template +struct NegTargetAssignFunctor { void operator()(const platform::CPUDeviceContext& ctx, const int* neg_indices, - const size_t* lod, const int num, const int num_prior_box, - const int background_label, int* out_label, T* out_label_wt) { - for (int i = 0; i < num; ++i) { + const size_t* lod, const int N, const int M, const int K, + const int mismatch_value, T* out, WT* out_wt) { + for (int i = 0; i < N; ++i) { for (size_t j = lod[i]; j < lod[i + 1]; ++j) { int id = neg_indices[j]; - out_label[i * num_prior_box + id] = background_label; - out_label_wt[i * num_prior_box + id] = static_cast(1.0); + int off = (i * M + id) * K; + for (int k = 0; k < K; ++k) { + out[off + k] = mismatch_value; + out_wt[off + k] = static_cast(1.0); + } } } } }; -template struct NegTargetAssignFunctor; -template struct NegTargetAssignFunctor; +template struct NegTargetAssignFunctor; +template struct NegTargetAssignFunctor; } // namespace operators } // namespace paddle @@ -198,5 +157,5 @@ REGISTER_OP_WITHOUT_GRADIENT(target_assign, ops::TargetAssignOp, ops::TargetAssignOpMaker); REGISTER_OP_CPU_KERNEL( target_assign, - ops::TargetAssignKernel, - ops::TargetAssignKernel); + ops::TargetAssignKernel, + ops::TargetAssignKernel); diff --git a/paddle/fluid/operators/target_assign_op.cu b/paddle/fluid/operators/target_assign_op.cu index 5c012d27ad..fa02b8aac9 100644 --- a/paddle/fluid/operators/target_assign_op.cu +++ b/paddle/fluid/operators/target_assign_op.cu @@ -17,39 +17,41 @@ limitations under the License. 
*/ namespace paddle { namespace operators { -template +template __global__ void NegTargetAssignKernel(const int* neg_indices, const size_t* lod, - const int num, const int num_prior_box, - const int background_label, - int* out_label, T* out_label_wt) { + const int N, const int M, const int K, + const int mismatch_value, T* out, + WT* out_wt) { int bidx = blockIdx.x; int st = lod[bidx]; int ed = lod[bidx + 1]; - int row_start = bidx * num_prior_box; + int row_start = bidx * M; for (int i = st + threadIdx.x; i < ed; i += blockDim.x) { int id = row_start + neg_indices[i]; - out_label[id] = background_label; - out_label_wt[id] = 1.; + for (int k = 0; k < K; ++k) { + out[id * K + k] = T(mismatch_value); + out_wt[id * K + k] = WT(1.); + } } } -template -struct NegTargetAssignFunctor { +template +struct NegTargetAssignFunctor { void operator()(const platform::CUDADeviceContext& ctx, - const int* neg_indices, const size_t* lod, const int num, - const int num_prior_box, const int background_label, - int* out_label, T* out_label_wt) { + const int* neg_indices, const size_t* lod, const int N, + const int M, const int K, const int mismatch_value, T* out, + WT* out_wt) { const int block_size = 256; - const int grid_size = num; - NegTargetAssignKernel<<>>( - neg_indices, lod, num, num_prior_box, background_label, out_label, - out_label_wt); + const int grid_size = N; + NegTargetAssignKernel<<>>( + neg_indices, lod, N, M, K, mismatch_value, out, out_wt); } }; -template struct NegTargetAssignFunctor; -template struct NegTargetAssignFunctor; +template struct NegTargetAssignFunctor; +template struct NegTargetAssignFunctor; } // namespace operators } // namespace paddle @@ -57,5 +59,5 @@ template struct NegTargetAssignFunctor; namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( target_assign, - ops::TargetAssignKernel, - ops::TargetAssignKernel); + ops::TargetAssignKernel, + ops::TargetAssignKernel); diff --git a/paddle/fluid/operators/target_assign_op.h b/paddle/fluid/operators/target_assign_op.h index 876111523a..a1b2fe6f35 100644 --- a/paddle/fluid/operators/target_assign_op.h +++ b/paddle/fluid/operators/target_assign_op.h @@ -19,140 +19,113 @@ limitations under the License. 
*/ namespace paddle { namespace operators { - -template +template struct TargetAssignFunctor { - const T* gt_box_; - const int* gt_label_; + const T* in_; const int* match_indices_; const size_t* lod_; - const int background_label_; - const int64_t num_; - const int64_t num_prior_box_; - - T* out_box_; - T* out_box_wt_; - int* out_label_; - T* out_label_wt_; - - TargetAssignFunctor(const T* gt_box, const int* gt_label, - const int* match_indices, const size_t* lod, - const int background_label, const int64_t num, - const int64_t np, T* out_box, T* out_box_wt, - int* out_label, T* out_label_wt) - : gt_box_(gt_box), - gt_label_(gt_label), + const int mismatch_value_; + const int64_t N_; + const int64_t M_; + const int64_t P_; + const int64_t K_; + + T* out_; + WT* out_wt_; + + TargetAssignFunctor(const T* input, const int* match_indices, + const size_t* lod, const int mismatch_value, + const int64_t N, const int64_t M, const int64_t P, + const int64_t K, T* out, WT* out_wt) + : in_(input), match_indices_(match_indices), lod_(lod), - background_label_(background_label), - num_(num), - num_prior_box_(np), - out_box_(out_box), - out_box_wt_(out_box_wt), - out_label_(out_label), - out_label_wt_(out_label_wt) {} + mismatch_value_(mismatch_value), + N_(N), + M_(M), + P_(P), + K_(K), + out_(out), + out_wt_(out_wt) {} HOSTDEVICE void operator()(size_t i) const { - int row = i / num_prior_box_; - int col = i - row * num_prior_box_; + int h = i / M_; + int w = i - h * M_; - size_t row_off = lod_[row]; - int offset = row * num_prior_box_ + col; + size_t off = lod_[h]; + int id = match_indices_[i]; - int id = match_indices_[offset]; - T* obox = out_box_ + offset * 4; - int* olabel = out_label_ + offset; - T* obox_wt = out_box_wt_ + offset; - T* olabel_wt = out_label_wt_ + offset; + T* out = out_ + i * K_; + WT* out_wt = out_wt_ + i; if (id > -1) { - const T* gtbox = gt_box_ + ((row_off + id) * num_prior_box_ + col) * 4; - - obox[0] = gtbox[0]; - obox[1] = gtbox[1]; - obox[2] = gtbox[2]; - obox[3] = gtbox[3]; - - olabel[0] = gt_label_[row_off + id]; - obox_wt[0] = static_cast(1.); - olabel_wt[0] = static_cast(1.); + int w_off = w % P_; + const T* in = in_ + ((off + id) * P_ + w_off) * K_; + for (int64_t k = 0; k < K_; ++k) { + out[k] = in[k]; + } + out_wt[0] = static_cast(1.); } else { - obox[0] = static_cast(0.); - obox[1] = static_cast(0.); - obox[2] = static_cast(0.); - obox[3] = static_cast(0.); - - olabel[0] = background_label_; - obox_wt[0] = static_cast(0.); - olabel_wt[0] = static_cast(0.); + for (int64_t k = 0; k < K_; ++k) { + out[k] = static_cast(mismatch_value_); + } + out_wt[0] = static_cast(0.); } } }; -template +template struct NegTargetAssignFunctor { void operator()(const platform::DeviceContext& ctx, const int* neg_indices, - const size_t* lod, const int num, const int num_prior_box, - const int background_label, int* out_label, - T* out_label_wt) const; + const size_t* lod, const int N, const int M, const int K, + const int mismatch_value, T* out, WT* out_wt) const; }; -template +template class TargetAssignKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - auto* enc_gt_box = ctx.Input("EncodedGTBBox"); - auto* gt_label = ctx.Input("GTScoreLabel"); + auto* x = ctx.Input("X"); auto* match_indices = ctx.Input("MatchIndices"); - auto* neg_indices = ctx.Input("NegIndices"); - - auto* out_box = ctx.Output("PredBBoxLabel"); - auto* out_box_wt = ctx.Output("PredBBoxWeight"); - auto* out_label = ctx.Output("PredScoreLabel"); - 
auto* out_label_wt = ctx.Output("PredScoreWeight"); - PADDLE_ENFORCE_EQ(enc_gt_box->lod().size(), 1UL); - PADDLE_ENFORCE_EQ(gt_label->lod().size(), 1UL); - PADDLE_ENFORCE_EQ(neg_indices->lod().size(), 1UL); + auto* out = ctx.Output("Out"); + auto* out_wt = ctx.Output("OutWeight"); - int background_label = ctx.Attr("background_label"); + PADDLE_ENFORCE_EQ(x->lod().size(), 1UL); + int mismatch_value = ctx.Attr("mismatch_value"); - const T* box_data = enc_gt_box->data(); - const int* label_data = gt_label->data(); + const T* x_data = x->data(); const int* match_idx_data = match_indices->data(); - const int* neg_idx_data = neg_indices->data(); - T* obox_data = out_box->mutable_data(ctx.GetPlace()); - T* obox_wt_data = out_box_wt->mutable_data(ctx.GetPlace()); - int* olabel_data = out_label->mutable_data(ctx.GetPlace()); - T* olabel_wt_data = out_label_wt->mutable_data(ctx.GetPlace()); + T* out_data = out->mutable_data(ctx.GetPlace()); + WT* out_wt_data = out_wt->mutable_data(ctx.GetPlace()); - int64_t num = match_indices->dims()[0]; - int64_t num_prior_box = match_indices->dims()[1]; + int64_t n = match_indices->dims()[0]; + int64_t m = match_indices->dims()[1]; + int64_t p = x->dims()[1]; + int64_t k = x->dims()[2]; - auto gt_lod = enc_gt_box->lod().back(); - auto gt_label_lod = gt_label->lod().back(); - auto neg_lod = neg_indices->lod().back(); - for (size_t i = 0; i < gt_lod.size(); ++i) { - PADDLE_ENFORCE_EQ(gt_lod.data()[i], gt_label_lod.data()[i]); - } - - size_t* gt_lod_data = gt_lod.MutableData(ctx.GetPlace()); - size_t* neg_lod_data = neg_lod.MutableData(ctx.GetPlace()); + auto x_lod = x->lod().back(); + size_t* x_lod_data = x_lod.MutableData(ctx.GetPlace()); - TargetAssignFunctor functor(box_data, label_data, match_idx_data, - gt_lod_data, background_label, num, - num_prior_box, obox_data, obox_wt_data, - olabel_data, olabel_wt_data); + TargetAssignFunctor functor(x_data, match_idx_data, x_lod_data, + mismatch_value, n, m, p, k, out_data, + out_wt_data); auto& device_ctx = ctx.template device_context(); - platform::ForRange for_range(device_ctx, - num * num_prior_box); + platform::ForRange for_range(device_ctx, n * m); for_range(functor); - NegTargetAssignFunctor neg_trg_functor; - neg_trg_functor(device_ctx, neg_idx_data, neg_lod_data, num, num_prior_box, - background_label, olabel_data, olabel_wt_data); + auto* neg_indices = ctx.Input("NegIndices"); + if (neg_indices) { + PADDLE_ENFORCE_EQ(neg_indices->lod().size(), 1UL); + const int* neg_idx_data = neg_indices->data(); + auto neg_lod = neg_indices->lod().back(); + size_t* neg_lod_data = neg_lod.MutableData(ctx.GetPlace()); + NegTargetAssignFunctor neg_trg_functor; + neg_trg_functor(device_ctx, neg_idx_data, neg_lod_data, n, m, k, + mismatch_value, out_data, out_wt_data); + } } }; diff --git a/python/paddle/v2/fluid/tests/test_target_assign_op.py b/python/paddle/v2/fluid/tests/test_target_assign_op.py index 8a1155c621..ceda61ff55 100755 --- a/python/paddle/v2/fluid/tests/test_target_assign_op.py +++ b/python/paddle/v2/fluid/tests/test_target_assign_op.py @@ -43,7 +43,7 @@ def gen_match_and_neg_indices(num_prior, gt_lod, neg_lod): def target_assign(encoded_box, gt_label, match_indices, neg_indices, gt_lod, - neg_lod, background_label): + neg_lod, mismatch_value): batch_size, num_prior = match_indices.shape # init target bbox @@ -52,7 +52,7 @@ def target_assign(encoded_box, gt_label, match_indices, neg_indices, gt_lod, trg_box_wt = np.zeros((batch_size, num_prior, 1)).astype('float32') # init target label trg_label = 
np.ones((batch_size, num_prior, 1)).astype('int32') - trg_label = trg_label * background_label + trg_label = trg_label * mismatch_value # init weight for target label trg_label_wt = np.zeros((batch_size, num_prior, 1)).astype('float32') @@ -65,53 +65,90 @@ def target_assign(encoded_box, gt_label, match_indices, neg_indices, gt_lod, # target bbox for v, c in zip(col_val + gt_start, col_ids[0].tolist()): trg_box[i][c][:] = encoded_box[v][c][:] - # weight for target bbox trg_box_wt[i][col_ids] = 1.0 trg_label[i][col_ids] = gt_label[col_val + gt_start] - trg_label_wt[i][col_ids] = 1.0 # set target label weight to 1.0 for the negative samples - neg_ids = neg_indices[neg_lod[i]:neg_lod[i + 1]] - trg_label_wt[i][neg_ids] = 1.0 + if neg_indices is not None: + neg_ids = neg_indices[neg_lod[i]:neg_lod[i + 1]] + trg_label_wt[i][neg_ids] = 1.0 return trg_box, trg_box_wt, trg_label, trg_label_wt -class TestTargetAssginOp(OpTest): +class TestTargetAssignFloatType(OpTest): def setUp(self): self.op_type = "target_assign" + num_prior = 120 + num_class = 21 + gt_lod = [0, 5, 11, 23] + neg_lod = [0, 4, 7, 13] + mismatch_value = 0 + batch_size = len(gt_lod) - 1 + num_gt = gt_lod[-1] + + encoded_box = np.random.random((num_gt, num_prior, 4)).astype('float32') + gt_label = np.random.randint( + num_class, size=(num_gt, 1)).astype('int32') + + match_indices, neg_indices = gen_match_and_neg_indices(num_prior, + gt_lod, neg_lod) + out, out_wt, _, _ = target_assign(encoded_box, gt_label, match_indices, + neg_indices, gt_lod, neg_lod, + mismatch_value) + + # assign regression targets + x = encoded_box + self.inputs = { + 'X': (x, [gt_lod]), + 'MatchIndices': match_indices, + } + self.attrs = {'mismatch_value': mismatch_value} + self.outputs = { + 'Out': out, + 'OutWeight': out_wt, + } + + def test_check_output(self): + self.check_output() + + +class TestTargetAssignIntType(OpTest): + def setUp(self): + self.op_type = "target_assign" num_prior = 120 num_class = 21 gt_lod = [0, 5, 11, 23] neg_lod = [0, 4, 7, 13] + mismatch_value = 0 batch_size = len(gt_lod) - 1 num_gt = gt_lod[-1] - background_label = 0 encoded_box = np.random.random((num_gt, num_prior, 4)).astype('float32') gt_label = np.random.randint( num_class, size=(num_gt, 1)).astype('int32') + match_indices, neg_indices = gen_match_and_neg_indices(num_prior, gt_lod, neg_lod) - trg_box, trg_box_wt, trg_label, trg_label_wt = target_assign( - encoded_box, gt_label, match_indices, neg_indices, gt_lod, neg_lod, - background_label) + _, _, out, out_wt = target_assign(encoded_box, gt_label, match_indices, + neg_indices, gt_lod, neg_lod, + mismatch_value) + + # assign classification targets + x = np.reshape(gt_label, (num_gt, 1, 1)) self.inputs = { - 'EncodedGTBBox': (encoded_box, [gt_lod]), - 'GTScoreLabel': (gt_label, [gt_lod]), - 'MatchIndices': (match_indices), + 'X': (x, [gt_lod]), + 'MatchIndices': match_indices, 'NegIndices': (neg_indices, [neg_lod]), } - self.attrs = {'background_label': background_label} + self.attrs = {'mismatch_value': mismatch_value} self.outputs = { - 'PredBBoxLabel': (trg_box), - 'PredBBoxWeight': (trg_box_wt), - 'PredScoreLabel': (trg_label), - 'PredScoreWeight': (trg_label_wt), + 'Out': out, + 'OutWeight': out_wt, } def test_check_output(self): -- GitLab From ae2296e806fb3b70f2ffe326815ade868039715f Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sun, 11 Feb 2018 11:01:11 +0800 Subject: [PATCH 041/217] Clean code --- paddle/fluid/framework/mixed_vector_test.cu | 1 - 1 file changed, 1 deletion(-) diff --git
a/paddle/fluid/framework/mixed_vector_test.cu b/paddle/fluid/framework/mixed_vector_test.cu index 83694a590f..0d5a914eac 100644 --- a/paddle/fluid/framework/mixed_vector_test.cu +++ b/paddle/fluid/framework/mixed_vector_test.cu @@ -15,7 +15,6 @@ #include "glog/logging.h" #include "gtest/gtest.h" -#include "mixed_vector.h" #include "paddle/fluid/framework/mixed_vector.h" #include "paddle/fluid/platform/gpu_info.h" -- GitLab From 006ef1fd7a551e109b8ac294cdb9cc012d2a5161 Mon Sep 17 00:00:00 2001 From: wanghaox Date: Sun, 11 Feb 2018 11:17:48 +0800 Subject: [PATCH 042/217] migrate detection_map code directory --- paddle/{ => fluid}/operators/detection_map_op.cc | 2 +- paddle/{ => fluid}/operators/detection_map_op.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) rename paddle/{ => fluid}/operators/detection_map_op.cc (99%) rename paddle/{ => fluid}/operators/detection_map_op.h (99%) diff --git a/paddle/operators/detection_map_op.cc b/paddle/fluid/operators/detection_map_op.cc similarity index 99% rename from paddle/operators/detection_map_op.cc rename to paddle/fluid/operators/detection_map_op.cc index 1ab691eb4f..cc4b6202c0 100644 --- a/paddle/operators/detection_map_op.cc +++ b/paddle/fluid/operators/detection_map_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/detection_map_op.h" +#include "paddle/fluid/operators/detection_map_op.h" namespace paddle { namespace operators { diff --git a/paddle/operators/detection_map_op.h b/paddle/fluid/operators/detection_map_op.h similarity index 99% rename from paddle/operators/detection_map_op.h rename to paddle/fluid/operators/detection_map_op.h index fd0ddd10aa..0379a3328a 100644 --- a/paddle/operators/detection_map_op.h +++ b/paddle/fluid/operators/detection_map_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { -- GitLab From cf2ed179940e3de30bafb9e9e89587424c58e1b2 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Sun, 11 Feb 2018 11:18:32 +0800 Subject: [PATCH 043/217] fix prior_op unit test --- python/paddle/v2/fluid/tests/test_prior_box_op.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/paddle/v2/fluid/tests/test_prior_box_op.py b/python/paddle/v2/fluid/tests/test_prior_box_op.py index 25dfc4307c..a6c21af49f 100644 --- a/python/paddle/v2/fluid/tests/test_prior_box_op.py +++ b/python/paddle/v2/fluid/tests/test_prior_box_op.py @@ -65,9 +65,9 @@ class TestPriorBoxOp(OpTest): self.batch_size = 10 self.min_sizes = [2, 4] - self.min_sizes = np.array(self.min_sizes).astype('float32') + self.min_sizes = np.array(self.min_sizes).astype('float32').tolist() self.max_sizes = [5, 10] - self.max_sizes = np.array(self.max_sizes).astype('float32') + self.max_sizes = np.array(self.max_sizes).astype('float32').tolist() self.aspect_ratios = [2.0, 3.0] self.flip = True self.real_aspect_ratios = [1, 2.0, 1.0 / 2.0, 3.0, 1.0 / 3.0] -- GitLab From 190119bb98eaca554bb183488f6828fd2b3e18c0 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sun, 11 Feb 2018 11:28:32 +0800 Subject: [PATCH 044/217] Extract for-loop init. 
Make nvcc happy --- paddle/fluid/framework/mixed_vector.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/framework/mixed_vector.h b/paddle/fluid/framework/mixed_vector.h index 26f160e509..4dc3de54de 100644 --- a/paddle/fluid/framework/mixed_vector.h +++ b/paddle/fluid/framework/mixed_vector.h @@ -247,8 +247,9 @@ class Vector { bool operator==(const Vector& other) const { if (size() != other.size()) return false; - for (const T *it1 = cbegin(), it2 = other.cbegin(); it1 < cend(); - ++it1, ++it2) { + auto it1 = cbegin(); + auto it2 = other.cbegin(); + for (; it1 < cend(); ++it1, ++it2) { if (*it1 != *it2) { return false; } -- GitLab From 51d8a1168c433737e6ce19223d4c58df575b815f Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Sun, 11 Feb 2018 11:35:33 +0800 Subject: [PATCH 045/217] reduce doc build time in travis ci --- doc/templates/conf.py.cn.in | 2 +- doc/templates/conf.py.en.in | 2 +- paddle/scripts/docker/build.sh | 2 +- paddle/scripts/travis/build_doc.sh | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/templates/conf.py.cn.in b/doc/templates/conf.py.cn.in index 41b35b5b23..d134aad794 100644 --- a/doc/templates/conf.py.cn.in +++ b/doc/templates/conf.py.cn.in @@ -82,7 +82,7 @@ language = 'zh_CN' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['_build', '**/*_en*', '*_en*'] +exclude_patterns = ['_build', '**/*_en*', '*_en*', 'api/*'] # The reST default role (used for this markup: `text`) to use for all # documents. diff --git a/doc/templates/conf.py.en.in b/doc/templates/conf.py.en.in index 5822c2481d..1f057d2e83 100644 --- a/doc/templates/conf.py.en.in +++ b/doc/templates/conf.py.en.in @@ -82,7 +82,7 @@ language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['_build', '**/*_cn*', '*_cn*'] +exclude_patterns = ['_build', '**/*_cn*', '*_cn*', 'api/*'] # The reST default role (used for this markup: `text`) to use for all # documents. diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index 2f8dd48efe..94018dbb0b 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -116,7 +116,7 @@ EOF -DWITH_SWIG_PY=ON \ -DWITH_STYLE_CHECK=OFF make -j `nproc` gen_proto_py - make -j `nproc` paddle_python + make -j `nproc` copy_paddle_pybind make -j `nproc` paddle_docs paddle_docs_cn paddle_api_docs popd fi diff --git a/paddle/scripts/travis/build_doc.sh b/paddle/scripts/travis/build_doc.sh index 486c094a6a..33bcc81548 100755 --- a/paddle/scripts/travis/build_doc.sh +++ b/paddle/scripts/travis/build_doc.sh @@ -8,7 +8,7 @@ cd $TRAVIS_BUILD_DIR/build # Compile Documentation only. cmake .. 
-DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_MKL=OFF -DWITH_DOC=ON make -j `nproc` gen_proto_py -make -j `nproc` paddle_python +make -j `nproc` copy_paddle_pybind make -j `nproc` paddle_docs paddle_docs_cn paddle_api_docs # check websites for broken links -- GitLab From 279aa626ab25385093dcfc7e1a932a37535dc5a4 Mon Sep 17 00:00:00 2001 From: Yancey Date: Sun, 11 Feb 2018 11:53:00 +0800 Subject: [PATCH 046/217] Move learning rate and releated op to pserver (#8209) * dist train support lr decay * update by comment * revert elementwise method creator * delete comment --- paddle/fluid/operators/listen_and_serv_op.cc | 17 +- .../paddle/v2/fluid/distribute_transpiler.py | 246 ++++++++++++------ .../paddle/v2/fluid/layers/math_op_patch.py | 1 + .../book_distribute/notest_dist_word2vec.py | 2 +- 4 files changed, 183 insertions(+), 83 deletions(-) diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index a72708d9ba..c456c692ee 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -106,6 +106,7 @@ class ListenAndServOp : public framework::OperatorBase { // the gradients arrives, just add suffix 0~n and merge the gradient. rpc_service_->SetCond(0); size_t recv_var_cnt = 0; + size_t update_param_cnt = 0; int batch_barrier = 0; while (batch_barrier != fan_in) { const detail::MessageWithName &v = rpc_service_->Get(); @@ -126,13 +127,14 @@ class ListenAndServOp : public framework::OperatorBase { std::string param_var_name; if (it != grad_list.end()) { param_var_name = param_list[it - grad_list.begin()]; + update_param_cnt++; + VLOG(3) << "received grad: " << grad_var_name + << " updating param: " << param_var_name; } else { - LOG(ERROR) << "grad has no paired param:" << grad_var_name; + VLOG(3) << "received variable: " << grad_var_name + << " no need to update param"; } - VLOG(3) << "received grad: " << grad_var_name - << " updating param: " << param_var_name; - - if (fan_in > 1) { + if (fan_in > 1 && !param_var_name.empty()) { grad_var_name = this->GetGradVarNameForTrainer(grad_var_name); } auto *var = recv_scope.FindVar(grad_var_name); @@ -144,11 +146,10 @@ class ListenAndServOp : public framework::OperatorBase { } } VLOG(3) << "recv " << recv_var_cnt << " parmeters for one barrier."; - // TODO(Yancey1989): merge SelectedRows variables here if (exit_flag) { rpc_service_->ShutDown(); } - + VLOG(3) << "run optimize graph..."; try { executor.Run(*program, &recv_scope, block->ID(), /*global_block*/ false /*create_local_scope*/, false /*create_vars*/); @@ -156,7 +157,7 @@ class ListenAndServOp : public framework::OperatorBase { LOG(ERROR) << "run sub program error " << e.what(); } rpc_service_->SetCond(1); - rpc_service_->WaitClientGet(recv_var_cnt); + rpc_service_->WaitClientGet(update_param_cnt); grads_counter_.clear(); } // while(true) } diff --git a/python/paddle/v2/fluid/distribute_transpiler.py b/python/paddle/v2/fluid/distribute_transpiler.py index cd89dba72d..edef2b1b17 100644 --- a/python/paddle/v2/fluid/distribute_transpiler.py +++ b/python/paddle/v2/fluid/distribute_transpiler.py @@ -33,6 +33,57 @@ class VarBlock: return "%s:%d:%d" % (self.varname, self.offset, self.size) +class UnionFind(object): + """ Union-find data struct. + + Union-find is a data struct that keeps track of a set of elements partitioned + into a number of disjoint (non-overlapping) subsets. 
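For example (illustrative only -- elements may be any hashable objects, and the transpiler passes operator instances):

    >>> u = UnionFind(['sgd_0', 'sgd_1', 'scale_0'])
    >>> u.union('sgd_0', 'scale_0')
    >>> u.is_connected('sgd_0', 'scale_0')
    True
    >>> u.is_connected('sgd_0', 'sgd_1')
    False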
+ + Reference: + https://en.wikipedia.org/wiki/Disjoint-set_data_structure + + Args: + elements(list): The initialize element list. + """ + + def __init__(self, elementes=None): + self._parents = [] # index -> parent index + self._index = {} # element -> index + self._curr_idx = 0 + if not elementes: + elementes = [] + for ele in elementes: + self._parents.append(self._curr_idx) + self._index.update({ele: self._curr_idx}) + self._curr_idx += 1 + + def find(self, x): + # Find the root index of given element x, + # execute the path compress while findind the root index + if not x in self._index: + return -1 + idx = self._index[x] + while idx != self._parents[idx]: + t = self._parents[idx] + self._parents[idx] = self._parents[t] + idx = t + return idx + + def union(self, x, y): + # Union two given element + x_root = self.find(x) + y_root = self.find(y) + + if x_root == y_root: + return + self._parents[x_root] = y_root + + def is_connected(self, x, y): + # If two given elements have the same root index, + # then they are connected. + return self.find(x) == self.find(y) + + def same_or_split_var(p_name, var_name): return p_name == var_name or p_name.startswith(var_name + ".block") @@ -178,6 +229,21 @@ class DistributeTranspiler: outputs={"Out": [orig_param]}, attrs={"axis": 0}) + self.lr_param_mapping = self._create_lr_param_mapping() + + def _create_lr_param_mapping(self): + lr_mapping = dict() + for _, opt_op in enumerate(self.optimize_ops): + if not opt_op.inputs or not opt_op.inputs.has_key("LearningRate") \ + or not opt_op.inputs.has_key("Param"): + continue + lr = opt_op.inputs["LearningRate"].name + param = opt_op.inputs["Param"].name + if not lr_mapping.has_key(lr): + lr_mapping.update({lr: list()}) + lr_mapping[lr].append(param) + return lr_mapping + def _create_vars_from_blocklist(self, program, block_list): # Create respective variables using the block_list block_map = dict() @@ -300,52 +366,15 @@ class DistributeTranspiler: pass return orig_shape - def _op_input_var(self, op, varname): - pass - - def _is_op_on_pserver(self, endpoint, all_ops, idx): - """ - Recursively check if the op need to run on current server. - Assume that ops are in the execution order. - """ - param_names = [ - p.name for p in self.param_grad_ep_mapping[endpoint]["params"] - ] - op = all_ops[idx] - input_names = set(op.input_names) - # TODO(typhoonzero): using Param and Grad input name to identify - # that the operator is an optimization operator, need a better way. - if "Param" in input_names: - if op.input("Param")[0] in param_names: - return True - else: - for n in param_names: - if same_or_split_var(n, op.input("Param")[0]) \ - and n != op.input("Param")[0]: - return True - return False - else: - j = idx - 1 - while j >= 0: - prev_op = all_ops[j] - # prev_output_names = [o.name for o in prev_op.outputs.values()] - # prev_input_names = [o.name for o in prev_op.inputs.values()] - # NOTE(typhoonzero): consider list input/output - prev_output_names = prev_op.desc.output_arg_names() - prev_input_names = prev_op.desc.input_arg_names() - found1 = False - found2 = False - for varname in op.desc.input_arg_names(): - if varname in prev_output_names: - found1 = self._is_op_on_pserver(endpoint, all_ops, j) - # later ops may produce output for prev op's next batch use. 
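# (The recursive _is_op_on_pserver scan being removed here is superseded by
#  the union-find grouping: _create_ufind below precomputes connected
#  components over the optimize ops once, instead of re-walking all earlier
#  ops for every candidate op the way this deleted code did.)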
- for varname in op.desc.output_arg_names(): - if varname in prev_input_names: - found2 = self._is_op_on_pserver(endpoint, all_ops, j) - if found1 or found2: - return True - j -= 1 - return False + def _fetch_var_names(self, param_dict): + res = [] + if not param_dict: + return res + for _, values in param_dict.iteritems(): + if not isinstance(values, list): + values = [values] + res += [v.name for v in values] + return res def _append_pserver_ops(self, optimize_block, opt_op, endpoint): program = optimize_block.program @@ -363,11 +392,7 @@ class DistributeTranspiler: # do not append this op if current endpoint # is not dealing with this grad block return - merged_var = program.global_block().create_var( - name=grad_block.name, - persistable=grad_block.persistable, - dtype=grad_block.dtype, - shape=grad_block.shape) + merged_var = program.global_block().vars[grad_block.name] # append merging ops if trainers > 1 if self.trainers > 1: vars2merge = self._create_var_for_trainers( @@ -398,13 +423,19 @@ class DistributeTranspiler: shape=param_block.shape) new_inputs[key] = tmpvar + elif key == "LearningRate": + # leraning rate variable has already be created by non-optimize op, + # don't create it once again. + new_inputs[key] = program.global_block().vars[opt_op.input(key)[ + 0]] for key in opt_op.input_names: - if key in ["Param", "Grad"]: + new_shape = None + if key in ["Param", "Grad", "LearningRate"]: continue + var = program.global_block().vars[opt_op.input(key)[0]] # update accumulator variable shape param_shape = new_inputs["Param"].shape - var = program.global_block().vars[opt_op.input(key)[0]] new_shape = self._get_optimizer_input_shape(opt_op.type, key, var.shape, param_shape) tmpvar = program.global_block().create_var( @@ -415,12 +446,11 @@ class DistributeTranspiler: new_inputs[key] = tmpvar # change output's ParamOut variable - outputs = self._get_output_map_from_op(program.global_block(), opt_op) - outputs["ParamOut"] = new_inputs["Param"] + opt_op.outputs["ParamOut"] = new_inputs["Param"] optimize_block.append_op( type=opt_op.type, inputs=new_inputs, - outputs=outputs, + outputs=opt_op.outputs, attrs=opt_op.attrs) def _append_pserver_non_opt_ops(self, optimize_block, opt_op): @@ -428,11 +458,10 @@ class DistributeTranspiler: # Append the ops for parameters that do not need to be optimized/updated inputs = self._get_input_map_from_op(self.program.global_block().vars, opt_op) - for var in inputs.itervalues(): - if type(var) == list: - varlist = var - else: - varlist = [var] + for varlist in inputs.itervalues(): + if not isinstance(varlist, list): + varlist = [varlist] + for var in varlist: if not program.global_block().vars.has_key(var.name): program.global_block().create_var( @@ -444,12 +473,70 @@ class DistributeTranspiler: outputs = self._get_output_map_from_op(self.program.global_block().vars, opt_op) + for varlist in outputs.itervalues(): + if not isinstance(varlist, list): + varlist = [varlist] + + for var in varlist: + program.global_block().create_var( + name=var.name, + persistable=var.persistable, + dtype=var.dtype, + shape=var.shape) + optimize_block.append_op( type=opt_op.type, inputs=inputs, outputs=outputs, attrs=opt_op.attrs) + def _is_op_connected(self, op1, op2): + # If one op's input is another op's output or + # one op's output is another op's input, we say + # the two operator is connected. 
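+        # e.g. an op that writes the decayed learning rate variable is
+        # connected to the sgd op that reads that same variable as input.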
+ op1_input_names = self._fetch_var_names(op1.inputs) + op1_output_names = self._fetch_var_names(op1.outputs) + + op2_input_names = self._fetch_var_names(op2.inputs) + op2_output_names = self._fetch_var_names(op2.outputs) + if set(op1_output_names) & set(op2_input_names) or \ + set(op1_input_names) & set(op2_output_names): + return True + return False + + def _create_ufind(self, optimize_ops): + # Create a unit find data struct by optimize ops + ufind = UnionFind(optimize_ops) + for i in xrange(len(optimize_ops)): + for j in xrange(i, len(optimize_ops)): + op1 = optimize_ops[i] + op2 = optimize_ops[j] + if self._is_op_connected(op1, op2): + ufind.union(op1, op2) + return ufind + + def _is_opt_op(self, op): + # NOTE: It's a HACK implement. + # optimize op: SGDOptimize, MomentumOptimizer, AdamOptimizer and etc... + if op.inputs and op.inputs.has_key("Param") \ + and op.inputs.has_key("LearningRate"): + return True + return False + + def _is_opt_op_on_pserver(self, endpoint, op): + param_names = [ + p.name for p in self.param_grad_ep_mapping[endpoint]["params"] + ] + if op.inputs["Param"].name in param_names: + return True + else: + for n in param_names: + param = op.inputs["Param"].name + if same_or_split_var(n, param) and n != op.inputs["Param"].name: + return True + return False + return False + def get_pserver_program(self, endpoint): """ Get pserver side program using the endpoint @@ -469,8 +556,6 @@ class DistributeTranspiler: pserver_program.global_block().create_var( name=v.name, persistable=True, dtype=v.dtype, shape=v.shape) for trainer_id in xrange(self.trainers): - print("create variable for program: %s.trainer_%d" % - (v.name, trainer_id)) pserver_program.global_block().create_var( name="%s.trainer_%d" % (v.name, trainer_id), persistable=True, @@ -478,17 +563,30 @@ class DistributeTranspiler: shape=v.shape) # step6 optimize_block = pserver_program.create_block(0) - # Iterate through the ops and append ops as needed - for idx, opt_op in enumerate(self.optimize_ops): - is_op_on_pserver = self._is_op_on_pserver(endpoint, - self.optimize_ops, idx) - if not is_op_on_pserver: - continue - if "Grad" in opt_op.desc.input_arg_names(): - self._append_pserver_ops(optimize_block, opt_op, endpoint) - else: - self._append_pserver_non_opt_ops(optimize_block, opt_op) - + # step 6.1 + # Create a union-find data struct by optimize ops, + # If two ops are connected, we could add these two ops + # into one set. + ufind = self._create_ufind(self.optimize_ops) + # step 6.2 + # Iterate through the ops and append optimize op which + # located on current pserver + opt_op_on_pserver = [] + for _, op in enumerate(self.optimize_ops): + if self._is_opt_op(op) and self._is_opt_op_on_pserver(endpoint, op): + opt_op_on_pserver.append(op) + # step 6.3 + # Iterate through the ops, and if an op and the optimize ops + # which located on current pserver are in one set, then + # append it into the sub program. 
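+        # e.g. (illustrative) if two lr-decay ops produce the learning
+        # rate consumed by sgd(w0), all three ops fall into one set and
+        # are appended together, only on the pserver that owns w0.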
+ for _, op in enumerate(self.optimize_ops): + for _, opt_op in enumerate(opt_op_on_pserver): + if ufind.is_connected(op, opt_op): + if self._is_opt_op(op): + self._append_pserver_ops(optimize_block, op, endpoint) + else: + self._append_pserver_non_opt_ops(optimize_block, op) + break # Append the listen_and_serv op pserver_program.global_block().append_op( type="listen_and_serv", diff --git a/python/paddle/v2/fluid/layers/math_op_patch.py b/python/paddle/v2/fluid/layers/math_op_patch.py index 9b5f22759c..00e4e69078 100644 --- a/python/paddle/v2/fluid/layers/math_op_patch.py +++ b/python/paddle/v2/fluid/layers/math_op_patch.py @@ -117,6 +117,7 @@ def monkey_patch_variable(): tmp_name = unique_tmp_name() out = self.block.create_var(name=tmp_name, dtype=lhs_dtype) + self.block.append_op( type=op_type, inputs={'X': [self], diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_word2vec.py b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_word2vec.py index 04b3113690..ec4c2d2721 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_word2vec.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_word2vec.py @@ -99,7 +99,7 @@ elif training_role == "TRAINER": exe.run(fluid.default_startup_program()) for pass_id in range(PASS_NUM): for data in train_reader(): - avg_cost_np = exe.run(fluid.default_main_program(), + avg_cost_np = exe.run(t.get_trainer_program(), feed=feeder.feed(data), fetch_list=[avg_cost]) print("avg_cost_np", avg_cost_np) -- GitLab From 593bec2c225fb8ababb2c50b116adc689d635575 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sun, 11 Feb 2018 12:22:16 +0800 Subject: [PATCH 047/217] update test_layers --- python/paddle/v2/fluid/tests/test_layers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/paddle/v2/fluid/tests/test_layers.py b/python/paddle/v2/fluid/tests/test_layers.py index aea43c2517..fa46f86973 100644 --- a/python/paddle/v2/fluid/tests/test_layers.py +++ b/python/paddle/v2/fluid/tests/test_layers.py @@ -161,8 +161,8 @@ class TestBook(unittest.TestCase): label=label, chunk_scheme="IOB", num_chunk_types=(label_dict_len - 1) / 2) - self.assertNotEqual(crf, None) - self.assertNotEqual(crf_decode, None) + self.assertFalse(crf is None) + self.assertFalse(crf_decode is None) print(str(program)) -- GitLab From 3941a249b2ff57c99c18720e95881e5aa9a3c832 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Sun, 11 Feb 2018 12:56:55 +0800 Subject: [PATCH 048/217] add framework_py_proto, set WITH_STYLE_CHECK=OFF --- paddle/scripts/docker/build.sh | 2 +- paddle/scripts/travis/build_doc.sh | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index 94018dbb0b..1486d5ed25 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -115,7 +115,7 @@ EOF -DWITH_AVX=${WITH_AVX:-ON} \ -DWITH_SWIG_PY=ON \ -DWITH_STYLE_CHECK=OFF - make -j `nproc` gen_proto_py + make -j `nproc` gen_proto_py framework_py_proto make -j `nproc` copy_paddle_pybind make -j `nproc` paddle_docs paddle_docs_cn paddle_api_docs popd diff --git a/paddle/scripts/travis/build_doc.sh b/paddle/scripts/travis/build_doc.sh index 33bcc81548..aa223d87ba 100755 --- a/paddle/scripts/travis/build_doc.sh +++ b/paddle/scripts/travis/build_doc.sh @@ -6,8 +6,8 @@ mkdir -p $TRAVIS_BUILD_DIR/build cd $TRAVIS_BUILD_DIR/build # Compile Documentation only. -cmake .. 
-DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_MKL=OFF -DWITH_DOC=ON -make -j `nproc` gen_proto_py +cmake .. -DCMAKE_BUILD_TYPE=Release -DWITH_GPU=OFF -DWITH_MKL=OFF -DWITH_DOC=ON -DWITH_STYLE_CHECK=OFF +make -j `nproc` gen_proto_py framework_py_proto make -j `nproc` copy_paddle_pybind make -j `nproc` paddle_docs paddle_docs_cn paddle_api_docs -- GitLab From 9029a9d9c66bc9c975987c078ebccdaca49d2fc0 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sun, 11 Feb 2018 13:13:08 +0800 Subject: [PATCH 049/217] Fix constructor bug in mixed_vector (#8364) * Fix constructor bug in mixed_vector * Fix warnings * Clean code * Extract for-loop init. Make nvcc happy --- paddle/fluid/framework/mixed_vector.h | 13 +++++++++---- paddle/fluid/framework/mixed_vector_test.cu | 15 +++++++++++---- 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/paddle/fluid/framework/mixed_vector.h b/paddle/fluid/framework/mixed_vector.h index 9756754260..4dc3de54de 100644 --- a/paddle/fluid/framework/mixed_vector.h +++ b/paddle/fluid/framework/mixed_vector.h @@ -37,9 +37,8 @@ class Vector { // Fill vector with value. The vector size is `count`. explicit Vector(size_t count, const T& value = T()) { - if (count == 0) { - InitEmpty(); - } else { + InitEmpty(); + if (count != 0) { resize(count); T* ptr = begin(); for (size_t i = 0; i < count; ++i) { @@ -122,6 +121,10 @@ class Vector { const T* begin() const { return &this->operator[](0); } const T* end() const { return &this->operator[](size()); } + const T* cbegin() const { return begin(); } + + const T* cend() const { return end(); } + const T& back() const { auto it = end(); --it; @@ -244,7 +247,9 @@ class Vector { bool operator==(const Vector& other) const { if (size() != other.size()) return false; - for (auto it1 = begin(), it2 = other.begin(); it1 < end(); ++it1, ++it2) { + auto it1 = cbegin(); + auto it2 = other.cbegin(); + for (; it1 < cend(); ++it1, ++it2) { if (*it1 != *it2) { return false; } diff --git a/paddle/fluid/framework/mixed_vector_test.cu b/paddle/fluid/framework/mixed_vector_test.cu index a890645256..0d5a914eac 100644 --- a/paddle/fluid/framework/mixed_vector_test.cu +++ b/paddle/fluid/framework/mixed_vector_test.cu @@ -26,10 +26,10 @@ TEST(mixed_vector, CPU_VECTOR) { for (int i = 0; i < 10; ++i) { tmp.push_back(i); } - ASSERT_EQ(tmp.size(), 10); + ASSERT_EQ(tmp.size(), 10UL); vec tmp2; tmp2 = tmp; - ASSERT_EQ(tmp2.size(), 10); + ASSERT_EQ(tmp2.size(), 10UL); for (int i = 0; i < 10; ++i) { ASSERT_EQ(tmp2[i], i); ASSERT_EQ(tmp2[i], tmp[i]); @@ -58,7 +58,7 @@ TEST(mixed_vector, GPU_VECTOR) { for (int i = 0; i < 10; ++i) { tmp.push_back(i); } - ASSERT_EQ(tmp.size(), 10); + ASSERT_EQ(tmp.size(), 10UL); paddle::platform::CUDAPlace gpu(0); multiply_10<<<1, 1, 0, GetCUDAStream(gpu)>>>(tmp.MutableData(gpu)); @@ -79,7 +79,7 @@ TEST(mixed_vector, MultiGPU) { for (int i = 0; i < 10; ++i) { tmp.push_back(i); } - ASSERT_EQ(tmp.size(), 10); + ASSERT_EQ(tmp.size(), 10UL); paddle::platform::CUDAPlace gpu0(0); paddle::platform::SetDeviceId(0); multiply_10<<<1, 1, 0, GetCUDAStream(gpu0)>>>(tmp.MutableData(gpu0)); @@ -91,3 +91,10 @@ TEST(mixed_vector, MultiGPU) { ASSERT_EQ(tmp[i], i * 100); } } + +TEST(mixed_vector, InitWithCount) { + paddle::framework::Vector vec(10, 10); + for (int i = 0; i < 10; ++i) { + ASSERT_EQ(vec[i], 10); + } +} -- GitLab From b56f4a4ee28b49f5193acd5c9b6e28cff2b7e647 Mon Sep 17 00:00:00 2001 From: Tao Luo Date: Sun, 11 Feb 2018 13:15:40 +0800 Subject: [PATCH 050/217] move code from /paddle/string to /paddle/fluid/string (#8363) --- 
paddle/CMakeLists.txt | 1 - paddle/fluid/CMakeLists.txt | 1 + paddle/fluid/framework/init.cc | 2 +- paddle/fluid/framework/scope.cc | 2 +- paddle/fluid/operators/listen_and_serv_op.cc | 2 +- paddle/fluid/operators/send_recv_op_test.cc | 2 +- paddle/fluid/platform/cpu_info_test.cc | 2 +- paddle/fluid/platform/enforce.h | 4 +- paddle/fluid/platform/enforce_test.cc | 2 +- paddle/fluid/pybind/pybind.cc | 2 +- paddle/{ => fluid}/string/.clang-format | 0 paddle/{ => fluid}/string/CMakeLists.txt | 0 paddle/{ => fluid}/string/piece.cc | 2 +- paddle/{ => fluid}/string/piece.h | 4 +- paddle/{ => fluid}/string/piece_test.cc | 2 +- paddle/{ => fluid}/string/printf.h | 2 +- paddle/{ => fluid}/string/printf_test.cc | 6 +- .../string/tinyformat/tinyformat.h | 106 +++++++----------- paddle/{ => fluid}/string/to_string.h | 0 paddle/{ => fluid}/string/to_string_test.cc | 4 +- 20 files changed, 61 insertions(+), 85 deletions(-) rename paddle/{ => fluid}/string/.clang-format (100%) rename paddle/{ => fluid}/string/CMakeLists.txt (100%) rename paddle/{ => fluid}/string/piece.cc (99%) rename paddle/{ => fluid}/string/piece.h (99%) rename paddle/{ => fluid}/string/piece_test.cc (99%) rename paddle/{ => fluid}/string/printf.h (97%) rename paddle/{ => fluid}/string/printf_test.cc (85%) rename paddle/{ => fluid}/string/tinyformat/tinyformat.h (92%) rename paddle/{ => fluid}/string/to_string.h (100%) rename paddle/{ => fluid}/string/to_string_test.cc (96%) diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt index c7deba2ab4..a7b249d43b 100644 --- a/paddle/CMakeLists.txt +++ b/paddle/CMakeLists.txt @@ -11,7 +11,6 @@ if(MOBILE_INFERENCE) else() add_subdirectory(pserver) add_subdirectory(trainer) - add_subdirectory(string) add_subdirectory(scripts) if(WITH_C_API) diff --git a/paddle/fluid/CMakeLists.txt b/paddle/fluid/CMakeLists.txt index a6b4191518..7405ef17d3 100644 --- a/paddle/fluid/CMakeLists.txt +++ b/paddle/fluid/CMakeLists.txt @@ -4,3 +4,4 @@ add_subdirectory(framework) add_subdirectory(operators) add_subdirectory(pybind) add_subdirectory(inference) +add_subdirectory(string) diff --git a/paddle/fluid/framework/init.cc b/paddle/fluid/framework/init.cc index cb2d740d86..ad806a8cd7 100644 --- a/paddle/fluid/framework/init.cc +++ b/paddle/fluid/framework/init.cc @@ -20,7 +20,7 @@ limitations under the License. */ #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/place.h" -#include "paddle/string/piece.h" +#include "paddle/fluid/string/piece.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/scope.cc b/paddle/fluid/framework/scope.cc index 6006ed16bd..91a8617d66 100644 --- a/paddle/fluid/framework/scope.cc +++ b/paddle/fluid/framework/scope.cc @@ -18,7 +18,7 @@ limitations under the License. */ #include // for call_once #include "glog/logging.h" #include "paddle/fluid/framework/threadpool.h" -#include "paddle/string/printf.h" +#include "paddle/fluid/string/printf.h" DEFINE_bool(benchmark, false, "Doing memory benchmark. It will make deleting scope synchronized, " diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index c456c692ee..3730ae161f 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -27,7 +27,7 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/detail/grpc_server.h" #include "paddle/fluid/operators/detail/sendrecvop_utils.h" #include "paddle/fluid/operators/detail/simple_block_queue.h" -#include "paddle/string/printf.h" +#include "paddle/fluid/string/printf.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/send_recv_op_test.cc b/paddle/fluid/operators/send_recv_op_test.cc index 716f687044..37a3d246d7 100644 --- a/paddle/fluid/operators/send_recv_op_test.cc +++ b/paddle/fluid/operators/send_recv_op_test.cc @@ -22,7 +22,7 @@ limitations under the License. */ #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" -#include "paddle/string/printf.h" +#include "paddle/fluid/string/printf.h" USE_NO_KERNEL_OP(send); USE_NO_KERNEL_OP(listen_and_serv); diff --git a/paddle/fluid/platform/cpu_info_test.cc b/paddle/fluid/platform/cpu_info_test.cc index d1fdba13b8..046758c594 100644 --- a/paddle/fluid/platform/cpu_info_test.cc +++ b/paddle/fluid/platform/cpu_info_test.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/platform/cpu_info.h" -#include "paddle/string/printf.h" +#include "paddle/fluid/string/printf.h" #include #include diff --git a/paddle/fluid/platform/enforce.h b/paddle/fluid/platform/enforce.h index b22893c0a5..86e1792801 100644 --- a/paddle/fluid/platform/enforce.h +++ b/paddle/fluid/platform/enforce.h @@ -23,8 +23,8 @@ limitations under the License. */ #include #include "paddle/fluid/platform/macros.h" -#include "paddle/string/printf.h" -#include "paddle/string/to_string.h" +#include "paddle/fluid/string/printf.h" +#include "paddle/fluid/string/to_string.h" #ifdef __GNUC__ #include // for __cxa_demangle diff --git a/paddle/fluid/platform/enforce_test.cc b/paddle/fluid/platform/enforce_test.cc index 896a9a04ec..baa34a5c7b 100644 --- a/paddle/fluid/platform/enforce_test.cc +++ b/paddle/fluid/platform/enforce_test.cc @@ -15,7 +15,7 @@ limitations under the License. */ #include "gtest/gtest.h" #include "paddle/fluid/platform/enforce.h" -#include "paddle/string/piece.h" +#include "paddle/fluid/string/piece.h" using StringPiece = paddle::string::Piece; using paddle::string::HasPrefix; diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index 8924aabd17..85a6700a61 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -35,7 +35,7 @@ limitations under the License. 
*/ #include "paddle/fluid/pybind/exception.h" #include "paddle/fluid/pybind/pybind.h" #include "paddle/fluid/pybind/tensor_py.h" -#include "paddle/string/to_string.h" +#include "paddle/fluid/string/to_string.h" #ifdef PADDLE_WITH_CUDA #include "paddle/fluid/operators/nccl/nccl_gpu_common.h" diff --git a/paddle/string/.clang-format b/paddle/fluid/string/.clang-format similarity index 100% rename from paddle/string/.clang-format rename to paddle/fluid/string/.clang-format diff --git a/paddle/string/CMakeLists.txt b/paddle/fluid/string/CMakeLists.txt similarity index 100% rename from paddle/string/CMakeLists.txt rename to paddle/fluid/string/CMakeLists.txt diff --git a/paddle/string/piece.cc b/paddle/fluid/string/piece.cc similarity index 99% rename from paddle/string/piece.cc rename to paddle/fluid/string/piece.cc index 330ca5f015..560413dff1 100644 --- a/paddle/string/piece.cc +++ b/paddle/fluid/string/piece.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/string/piece.h" +#include "piece.h" #include diff --git a/paddle/string/piece.h b/paddle/fluid/string/piece.h similarity index 99% rename from paddle/string/piece.h rename to paddle/fluid/string/piece.h index dcef9791a7..f2bb6b2c76 100644 --- a/paddle/string/piece.h +++ b/paddle/fluid/string/piece.h @@ -28,7 +28,7 @@ namespace string { // its syntax is simple as it doesn't own/manage the string, it is // cheap to construct Pieces and pass them around. class Piece { -public: + public: static const size_t npos = static_cast(-1); // We provide non-explicit singleton constructors so users can @@ -55,7 +55,7 @@ public: // Return a string that contains the copy of the referenced data. std::string ToString() const { return std::string(data_, size_); } -private: + private: const char* data_; size_t size_; diff --git a/paddle/string/piece_test.cc b/paddle/fluid/string/piece_test.cc similarity index 99% rename from paddle/string/piece_test.cc rename to paddle/fluid/string/piece_test.cc index 250f26d61f..fc17d315b9 100644 --- a/paddle/string/piece_test.cc +++ b/paddle/fluid/string/piece_test.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/string/piece.h" +#include "paddle/fluid/string/piece.h" #include diff --git a/paddle/string/printf.h b/paddle/fluid/string/printf.h similarity index 97% rename from paddle/string/printf.h rename to paddle/fluid/string/printf.h index 03809d2209..b55ae21b87 100644 --- a/paddle/string/printf.h +++ b/paddle/fluid/string/printf.h @@ -71,7 +71,7 @@ #include #include -#include "paddle/string/tinyformat/tinyformat.h" // https://github.com/c42f/tinyformat +#include "tinyformat/tinyformat.h" // https://github.com/c42f/tinyformat namespace paddle { namespace string { diff --git a/paddle/string/printf_test.cc b/paddle/fluid/string/printf_test.cc similarity index 85% rename from paddle/string/printf_test.cc rename to paddle/fluid/string/printf_test.cc index 9815f29bdd..6ca59bdefd 100644 --- a/paddle/string/printf_test.cc +++ b/paddle/fluid/string/printf_test.cc @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/string/printf.h" +#include "printf.h" #include @@ -24,6 +24,6 @@ TEST(StringPrintf, StringPrintf) { long hour = 14; int min = 44; EXPECT_EQ(std::string("Wednesday, July 27, 14:44"), - paddle::string::Sprintf( - "%s, %s %d, %.2d:%.2d", weekday, month, day, hour, min)); + paddle::string::Sprintf("%s, %s %d, %.2d:%.2d", weekday, month, day, + hour, min)); } diff --git a/paddle/string/tinyformat/tinyformat.h b/paddle/fluid/string/tinyformat/tinyformat.h similarity index 92% rename from paddle/string/tinyformat/tinyformat.h rename to paddle/fluid/string/tinyformat/tinyformat.h index 270198dc52..d1a2c47f1a 100644 --- a/paddle/string/tinyformat/tinyformat.h +++ b/paddle/fluid/string/tinyformat/tinyformat.h @@ -147,7 +147,7 @@ namespace detail { // Test whether type T1 is convertible to type T2 template struct is_convertible { -private: + private: // two types of different size struct fail { char dummy[2]; @@ -160,7 +160,7 @@ private: static succeed tryConvert(const T2 &); static const T1 &makeT1(); -public: + public: // Standard trick: the (...) version of tryConvert will be chosen from // the overload set only if the version taking a T2 doesn't match. // Then we compare the sizes of the return types to check which @@ -170,8 +170,7 @@ public: // Format the value by casting to type fmtT. This default implementation // should never be called. -template ::value> struct formatValueAsType { static void invoke(std::ostream & /*out*/, const T & /*value*/) { assert(0); } @@ -241,11 +240,8 @@ TINYFORMAT_DEFINE_FORMAT_TRUNCATED_CSTR(char) /// operator<< to format the type T, with special cases for the %c and %p /// conversions. template -inline void formatValue(std::ostream &out, - const char * /*fmtBegin*/, - const char *fmtEnd, - int ntrunc, - const T &value) { +inline void formatValue(std::ostream &out, const char * /*fmtBegin*/, + const char *fmtEnd, int ntrunc, const T &value) { // The mess here is to support the %c and %p conversions: if these // conversions are active we try to convert the type to a char or const // void* respectively and format that instead of the value itself. For the @@ -267,25 +263,22 @@ inline void formatValue(std::ostream &out, } // Overloaded version for char types to support printing as an integer -#define TINYFORMAT_DEFINE_FORMATVALUE_CHAR(charType) \ - inline void formatValue(std::ostream &out, \ - const char * /*fmtBegin*/, \ - const char *fmtEnd, \ - int /**/, \ - charType value) { \ - switch (*(fmtEnd - 1)) { \ - case 'u': \ - case 'd': \ - case 'i': \ - case 'o': \ - case 'X': \ - case 'x': \ - out << static_cast(value); \ - break; \ - default: \ - out << value; \ - break; \ - } \ +#define TINYFORMAT_DEFINE_FORMATVALUE_CHAR(charType) \ + inline void formatValue(std::ostream &out, const char * /*fmtBegin*/, \ + const char *fmtEnd, int /**/, charType value) { \ + switch (*(fmtEnd - 1)) { \ + case 'u': \ + case 'd': \ + case 'i': \ + case 'o': \ + case 'X': \ + case 'x': \ + out << static_cast(value); \ + break; \ + default: \ + out << value; \ + break; \ + } \ } // per 3.9.1: char, signed char and unsigned char are all distinct types TINYFORMAT_DEFINE_FORMATVALUE_CHAR(char) @@ -482,7 +475,7 @@ namespace detail { // each argument to be allocated as a homogenous array inside FormatList // whereas a naive implementation based on inheritance does not. 
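 // For example, FormatArg(3) and FormatArg(std::string("x")) have the same
 // size: each instance stores only a const void* and two function pointers.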
class FormatArg { -public: + public: FormatArg() {} template @@ -491,22 +484,17 @@ public: m_formatImpl(&formatImpl), m_toIntImpl(&toIntImpl) {} - void format(std::ostream &out, - const char *fmtBegin, - const char *fmtEnd, + void format(std::ostream &out, const char *fmtBegin, const char *fmtEnd, int ntrunc) const { m_formatImpl(out, fmtBegin, fmtEnd, ntrunc, m_value); } int toInt() const { return m_toIntImpl(m_value); } -private: + private: template - static void formatImpl(std::ostream &out, - const char *fmtBegin, - const char *fmtEnd, - int ntrunc, - const void *value) { + static void formatImpl(std::ostream &out, const char *fmtBegin, + const char *fmtEnd, int ntrunc, const void *value) { formatValue(out, fmtBegin, fmtEnd, ntrunc, *static_cast(value)); } @@ -516,11 +504,8 @@ private: } const void *m_value; - void (*m_formatImpl)(std::ostream &out, - const char *fmtBegin, - const char *fmtEnd, - int ntrunc, - const void *value); + void (*m_formatImpl)(std::ostream &out, const char *fmtBegin, + const char *fmtEnd, int ntrunc, const void *value); int (*m_toIntImpl)(const void *value); }; @@ -569,12 +554,10 @@ inline const char *printFormatStringLiteral(std::ostream &out, // necessary to pull out variable width and precision . The function returns a // pointer to the character after the end of the current format spec. inline const char *streamStateFromFormat(std::ostream &out, - bool &spacePadPositive, - int &ntrunc, + bool &spacePadPositive, int &ntrunc, const char *fmtStart, const detail::FormatArg *formatters, - int &argIndex, - int numFormatters) { + int &argIndex, int numFormatters) { if (*fmtStart != '%') { TINYFORMAT_ERROR( "tinyformat: Not enough conversion specifiers in format string"); @@ -750,10 +733,8 @@ inline const char *streamStateFromFormat(std::ostream &out, } //------------------------------------------------------------------------------ -inline void formatImpl(std::ostream &out, - const char *fmt, - const detail::FormatArg *formatters, - int numFormatters) { +inline void formatImpl(std::ostream &out, const char *fmt, + const detail::FormatArg *formatters, int numFormatters) { // Saved stream state std::streamsize origWidth = out.width(); std::streamsize origPrecision = out.precision(); @@ -765,13 +746,9 @@ inline void formatImpl(std::ostream &out, fmt = printFormatStringLiteral(out, fmt); bool spacePadPositive = false; int ntrunc = -1; - const char *fmtEnd = streamStateFromFormat(out, - spacePadPositive, - ntrunc, - fmt, - formatters, - argIndex, - numFormatters); + const char *fmtEnd = + streamStateFromFormat(out, spacePadPositive, ntrunc, fmt, formatters, + argIndex, numFormatters); if (argIndex >= numFormatters) { // Check args remain after reading any variable width/precision TINYFORMAT_ERROR("tinyformat: Not enough format arguments"); @@ -820,15 +797,14 @@ inline void formatImpl(std::ostream &out, /// information has been stripped from the arguments, leaving just enough of a /// common interface to perform formatting as required. 
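 /// For example, a Sprintf("%d: %s", 1, "x") call ultimately builds a
 /// stack-allocated FormatListN<2> (defined below) and hands it to vformat().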
class FormatList { -public: + public: FormatList(detail::FormatArg *formatters, int N) : m_formatters(formatters), m_N(N) {} - friend void vformat(std::ostream &out, - const char *fmt, + friend void vformat(std::ostream &out, const char *fmt, const FormatList &list); -private: + private: const detail::FormatArg *m_formatters; int m_N; }; @@ -841,7 +817,7 @@ namespace detail { // Format list subclass with fixed storage to avoid dynamic allocation template class FormatListN : public FormatList { -public: + public: template FormatListN(const Args &... args) : FormatList(&m_formatterStore[0], N), @@ -849,14 +825,14 @@ public: static_assert(sizeof...(args) == N, "Number of args must be N"); } -private: + private: FormatArg m_formatterStore[N]; }; // Special 0-arg version - MSVC says zero-sized C array in struct is nonstandard template <> class FormatListN<0> : public FormatList { -public: + public: FormatListN() : FormatList(0, 0) {} }; diff --git a/paddle/string/to_string.h b/paddle/fluid/string/to_string.h similarity index 100% rename from paddle/string/to_string.h rename to paddle/fluid/string/to_string.h diff --git a/paddle/string/to_string_test.cc b/paddle/fluid/string/to_string_test.cc similarity index 96% rename from paddle/string/to_string_test.cc rename to paddle/fluid/string/to_string_test.cc index 05650ee8f1..1e890f572e 100644 --- a/paddle/string/to_string_test.cc +++ b/paddle/fluid/string/to_string_test.cc @@ -12,12 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/string/to_string.h" +#include "to_string.h" #include constexpr char kOutputString[] = "User Defined Output"; class UserDefinedClass { -public: + public: }; std::ostream& operator<<(std::ostream& s, const UserDefinedClass& ins) { -- GitLab From 0c45eab7fffd94169ddda0d61e0613524dbcd8e6 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Sun, 11 Feb 2018 05:43:55 +0000 Subject: [PATCH 051/217] no getmutable nccl_com --- paddle/fluid/framework/executor.cc | 7 +++---- python/paddle/v2/fluid/tests/test_parallel_op.py | 4 ++-- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index 5e1358ab0e..254df564e2 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -23,7 +23,6 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/lod_tensor_array.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/reader.h" -#include "paddle/fluid/operators/nccl/nccl_gpu_common.h" // platform::Communicator #include "paddle/fluid/platform/place.h" #include "paddle/fluid/platform/profiler.h" @@ -54,15 +53,15 @@ static void CreateTensor(Variable* var, proto::VarDesc::VarType var_type) { var->GetMutable(); } else if (var_type == proto::VarDesc::PLACE_LIST) { var->GetMutable(); - } else if (var_type == proto::VarDesc::NCCL_COM) { - var->GetMutable(); } else if (var_type == proto::VarDesc::READER) { var->GetMutable(); + } else if (var_type == proto::VarDesc::NCCL_COM) { + // GetMutable will be called in ncclInit } else { PADDLE_THROW( "Variable type %d is not in " "[LOD_TENSOR, SELECTED_ROWS, FEED_MINIBATCH, FETCH_LIST, " - "LOD_RANK_TABLE, PLACE_LIST, READER]", + "LOD_RANK_TABLE, PLACE_LIST, READER, NCCL_COM]", var_type); } } diff --git a/python/paddle/v2/fluid/tests/test_parallel_op.py b/python/paddle/v2/fluid/tests/test_parallel_op.py index 66bb6442af..7f6d0b8d32 100644 --- a/python/paddle/v2/fluid/tests/test_parallel_op.py +++ b/python/paddle/v2/fluid/tests/test_parallel_op.py @@ -212,5 +212,5 @@ class ParallelOpTestMultipleInput(BaseParallelForTest): fetch=['fc1.w@GRAD', 'fc2.w@GRAD', 'fc3.w@GRAD']) -#if __name__ == '__main__': -# unittest.main() +if __name__ == '__main__': + unittest.main() -- GitLab From f35401c4da32e575bcf902c293549465374e5d60 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Sun, 11 Feb 2018 05:47:06 +0000 Subject: [PATCH 052/217] diable debug string due to vector bug --- paddle/fluid/framework/executor.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index 254df564e2..3723a9131d 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -120,11 +120,12 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id, for (auto& op_desc : block.AllOps()) { auto op = paddle::framework::OpRegistry::CreateOp(*op_desc); - VLOG(3) << op->DebugStringEx(local_scope); + // VLOG(3) << op->DebugStringEx(local_scope); platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); platform::RecordEvent record_event(op->Type(), pool.Get(place_)); + VLOG(3) << op->Type(); op->Run(*local_scope, place_); if (FLAGS_benchmark) { VLOG(2) << "Memory used after operator " + op->Type() + " running: " -- GitLab From a43fac35676ba391da1aabaadd3edb19fab4e087 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sun, 11 Feb 2018 14:38:14 +0800 Subject: [PATCH 053/217] Fix empty Vector foreach Fix #8368 --- paddle/fluid/framework/mixed_vector.h | 33 +++++++++++++-------- paddle/fluid/framework/mixed_vector_test.cu | 6 ++++ 2 files changed, 26 insertions(+), 13 deletions(-) diff --git a/paddle/fluid/framework/mixed_vector.h b/paddle/fluid/framework/mixed_vector.h index 4dc3de54de..a35ec5d1de 100644 --- a/paddle/fluid/framework/mixed_vector.h +++ b/paddle/fluid/framework/mixed_vector.h @@ -106,9 +106,9 @@ class Vector { // std::vector iterator methods. Based on CPU data access method size_t size() const { return size_; } - T* begin() { return &this->operator[](0); } + T* begin() { return size() == 0 ? &EmptyDummy() : &this->operator[](0); } - T* end() { return &this->operator[](size()); } + T* end() { return size() == 0 ? 
&EmptyDummy() : &this->operator[](size()); } T& front() { return *begin(); } @@ -118,12 +118,12 @@ class Vector { return *it; } - const T* begin() const { return &this->operator[](0); } - const T* end() const { return &this->operator[](size()); } - - const T* cbegin() const { return begin(); } - - const T* cend() const { return end(); } + const T* begin() const { + return size() == 0 ? &EmptyDummy() : &this->operator[](0); + } + const T* end() const { + return size() == 0 ? &EmptyDummy() : &this->operator[](size()); + } const T& back() const { auto it = end(); @@ -240,16 +240,18 @@ class Vector { // implicit cast operator. Vector can be cast to std::vector implicitly. operator std::vector() const { std::vector result; - result.resize(size()); - std::copy(begin(), end(), result.begin()); + if (size() == 0) { + result.resize(size()); + std::copy(begin(), end(), result.begin()); + } return result; } bool operator==(const Vector& other) const { if (size() != other.size()) return false; - auto it1 = cbegin(); - auto it2 = other.cbegin(); - for (; it1 < cend(); ++it1, ++it2) { + auto it1 = begin(); + auto it2 = other.begin(); + for (; it1 < end(); ++it1, ++it2) { if (*it1 != *it2) { return false; } @@ -358,6 +360,11 @@ class Vector { } } + static T& EmptyDummy() { + static T dummy = T(); + return dummy; + } + mutable int flag_; mutable Tensor cpu_vec_; mutable Tensor cuda_vec_; diff --git a/paddle/fluid/framework/mixed_vector_test.cu b/paddle/fluid/framework/mixed_vector_test.cu index 0d5a914eac..8ea574b31c 100644 --- a/paddle/fluid/framework/mixed_vector_test.cu +++ b/paddle/fluid/framework/mixed_vector_test.cu @@ -98,3 +98,9 @@ TEST(mixed_vector, InitWithCount) { ASSERT_EQ(vec[i], 10); } } + +TEST(mixed_vector, ForEach) { + vec tmp; + for (auto& v : tmp) { + } +} -- GitLab From 21071f7106e0873c3360c80d5744f7b120bb6bd9 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Sun, 11 Feb 2018 15:00:34 +0800 Subject: [PATCH 054/217] no create trainer var on listen_and_serv --- paddle/fluid/operators/listen_and_serv_op.cc | 4 ++- .../paddle/v2/fluid/distribute_transpiler.py | 30 ++++++++----------- python/paddle/v2/fluid/framework.py | 23 -------------- 3 files changed, 16 insertions(+), 41 deletions(-) diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index 3e5a17f216..8b9bd43f07 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -85,7 +85,7 @@ class ListenAndServOp : public framework::OperatorBase { rpc_service_->SetScope(&recv_scope); rpc_service_->SetDevCtx(&dev_ctx); auto ins = Inputs("X"); - auto fan_in = ins.size(); + auto fan_in = Attr("Fanin"); auto *block = Attr(kOptimizeBlock); auto *program = block->Program(); @@ -163,6 +163,8 @@ from send_op and send back variables to recv_op. 
.AddCustomChecker([](const std::string &ip) { return !ip.empty(); }); AddAttr(kOptimizeBlock, "BlockID to run on server side."); + AddAttr("Fanin", "How many clients send to this server.") + .SetDefault(1); } }; diff --git a/python/paddle/v2/fluid/distribute_transpiler.py b/python/paddle/v2/fluid/distribute_transpiler.py index c6a4ab0ec7..67f09f9e66 100644 --- a/python/paddle/v2/fluid/distribute_transpiler.py +++ b/python/paddle/v2/fluid/distribute_transpiler.py @@ -14,7 +14,7 @@ from __future__ import print_function import framework -from framework import Program, default_main_program, Parameter, Variable +from framework import Program, default_main_program, default_startup_program, Parameter, Variable import optimizer from layer_helper import LayerHelper from distributed_spliter import * @@ -97,7 +97,7 @@ class DistributeTranspiler: parameter servers. :param optimize_ops: op list of optimization, should be the - return value of Optimizer.minimize + return value of Optimizer.minimize :type optimize_ops: list :param params_grads: list of tuple(weight, gradient) :type params_grads: list @@ -131,6 +131,7 @@ class DistributeTranspiler: # 4. append concat_op to trainer to update local weights. # 5. create new program for parameter server. # 6. create parameter server program by split_method generated endpoint->VarBlock + # 7. update startup_program, rename variables to variables with trainer_id pserver_endpoints = pservers.split(",") @@ -175,7 +176,6 @@ class DistributeTranspiler: shape=[0]) # create send_op - print("send inputs: ", send_inputs) send_op = program.global_block().append_op( type="send", inputs={"X": send_inputs}, @@ -194,6 +194,15 @@ class DistributeTranspiler: outputs={"Out": [orig_param]}, attrs={"axis": 0}) + # step 7 + startup_prog = default_startup_program() + for varname in startup_prog.global_block().vars.keys(): + if varname in param_var_mapping and \ + len(param_var_mapping[varname]) == 1: + new_var_name = "%s.trainer_%d" % \ + (varname, self.trainer_id) + startup_prog.global_block().rename_var(varname, new_var_name) + def _create_vars_from_blocklist(self, program, block_list): # Create respective variables using the block_list block_map = dict() @@ -210,7 +219,6 @@ class DistributeTranspiler: new_var_name = "%s.trainer_%d" % \ (orig_var.name, self.trainer_id) program.global_block().rename_var(varname, new_var_name) - print("renaming OK...", varname, new_var_name) var_mapping[varname] = \ [program.global_block().var(new_var_name)] continue @@ -377,10 +385,7 @@ class DistributeTranspiler: new_inputs = dict() # update param/grad shape first, then other inputs like # moment can use the updated shape - print("mark1") for key in opt_op.input_names: - # print("opt type: ", opt_op.type) - # print("opt op input: ", key) if key == "Grad": grad_block = None for g in self.param_grad_ep_mapping[endpoint]["grads"]: @@ -427,7 +432,6 @@ class DistributeTranspiler: new_inputs[key] = tmpvar - print("mark2") for key in opt_op.input_names: if key in ["Param", "Grad"]: continue @@ -451,7 +455,6 @@ class DistributeTranspiler: inputs=new_inputs, outputs=outputs, attrs=opt_op.attrs) - print("mark3") def _append_pserver_non_opt_ops(self, optimize_block, opt_op): program = optimize_block.program @@ -505,8 +508,6 @@ class DistributeTranspiler: suff_idx = v.name.find(".trainer_") if suff_idx >= 0: orig_var_name = v.name[:suff_idx] - print("create variable for program: %s.trainer_%d" % - (orig_var_name, trainer_id)) var = pserver_program.global_block().create_var( name="%s.trainer_%d" % 
(orig_var_name, trainer_id), persistable=True, @@ -517,11 +518,6 @@ class DistributeTranspiler: optimize_block = pserver_program.create_block(0) # Iterate through the ops and append ops as needed for idx, opt_op in enumerate(self.optimize_ops): - print("mark0") - print(opt_op.inputs.keys()) - for v in opt_op.inputs.values(): - print(v.name) - print(v.shape) is_op_on_pserver = self._is_op_on_pserver(endpoint, self.optimize_ops, idx) if not is_op_on_pserver: @@ -547,7 +543,7 @@ class DistributeTranspiler: # p.name # for p in self.param_grad_ep_mapping[endpoint]["grads"] # ], - # "Fanin": self.trainers + "Fanin": self.trainers }) pserver_program.sync_with_cpp() return pserver_program diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/v2/fluid/framework.py index 417fcb4fd3..02e2f8a6a1 100644 --- a/python/paddle/v2/fluid/framework.py +++ b/python/paddle/v2/fluid/framework.py @@ -761,17 +761,6 @@ class Block(object): else: raise ValueError("unsupported var type: %s", type(v)) - def _clear_op_io_for_var(name): - for op in self.ops: - for k in op.inputs.keys(): - - if op.inputs[k].name == name: - op.inputs[k] = None - for k in op.outputs.keys(): - if op.outputs[k].name == name: - op.outputs[k] = None - - _clear_op_io_for_var(name) self.desc.rename_var(name, new_name) d = self.desc.find_var(new_name) var = None @@ -797,17 +786,6 @@ class Block(object): # rename the python side, sync_with_cpp will only add # new vars/ops to python side. self.vars[new_name] = var - for op in self.ops: - print("### rename op i/o ", name, op.inputs) - if op.inputs: - for k in op.inputs.keys(): - if op.inputs[k] == None: - print("rename input: ", name, var) - op.inputs[k] = var - if op.outputs: - for k in op.outputs.keys(): - if op.outputs[k] == None: - op.outputs[k] = var del self.vars[name] self.sync_with_cpp() @@ -901,7 +879,6 @@ class Block(object): for p in other.iter_parameters(): assert isinstance(p, Parameter) v = self.vars.get(p.name, None) - print("var shape to copy", v, p) if v is None: raise ValueError("copy_param_info_from should be invoked with " "same topology") -- GitLab From a8b630c89c290a40818346672e91d97675042f36 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Sun, 11 Feb 2018 15:04:46 +0800 Subject: [PATCH 055/217] remove comments --- python/paddle/v2/fluid/distribute_transpiler.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/python/paddle/v2/fluid/distribute_transpiler.py b/python/paddle/v2/fluid/distribute_transpiler.py index 67f09f9e66..48ad4500fd 100644 --- a/python/paddle/v2/fluid/distribute_transpiler.py +++ b/python/paddle/v2/fluid/distribute_transpiler.py @@ -361,8 +361,6 @@ class DistributeTranspiler: j = idx - 1 while j >= 0: prev_op = all_ops[j] - # prev_output_names = [o.name for o in prev_op.outputs.values()] - # prev_input_names = [o.name for o in prev_op.inputs.values()] # NOTE(typhoonzero): consider list input/output prev_output_names = prev_op.desc.output_arg_names() prev_input_names = prev_op.desc.input_arg_names() @@ -535,14 +533,6 @@ class DistributeTranspiler: attrs={ "OptimizeBlock": optimize_block, "endpoint": endpoint, - # "ParamList": [ - # p.name - # for p in self.param_grad_ep_mapping[endpoint]["params"] - # ], - # "GradList": [ - # p.name - # for p in self.param_grad_ep_mapping[endpoint]["grads"] - # ], "Fanin": self.trainers }) pserver_program.sync_with_cpp() -- GitLab From caf9a09d7bee946969999130477fb5de2983007b Mon Sep 17 00:00:00 2001 From: Yancey Date: Sun, 11 Feb 2018 15:44:27 +0800 Subject: [PATCH 056/217] Merge selected rows with 
dynamic variable count (#8023) * dynamic send/recv selected rows * update by comment * fix by comment --- paddle/fluid/operators/listen_and_serv_op.cc | 16 +++++++++++++ paddle/fluid/operators/send_op.cc | 24 +++++++++++++++++-- .../fluid/operators/split_selected_rows_op.cc | 23 +----------------- .../fluid/operators/split_selected_rows_op.h | 1 + paddle/fluid/operators/sum_op.h | 4 +++- .../paddle/v2/fluid/distribute_transpiler.py | 4 ++++ 6 files changed, 47 insertions(+), 25 deletions(-) diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index 3730ae161f..426dd0dc0e 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -101,6 +101,9 @@ class ListenAndServOp : public framework::OperatorBase { // TODO(typhoonzero): change this to a while_op for every cluster-batch. bool exit_flag = false; + // Record received sparse variables, so that + // we could reset those after execute optimize program + std::vector sparse_vars; while (!exit_flag) { // Get from multiple trainers, we don't care about the order in which // the gradients arrives, just add suffix 0~n and merge the gradient. @@ -143,6 +146,9 @@ class ListenAndServOp : public framework::OperatorBase { PADDLE_THROW("Can not find server side var"); } detail::DeserializeFromMessage(v.second, dev_ctx, var); + if (var->IsType()) { + sparse_vars.push_back(var); + } } } VLOG(3) << "recv " << recv_var_cnt << " parmeters for one barrier."; @@ -156,9 +162,19 @@ class ListenAndServOp : public framework::OperatorBase { } catch (std::exception &e) { LOG(ERROR) << "run sub program error " << e.what(); } + + // Reset the received sparse variables, the sum operator would not + // sum the input sparse variables which rows is empty at the next + // mini-batch. + // TOOD(Yancey1989): move the reset action into an operator, we couldn't + // have any hide logic in the operator. + for (auto &var : sparse_vars) { + var->GetMutable()->mutable_rows()->clear(); + } rpc_service_->SetCond(1); rpc_service_->WaitClientGet(update_param_cnt); grads_counter_.clear(); + sparse_vars.clear(); } // while(true) } diff --git a/paddle/fluid/operators/send_op.cc b/paddle/fluid/operators/send_op.cc index a8390aa659..b241f738cb 100644 --- a/paddle/fluid/operators/send_op.cc +++ b/paddle/fluid/operators/send_op.cc @@ -24,6 +24,22 @@ limitations under the License. 
*/ namespace paddle { namespace operators { +static bool IsVariableInitialized(const framework::Scope& scope, + const std::string& varname) { + auto* var = scope.FindVar(varname); + PADDLE_ENFORCE_NOT_NULL(var, "Can not find variable '%s' in the send side.", + varname); + if (var->IsType()) { + return var->Get().IsInitialized(); + } else if (var->IsType()) { + return var->Get().value().IsInitialized(); + } else { + PADDLE_THROW( + "Variable type in send side should be in " + "[LodTensor, SelectedRows]"); + } + return false; +} class SendOp : public framework::OperatorBase { public: @@ -51,8 +67,12 @@ class SendOp : public framework::OperatorBase { detail::RPCClient* rpc_client = client_var->GetMutable(); for (size_t i = 0; i < ins.size(); i++) { - VLOG(3) << "sending " << ins[i] << " to " << epmap[i]; - rpc_client->AsyncSendVariable(epmap[i], ctx, scope, ins[i]); + if (IsVariableInitialized(scope, ins[i])) { + VLOG(3) << "sending " << ins[i] << " to " << epmap[i]; + rpc_client->AsyncSendVariable(epmap[i], ctx, scope, ins[i]); + } else { + VLOG(3) << "don't send no-initialied variable: " << ins[i]; + } } PADDLE_ENFORCE(rpc_client->Wait()); diff --git a/paddle/fluid/operators/split_selected_rows_op.cc b/paddle/fluid/operators/split_selected_rows_op.cc index 113ce2ce10..c30280f654 100644 --- a/paddle/fluid/operators/split_selected_rows_op.cc +++ b/paddle/fluid/operators/split_selected_rows_op.cc @@ -22,7 +22,7 @@ class SplitSelectedRowsOpMaker : public framework::OpProtoAndCheckerMaker { SplitSelectedRowsOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input SelectedRows."); - AddOutput("Out", "The outputs of input SelectedRows.").AsDuplicable(); + AddOutput("Out", "The outputs of the input SelectedRows.").AsDuplicable(); AddAttr>("height_sections", "Height for each output SelectedRows.") .SetDefault(std::vector({})); @@ -56,27 +56,6 @@ class SplitSelectedRowsOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(ctx->HasInput("X"), "SplitSelectedRowsOp must has input X."); PADDLE_ENFORCE(ctx->HasOutputs("Out"), "SplitSelectedRowsOp must has output Out."); - - std::vector height_sections = - ctx->Attrs().Get>("height_sections"); - int64_t n = ctx->Outputs("Out").size(); - - std::vector outs_dims; - outs_dims.reserve(n); - - // make output dims - for (int64_t i = 0; i < n; ++i) { - auto dims = ctx->GetInputDim("X"); - if (height_sections.size()) { - PADDLE_ENFORCE_EQ( - height_sections.size(), static_cast(n), - "The size of height section should be the same with height" - " section size."); - dims[0] = height_sections[i]; - } - outs_dims.push_back(dims); - } - ctx->SetOutputsDim("Out", outs_dims); } }; diff --git a/paddle/fluid/operators/split_selected_rows_op.h b/paddle/fluid/operators/split_selected_rows_op.h index 527264bd67..af44b09b70 100644 --- a/paddle/fluid/operators/split_selected_rows_op.h +++ b/paddle/fluid/operators/split_selected_rows_op.h @@ -55,6 +55,7 @@ class SplitSelectedRowsOpKernel : public framework::OpKernel { for (size_t i = 0; i < outs_rows_idx.size(); ++i) { auto rows_idx = outs_rows_idx[i]; + outs[i]->set_height(height_sections[i]); if (rows_idx.size() > 0) { auto dims = x->GetCompleteDims(); dims[0] = rows_idx.size(); diff --git a/paddle/fluid/operators/sum_op.h b/paddle/fluid/operators/sum_op.h index 5e1222c6ef..08218b6836 100644 --- a/paddle/fluid/operators/sum_op.h +++ b/paddle/fluid/operators/sum_op.h @@ -116,7 +116,9 @@ class SumKernel : public framework::OpKernel { int64_t offset = 0; for 
(int i = 0; i < N; i++) { auto &sel_row = get_selected_row(i); - + if (!sel_row.value().IsInitialized() || sel_row.rows().size() == 0) { + continue; + } PADDLE_ENFORCE_EQ(out->height(), sel_row.height()); functor(context.template device_context(), sel_row, offset, out); diff --git a/python/paddle/v2/fluid/distribute_transpiler.py b/python/paddle/v2/fluid/distribute_transpiler.py index edef2b1b17..e4675e24b1 100644 --- a/python/paddle/v2/fluid/distribute_transpiler.py +++ b/python/paddle/v2/fluid/distribute_transpiler.py @@ -191,6 +191,7 @@ class DistributeTranspiler: for b in param_blocks: varname, block_id, _ = b.split(":") send_outputs.append(param_var_mapping[varname][int(block_id)]) + # let send_op know which endpoint to send which var to, eplist has the same # order as send_inputs. eplist = split_method(send_inputs, pserver_endpoints) @@ -274,6 +275,7 @@ class DistributeTranspiler: name="%s.block%d" % (varname, i), psersistable=False, dtype=orig_var.dtype, + type=orig_var.type, shape=splited_shape) # flattend splited var var_mapping[varname].append(var) return var_mapping @@ -335,6 +337,7 @@ class DistributeTranspiler: name="%s.trainer_%d" % (var.name, i), psersistable=var.persistable, dtype=var.dtype, + type=var.type, shape=var.shape) var_list.append(var_each) return var_list @@ -561,6 +564,7 @@ class DistributeTranspiler: persistable=True, dtype=v.dtype, shape=v.shape) + # step6 optimize_block = pserver_program.create_block(0) # step 6.1 -- GitLab From 18efe5aa1d8a6395dea68cfaa299fe636a22509e Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sun, 11 Feb 2018 15:48:00 +0800 Subject: [PATCH 057/217] Fix CI --- paddle/fluid/framework/mixed_vector.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/framework/mixed_vector.h b/paddle/fluid/framework/mixed_vector.h index a35ec5d1de..b834d4633b 100644 --- a/paddle/fluid/framework/mixed_vector.h +++ b/paddle/fluid/framework/mixed_vector.h @@ -240,7 +240,7 @@ class Vector { // implicit cast operator. Vector can be cast to std::vector implicitly. operator std::vector() const { std::vector result; - if (size() == 0) { + if (size() != 0) { result.resize(size()); std::copy(begin(), end(), result.begin()); } -- GitLab From 2cfb2928dbe1b3c6848e9c4a8d187c3e1e4245ca Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Sun, 11 Feb 2018 16:44:52 +0800 Subject: [PATCH 058/217] Fix develop dist transpiler bug --- .../paddle/v2/fluid/distribute_transpiler.py | 78 ++++++++----------- 1 file changed, 34 insertions(+), 44 deletions(-) diff --git a/python/paddle/v2/fluid/distribute_transpiler.py b/python/paddle/v2/fluid/distribute_transpiler.py index e4675e24b1..62d1f3434c 100644 --- a/python/paddle/v2/fluid/distribute_transpiler.py +++ b/python/paddle/v2/fluid/distribute_transpiler.py @@ -191,7 +191,6 @@ class DistributeTranspiler: for b in param_blocks: varname, block_id, _ = b.split(":") send_outputs.append(param_var_mapping[varname][int(block_id)]) - # let send_op know which endpoint to send which var to, eplist has the same # order as send_inputs. 
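         # e.g. with pservers "ps0:6174,ps1:6174" and four splited blocks,
         # a round-robin split_method gives eplist = ["ps0:6174", "ps1:6174",
         # "ps0:6174", "ps1:6174"], one endpoint per entry in send_inputs.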
eplist = split_method(send_inputs, pserver_endpoints) @@ -230,21 +229,6 @@ class DistributeTranspiler: outputs={"Out": [orig_param]}, attrs={"axis": 0}) - self.lr_param_mapping = self._create_lr_param_mapping() - - def _create_lr_param_mapping(self): - lr_mapping = dict() - for _, opt_op in enumerate(self.optimize_ops): - if not opt_op.inputs or not opt_op.inputs.has_key("LearningRate") \ - or not opt_op.inputs.has_key("Param"): - continue - lr = opt_op.inputs["LearningRate"].name - param = opt_op.inputs["Param"].name - if not lr_mapping.has_key(lr): - lr_mapping.update({lr: list()}) - lr_mapping[lr].append(param) - return lr_mapping - def _create_vars_from_blocklist(self, program, block_list): # Create respective variables using the block_list block_map = dict() @@ -369,18 +353,19 @@ class DistributeTranspiler: pass return orig_shape - def _fetch_var_names(self, param_dict): - res = [] - if not param_dict: - return res - for _, values in param_dict.iteritems(): - if not isinstance(values, list): - values = [values] - res += [v.name for v in values] - return res + # def _fetch_var_names(self, param_dict): + # res = [] + # if not param_dict: + # return res + # for _, values in param_dict.iteritems(): + # if not isinstance(values, list): + # values = [values] + # res += [v.name for v in values] + # return res def _append_pserver_ops(self, optimize_block, opt_op, endpoint): program = optimize_block.program + pserver_block = program.global_block() new_inputs = dict() # update param/grad shape first, then other inputs like # moment can use the updated shape @@ -395,11 +380,11 @@ class DistributeTranspiler: # do not append this op if current endpoint # is not dealing with this grad block return - merged_var = program.global_block().vars[grad_block.name] + merged_var = pserver_block.vars[grad_block.name] # append merging ops if trainers > 1 if self.trainers > 1: vars2merge = self._create_var_for_trainers( - program.global_block(), grad_block, self.trainers) + pserver_block, grad_block, self.trainers) optimize_block.append_op( type="sum", inputs={"X": vars2merge}, @@ -419,29 +404,27 @@ class DistributeTranspiler: break if not param_block: return - tmpvar = program.global_block().create_var( + tmpvar = pserver_block.create_var( name=param_block.name, persistable=True, dtype=param_block.dtype, shape=param_block.shape) - new_inputs[key] = tmpvar elif key == "LearningRate": # leraning rate variable has already be created by non-optimize op, # don't create it once again. 
- new_inputs[key] = program.global_block().vars[opt_op.input(key)[ - 0]] + new_inputs[key] = pserver_block.vars[opt_op.input(key)[0]] for key in opt_op.input_names: new_shape = None if key in ["Param", "Grad", "LearningRate"]: continue - var = program.global_block().vars[opt_op.input(key)[0]] + var = self.program.global_block().vars[opt_op.input(key)[0]] # update accumulator variable shape param_shape = new_inputs["Param"].shape new_shape = self._get_optimizer_input_shape(opt_op.type, key, var.shape, param_shape) - tmpvar = program.global_block().create_var( + tmpvar = pserver_block.create_var( name=var.name, persistable=var.persistable, dtype=var.dtype, @@ -449,11 +432,14 @@ class DistributeTranspiler: new_inputs[key] = tmpvar # change output's ParamOut variable + outputs = self._get_output_map_from_op(self.program.global_block().vars, + opt_op) opt_op.outputs["ParamOut"] = new_inputs["Param"] + optimize_block.append_op( type=opt_op.type, inputs=new_inputs, - outputs=opt_op.outputs, + outputs=outputs, attrs=opt_op.attrs) def _append_pserver_non_opt_ops(self, optimize_block, opt_op): @@ -497,11 +483,16 @@ class DistributeTranspiler: # If one op's input is another op's output or # one op's output is another op's input, we say # the two operator is connected. - op1_input_names = self._fetch_var_names(op1.inputs) - op1_output_names = self._fetch_var_names(op1.outputs) + # op1_input_names = self._fetch_var_names(op1.inputs) + # op1_output_names = self._fetch_var_names(op1.outputs) + op1_input_names = op1.desc.input_arg_names() + op1_output_names = op1.desc.output_arg_names() + + # op2_input_names = self._fetch_var_names(op2.inputs) + # op2_output_names = self._fetch_var_names(op2.outputs) + op2_input_names = op2.desc.input_arg_names() + op2_output_names = op2.desc.output_arg_names() - op2_input_names = self._fetch_var_names(op2.inputs) - op2_output_names = self._fetch_var_names(op2.outputs) if set(op1_output_names) & set(op2_input_names) or \ set(op1_input_names) & set(op2_output_names): return True @@ -521,8 +512,8 @@ class DistributeTranspiler: def _is_opt_op(self, op): # NOTE: It's a HACK implement. # optimize op: SGDOptimize, MomentumOptimizer, AdamOptimizer and etc... 
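         # e.g. sgd_op takes both "Param" and "LearningRate" inputs, so it
         # is treated as an optimize op; a plain sum or scale op is not.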
- if op.inputs and op.inputs.has_key("Param") \ - and op.inputs.has_key("LearningRate"): + if "Param" in op.input_names and \ + "LearningRate" in op.input_names: return True return False @@ -530,12 +521,12 @@ class DistributeTranspiler: param_names = [ p.name for p in self.param_grad_ep_mapping[endpoint]["params"] ] - if op.inputs["Param"].name in param_names: + if op.input("Param") in param_names: return True else: for n in param_names: - param = op.inputs["Param"].name - if same_or_split_var(n, param) and n != op.inputs["Param"].name: + param = op.input("Param")[0] + if same_or_split_var(n, param) and n != param: return True return False return False @@ -564,7 +555,6 @@ class DistributeTranspiler: persistable=True, dtype=v.dtype, shape=v.shape) - # step6 optimize_block = pserver_program.create_block(0) # step 6.1 -- GitLab From 92ac30efd9bab1e7bcf9c0d98e3b44dd4edbc5a3 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Sun, 11 Feb 2018 16:47:10 +0800 Subject: [PATCH 059/217] remove comments --- python/paddle/v2/fluid/distribute_transpiler.py | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/python/paddle/v2/fluid/distribute_transpiler.py b/python/paddle/v2/fluid/distribute_transpiler.py index 62d1f3434c..ff84e609e2 100644 --- a/python/paddle/v2/fluid/distribute_transpiler.py +++ b/python/paddle/v2/fluid/distribute_transpiler.py @@ -353,16 +353,6 @@ class DistributeTranspiler: pass return orig_shape - # def _fetch_var_names(self, param_dict): - # res = [] - # if not param_dict: - # return res - # for _, values in param_dict.iteritems(): - # if not isinstance(values, list): - # values = [values] - # res += [v.name for v in values] - # return res - def _append_pserver_ops(self, optimize_block, opt_op, endpoint): program = optimize_block.program pserver_block = program.global_block() @@ -483,13 +473,9 @@ class DistributeTranspiler: # If one op's input is another op's output or # one op's output is another op's input, we say # the two operator is connected. 
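The connectivity rule stated in the comment above is just a set intersection over argument names; a self-contained sketch of the predicate:

```python
# Two ops are "connected" when the outputs of one intersect the inputs of
# the other, in either direction.
def is_connected(op1_in, op1_out, op2_in, op2_out):
    return bool(set(op1_out) & set(op2_in)) or bool(set(op1_in) & set(op2_out))

assert is_connected(["x"], ["y"], ["y"], ["z"])      # op1 feeds op2
assert is_connected(["y"], ["z"], ["x"], ["y"])      # op2 feeds op1
assert not is_connected(["a"], ["b"], ["c"], ["d"])  # unrelated ops
```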
- # op1_input_names = self._fetch_var_names(op1.inputs) - # op1_output_names = self._fetch_var_names(op1.outputs) op1_input_names = op1.desc.input_arg_names() op1_output_names = op1.desc.output_arg_names() - # op2_input_names = self._fetch_var_names(op2.inputs) - # op2_output_names = self._fetch_var_names(op2.outputs) op2_input_names = op2.desc.input_arg_names() op2_output_names = op2.desc.output_arg_names() -- GitLab From 628bb27a5144a3765884c6c13fc1dd1655c80a93 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Sun, 11 Feb 2018 15:29:52 +0800 Subject: [PATCH 060/217] refine prior_boxes --- python/paddle/v2/fluid/layers/__init__.py | 5 +- python/paddle/v2/fluid/layers/detection.py | 260 +++++++++++++++++++++ python/paddle/v2/fluid/layers/nn.py | 256 ++------------------ 3 files changed, 287 insertions(+), 234 deletions(-) create mode 100644 python/paddle/v2/fluid/layers/detection.py diff --git a/python/paddle/v2/fluid/layers/__init__.py b/python/paddle/v2/fluid/layers/__init__.py index a83dd3db74..f4fb2ca279 100644 --- a/python/paddle/v2/fluid/layers/__init__.py +++ b/python/paddle/v2/fluid/layers/__init__.py @@ -26,12 +26,15 @@ import device from device import * import math_op_patch from math_op_patch import * +import detection +from detection import * __all__ = [] +__all__ += math_op_patch.__all__ __all__ += nn.__all__ __all__ += io.__all__ __all__ += tensor.__all__ __all__ += control_flow.__all__ __all__ += ops.__all__ __all__ += device.__all__ -__all__ += math_op_patch.__all__ +__all__ += detection.__all__ diff --git a/python/paddle/v2/fluid/layers/detection.py b/python/paddle/v2/fluid/layers/detection.py new file mode 100644 index 0000000000..b0c25c11de --- /dev/null +++ b/python/paddle/v2/fluid/layers/detection.py @@ -0,0 +1,260 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +All layers just related to the detection neural network. +""" + +from ..layer_helper import LayerHelper +from ..framework import Variable +from ..param_attr import ParamAttr +from ..framework import Variable +from layer_function_generator import autodoc +from tensor import concat +from nn import flatten +import math + +__all__ = [ + 'prior_box', + 'prior_boxes', +] + + +def prior_box(input, + image, + min_sizes, + max_sizes, + aspect_ratios, + variance, + flip=False, + clip=False, + step_w=0.0, + step_h=0.0, + offset=0.5, + name=None): + """ + **Prior_box** + + Generate prior boxes for SSD(Single Shot MultiBox Detector) algorithm. + Each position of the input produce N prior boxes, N is determined by + the count of min_sizes, max_sizes and aspect_ratios, The size of the + box is in range(min_size, max_size) interval, which is generated in + sequence according to the aspect_ratios. + + Args: + input(variable): The input feature data of PriorBox, + the layout is NCHW. + image(variable): The input image data of PriorBox, the + layout is NCHW. + min_sizes(list): the min sizes of generated prior boxes. 
+        max_sizes(list): the max sizes of generated prior boxes.
+        aspect_ratios(list): the aspect ratios of generated prior boxes.
+        variance(list): the variances to be encoded in prior boxes.
+        flip(bool, optional, default=False): Whether to flip aspect ratios.
+        clip(bool, optional, default=False): Whether to clip
+            out-of-boundary boxes.
+        step_w(int, optional, default=0.0): Prior boxes step across
+            width, 0.0 for auto calculation.
+        step_h(int, optional, default=0.0): Prior boxes step across
+            height, 0.0 for auto calculation.
+        offset(float, optional, default=0.5): Prior boxes center offset.
+        name(str, optional, default=None): Name of the prior box layer.
+
+    Returns:
+        boxes(variable): the output prior boxes of PriorBoxOp. The layout is
+            [H, W, num_priors, 4]. H is the height of input, W is the width
+            of input, and num_priors is the box count of each position, where
+            num_priors = len(aspect_ratios) * len(min_sizes) + len(max_sizes)
+        Variances(variable): the expanded variances of PriorBoxOp. The layout
+            is [H, W, num_priors, 4]. H is the height of input, W is the width
+            of input, and num_priors is the box count of each position, where
+            num_priors = len(aspect_ratios) * len(min_sizes) + len(max_sizes)
+    Examples:
+        .. code-block:: python
+
+            data = fluid.layers.data(name="data", shape=[3, 32, 32], dtype="float32")
+            conv2d = fluid.layers.conv2d(
+                input=data, num_filters=2, filter_size=3)
+            box, var = fluid.layers.prior_box(conv2d, data,
+                                              min_size, max_size, aspect_ratio,
+                                              variance, flip, clip,
+                                              step_w, step_h, offset)
+    """
+    helper = LayerHelper("prior_box", **locals())
+    dtype = helper.input_dtype()
+
+    box = helper.create_tmp_variable(dtype)
+    var = helper.create_tmp_variable(dtype)
+    helper.append_op(
+        type="prior_box",
+        inputs={"Input": input,
+                "Image": image},
+        outputs={"Boxes": box,
+                 "Variances": var},
+        attrs={
+            'min_sizes': min_sizes,
+            'max_sizes': max_sizes,
+            'aspect_ratios': aspect_ratios,
+            'variances': variance,
+            'flip': flip,
+            'clip': clip,
+            'step_w': step_w,
+            'step_h': step_h,
+            'offset': offset
+        })
+    return box, var
+
+
+def prior_boxes(inputs,
+                image,
+                min_ratio,
+                max_ratio,
+                aspect_ratios,
+                base_size,
+                steps=None,
+                step_w=None,
+                step_h=None,
+                offset=0.5,
+                variance=[0.1, 0.1, 0.1, 0.1],
+                flip=False,
+                clip=False,
+                name=None):
+    """
+    **Prior_boxes**
+
+    Generate prior boxes for SSD(Single Shot MultiBox Detector) algorithm.
+    Each position of each input produces many prior boxes respectively; the
+    number of prior boxes produced per input is determined by the count of
+    min_ratio, max_ratio and aspect_ratios. The size of each box falls in the
+    interval (min_ratio, max_ratio) and is generated in sequence according to
+    the aspect_ratios.
+
+    Args:
+        inputs(list): The list of input variables, the format of all variables is NCHW.
+        image(variable): The input image data of PriorBoxOp, the layout is NCHW.
+        min_ratio(int): the min ratio of generated prior boxes.
+        max_ratio(int): the max ratio of generated prior boxes.
+        aspect_ratios(list): the aspect ratios of generated prior boxes.
+            The length of inputs and aspect_ratios must be equal.
+        base_size(int): the base_size is used to get min_size and max_size
+            according to min_ratio and max_ratio.
+        step_w(list, optional, default=None): Prior boxes step across width.
+            If step_w[i] == 0.0, the prior boxes step across width of the inputs[i]
+            will be automatically calculated.
+        step_h(list, optional, default=None): Prior boxes step across height.
+            If step_h[i] == 0.0, the prior boxes step across height of the inputs[i]
+            will be automatically calculated.
+        offset(float, optional, default=0.5): Prior boxes center offset.
+        variance(list, optional, default=[0.1, 0.1, 0.1, 0.1]): the variances
+            to be encoded in prior boxes.
+        flip(bool, optional, default=False): Whether to flip aspect ratios.
+        clip(bool, optional, default=False): Whether to clip out-of-boundary boxes.
+        name(str, optional, default=None): Name of the prior box layer.
+
+    Returns:
+        boxes(variable): the output prior boxes of PriorBoxOp. The layout is
+            [num_priors, 4]. num_priors is the total box count of each
+            position of inputs.
+        Variances(variable): the expanded variances of PriorBoxOp. The layout
+            is [num_priors, 4]. num_priors is the total box count of each
+            position of inputs.
+
+    Examples:
+        .. code-block:: python
+
+            prior_boxes(
+                inputs = [conv1, conv2, conv3, conv4, conv5, conv6],
+                image = data,
+                min_ratio = 20, # 0.20
+                max_ratio = 90, # 0.90
+                steps = [8., 16., 32., 64., 100., 300.],
+                aspect_ratios = [[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
+                base_size = 300,
+                offset = 0.5,
+                variance = [0.1, 0.1, 0.1, 0.1],
+                flip=True,
+                clip=True)
+    """
+    assert isinstance(inputs, list), 'inputs should be a list.'
+    num_layer = len(inputs)
+    assert num_layer > 2  # TODO(zcd): currently, num_layer must be bigger than two.
+
+    min_sizes = []
+    max_sizes = []
+    if num_layer > 2:
+        step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
+        for ratio in xrange(min_ratio, max_ratio + 1, step):
+            min_sizes.append(base_size * ratio / 100.)
+            max_sizes.append(base_size * (ratio + step) / 100.)
+        min_sizes = [base_size * .10] + min_sizes
+        max_sizes = [base_size * .20] + max_sizes
+
+    if step_h:
+        assert isinstance(step_h, list) and len(step_h) == num_layer, \
+            'step_h should be a list, and inputs and step_h should have the same length'
+    if step_w:
+        assert isinstance(step_w, list) and len(step_w) == num_layer, \
+            'step_w should be a list, and inputs and step_w should have the same length'
+    if steps:
+        assert isinstance(steps, list) and len(steps) == num_layer, \
+            'steps should be a list, and inputs and steps should have the same length'
+        step_w = steps
+        step_h = steps
+    if aspect_ratios:
+        assert isinstance(aspect_ratios, list) and len(aspect_ratios) == num_layer, \
+            'aspect_ratios should be a list, and inputs and aspect_ratios should ' \
+            'have the same length'
+
+    box_results = []
+    var_results = []
+    for i, input in enumerate(inputs):
+        min_size = min_sizes[i]
+        max_size = max_sizes[i]
+        aspect_ratio = []
+        if not isinstance(min_size, list):
+            min_size = [min_size]
+        if not isinstance(max_size, list):
+            max_size = [max_size]
+        if aspect_ratios:
+            aspect_ratio = aspect_ratios[i]
+            if not isinstance(aspect_ratio, list):
+                aspect_ratio = [aspect_ratio]
+
+        box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
+                             variance, flip, clip, step_w[i]
+                             if step_w else 0.0, step_h[i]
+                             if step_h else 0.0, offset)
+
+        box_results.append(box)
+        var_results.append(var)
+
+    if len(box_results) == 1:
+        box = box_results[0]
+        var = var_results[0]
+    else:
+        axis = 3
+        reshaped_boxes = []
+        reshaped_vars = []
+        for i in range(len(box_results)):
+            reshaped_boxes += [flatten(box_results[i], axis=3)]
+            reshaped_vars += [flatten(var_results[i], axis=3)]
+
+        helper = LayerHelper("concat", **locals())
+        dtype = helper.input_dtype()
+        box = helper.create_tmp_variable(dtype)
+        var = helper.create_tmp_variable(dtype)
+
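As an aside, the min/max size ladder computed near the top of this function is easy to verify by hand. A small standalone sketch (plain Python mirroring only the arithmetic, assuming the SSD300 settings from the docstring example):

```python
# Size ladder for 6 feature maps, min_ratio=20, max_ratio=90, base_size=300.
import math

num_layer, min_ratio, max_ratio, base_size = 6, 20, 90, 300
step = int(math.floor((max_ratio - min_ratio) / (num_layer - 2)))  # 17
ratios = range(min_ratio, max_ratio + 1, step)  # 20, 37, 54, 71, 88
min_sizes = [base_size * .10] + [base_size * r / 100. for r in ratios]
max_sizes = [base_size * .20] + [base_size * (r + step) / 100. for r in ratios]
print(min_sizes)  # [30.0, 60.0, 111.0, 162.0, 213.0, 264.0]
print(max_sizes)  # [60.0, 111.0, 162.0, 213.0, 264.0, 315.0]
```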
+        box = concat(reshaped_boxes)
+        var = concat(reshaped_vars)
+
+    return box, var
diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py
index f0bcddaf9a..4d2de38c35 100644
--- a/python/paddle/v2/fluid/layers/nn.py
+++ b/python/paddle/v2/fluid/layers/nn.py
@@ -67,9 +67,8 @@ __all__ = [
     'beam_search',
     'row_conv',
     'reshape_with_axis',
+    'flatten',
     'multiplex',
-    'prior_box',
-    'prior_boxes',
     'layer_norm',
 ]
@@ -3149,242 +3148,33 @@ def reshape_with_axis(input, axis):
     return out
 
 
-def prior_box(input,
-              image,
-              min_sizes,
-              max_sizes,
-              aspect_ratios,
-              variance,
-              flip=False,
-              clip=False,
-              step_w=0.0,
-              step_h=0.0,
-              offset=0.5,
-              name=None):
+def flatten(input, axis=1):
     """
-    **Prior_box**
-
-    Generate prior boxes for SSD(Single Shot MultiBox Detector) algorithm.
-    Each position of the input produce N prior boxes, N is determined by
-    the count of min_sizes, max_sizes and aspect_ratios, The size of the
-    box is in range(min_size, max_size) interval, which is generated in
-    sequence according to the aspect_ratios.
-
+    **Flatten Layer**
+
+    flatten merges all the dimensions from `axis` on into a single trailing
+    dimension and folds everything before `axis` into the first, inferred (-1)
+    dimension, producing a 2-D output.
+
     Args:
-        input(variable): The input feature data of PriorBox,
-            the layout is NCHW.
-        image(variable): The input image data of PriorBox, the
-            layout is NCHW.
-        min_sizes(list): the min sizes of generated prior boxes.
-        max_sizes(list): the max sizes of generated prior boxes.
-        aspect_ratios(list): the aspect ratios of generated prior boxes.
-        variance(list): the variances to be encoded in prior boxes.
-        flip(bool, optional, default=False): Whether to flip aspect ratios.
-        clip(bool, optional, default=False)): Whether to clip
-            out-of-boundary boxes.
-        step_w(int, optional, default=0.0): Prior boxes step across
-            width, 0.0 for auto calculation.
-        step_h(int, optional, default=0.0): Prior boxes step across
-            height, 0.0 for auto calculation.
-        offset(float, optional, default=0.5): Prior boxes center offset.
-        name(str, optional, default=None): Name of the prior box layer.
-
+        input(variable): The input tensor.
+        axis(int): all dimensions from `axis` on are merged into a single
+            trailing dimension.
     Returns:
-        boxes(variable): the output prior boxes of PriorBoxOp. The layout is
-            [H, W, num_priors, 4]. H is the height of input, W is the width
-            of input, num_priors is the box count of each position. Where num_priors =
-            len(aspect_ratios) * len(min_sizes) + len(max_sizes)
-        Variances(variable): the expanded variances of PriorBoxOp. The layout
-            is [H, W, num_priors, 4]. H is the height of input, W is the width
-            of input, num_priors is the box count of each position. Where num_priors =
-            len(aspect_ratios) * len(min_sizes) + len(max_sizes)
+        Variable: A tensor variable.
     Examples:
         .. code-block:: python
-
-            data = fluid.layers.data(name="data", shape=[3, 32, 32], dtype="float32")
-            conv2d = fluid.layers.conv2d(
-                input=data, num_filters=2, filter_size=3)
-            box, var = fluid.layers.prior_box(conv2d, data,
-                                              min_size, max_size, aspect_ratio,
-                                              variance, flip, clip,
-                                              step_w, step_h, offset)
+            x = fluid.layers.data(name="data", shape=[3, 32, 32], dtype="float32")
+            flattened = fluid.layers.flatten(input=x, axis=2)
+            flattened.shape
+            >> [-1, 1024]
     """
-    helper = LayerHelper("prior_box", **locals())
-    dtype = helper.input_dtype()
-
-    box = helper.create_tmp_variable(dtype)
-    var = helper.create_tmp_variable(dtype)
-    helper.append_op(
-        type="prior_box",
-        inputs={"Input": input,
-                "Image": image},
-        outputs={"Boxes": box,
-                 "Variances": var},
-        attrs={
-            'min_sizes': min_sizes,
-            'max_sizes': max_sizes,
-            'aspect_ratios': aspect_ratios,
-            'variances': variance,
-            'flip': flip,
-            'clip': clip,
-            'step_w': step_w,
-            'step_h': step_h,
-            'offset': offset
-        })
-    return box, var
-
-
-def prior_boxes(inputs,
-                image,
-                min_ratio,
-                max_ratio,
-                aspect_ratios,
-                base_size,
-                steps=None,
-                step_w=None,
-                step_h=None,
-                offset=0.5,
-                variance=[0.1, 0.1, 0.1, 0.1],
-                flip=False,
-                clip=False,
-                name=None):
-    """
-    **Prior_boxes**
-
-    Generate prior boxes for SSD(Single Shot MultiBox Detector) algorithm.
-    Each position of the inputs produces many prior boxes respectly, the number
-    of prior boxes which is produced by inputs respectly is determined by
-    the count of min_ratio, max_ratio and aspect_ratios, The size of the
-    box is in range(min_ratio, max_ratio) interval, which is generated in
-    sequence according to the aspect_ratios.
-
-    Args:
-        inputs(list): The list of input variables, the format of all variables is NCHW.
-        image(variable): The input image data of PriorBoxOp, the layout is NCHW.
-        min_ratio(int): the min ratio of generated prior boxes.
-        max_ratio(int): the max ratio of generated prior boxes.
-        aspect_ratios(list): the aspect ratios of generated prior boxes.
-            The length of input and aspect_ratios must be equal.
-        base_size(int): the base_size is used to get min_size and max_size
-            according to min_ratio and max_ratio.
-        step_w(list, optional, default=None): Prior boxes step across width.
-            If step_w[i] == 0.0, the prior boxes step across width of the inputs[i]
-            will be automatically calculated.
-        step_h(list, optional, default=None): Prior boxes step across height,
-            If step_h[i] == 0.0, the prior boxes step across height of the inputs[i]
-            will be automatically calculated.
-        offset(float, optional, default=0.5): Prior boxes center offset.
-        variance(list, optional, default=[0.1, 0.1, 0.1, 0.1]): the variances
-            to be encoded in prior boxes.
-        flip(bool, optional, default=False): Whether to flip aspect ratios.
-        clip(bool, optional, default=False): Whether to clip out-of-boundary boxes.
-        name(str, optional, None): Name of the prior box layer.
-
-    Returns:
-        boxes(variable): the output prior boxes of PriorBoxOp. The layout is
-            [num_priors, 4]. num_priors is the total box count of each
-            position of inputs.
-        Variances(variable): the expanded variances of PriorBoxOp. The layout
-            is [num_priors, 4]. num_priors is the total box count of each
-            position of inputs
-
-    Examples:
-        .. code-block:: python
+    assert len(input.shape) > axis and axis > 0, \
+        "the axis should be smaller than the rank of the input."
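+    # For example (illustrative): with input.shape == [-1, 3, 32, 32] and
+    # axis == 2, new_shape below evaluates to [-1, 32 * 32] == [-1, 1024];
+    # every dimension from `axis` on is folded into the second dimension.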
+ input_shape = input.shape - prior_boxes( - inputs = [conv1, conv2, conv3, conv4, conv5, conv6], - image = data, - min_ratio = 20, # 0.20 - max_ratio = 90, # 0.90 - steps = [8., 16., 32., 64., 100., 300.], - aspect_ratios = [[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]], - base_size = 300, - offset = 0.5, - variance = [0.1,0.1,0.1,0.1], - flip=True, - clip=True) - """ - assert isinstance(inputs, list), 'inputs should be a list.' - num_layer = len(inputs) - assert num_layer > 2 # TODO(zcd): currently, num_layer must be bigger than two. - - min_sizes = [] - max_sizes = [] - if num_layer > 2: - step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2))) - for ratio in xrange(min_ratio, max_ratio + 1, step): - min_sizes.append(base_size * ratio / 100.) - max_sizes.append(base_size * (ratio + step) / 100.) - min_sizes = [base_size * .10] + min_sizes - max_sizes = [base_size * .20] + max_sizes - - if step_h: - assert isinstance(step_h,list) and len(step_h) == num_layer, \ - 'step_h should be list and inputs and step_h should have same length' - if step_w: - assert isinstance(step_w,list) and len(step_w) == num_layer, \ - 'step_w should be list and inputs and step_w should have same length' - if steps: - assert isinstance(steps,list) and len(steps) == num_layer, \ - 'steps should be list and inputs and step_w should have same length' - step_w = steps - step_h = steps - if aspect_ratios: - assert isinstance(aspect_ratios, list) and len(aspect_ratios) == num_layer, \ - 'aspect_ratios should be list and inputs and aspect_ratios should ' \ - 'have same length' - - box_results = [] - var_results = [] - for i, input in enumerate(inputs): - min_size = min_sizes[i] - max_size = max_sizes[i] - aspect_ratio = [] - if not isinstance(min_size, list): - min_size = [min_size] - if not isinstance(max_size, list): - max_size = [max_size] - if aspect_ratios: - aspect_ratio = aspect_ratios[i] - if not isinstance(aspect_ratio, list): - aspect_ratio = [aspect_ratio] - - box, var = prior_box(input, image, min_size, max_size, aspect_ratio, - variance, flip, clip, step_w[i] - if step_w else 0.0, step_h[i] - if step_w else 0.0, offset) - - box_results.append(box) - var_results.append(var) - - if len(box_results) == 1: - box = box_results[0] - var = var_results[0] - else: - axis = 3 - reshaped_boxes = [] - reshaped_vars = [] - for i in range(len(box_results)): - reshaped_boxes += [reshape_with_axis(box_results[i], axis=[axis])] - reshaped_vars += [reshape_with_axis(var_results[i], axis=[axis])] - - helper = LayerHelper("concat", **locals()) - dtype = helper.input_dtype() - box = helper.create_tmp_variable(dtype) - var = helper.create_tmp_variable(dtype) - - axis = 0 - helper.append_op( - type="concat", - inputs={"X": reshaped_boxes}, - outputs={"Out": box}, - attrs={'axis': axis}) + new_shape = [-1, reduce(mul, input_shape[axis:len(input_shape)], 1)] - var = helper.create_tmp_variable(dtype) - helper.append_op( - type="concat", - inputs={"X": reshaped_vars}, - outputs={"Out": var}, - attrs={'axis': axis}) - - return box, var + helper = LayerHelper('reshape', **locals()) + out = helper.create_tmp_variable(helper.input_dtype()) + helper.append_op( + type='reshape', + inputs={'X': [input]}, + outputs={'Out': [out]}, + attrs={'shape': new_shape}) + return out -- GitLab From 74f7aff397871b1f658e5f0d5195beb94794551f Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Sun, 11 Feb 2018 16:46:28 +0800 Subject: [PATCH 061/217] add unit test --- .../object_detection/test_prior_boxes.py | 87 +++++++++++++++++++ 1 file 
changed, 87 insertions(+) create mode 100644 python/paddle/v2/fluid/tests/object_detection/test_prior_boxes.py diff --git a/python/paddle/v2/fluid/tests/object_detection/test_prior_boxes.py b/python/paddle/v2/fluid/tests/object_detection/test_prior_boxes.py new file mode 100644 index 0000000000..50b5249d98 --- /dev/null +++ b/python/paddle/v2/fluid/tests/object_detection/test_prior_boxes.py @@ -0,0 +1,87 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function +import numpy as np +import paddle.v2.fluid as fluid +import paddle.v2.fluid.layers.detection as detection +import paddle.v2.fluid.core as core +import unittest + + +def prior_box_output(data_shape): + images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32') + conv1 = fluid.layers.conv2d( + input=images, num_filters=3, filter_size=3, stride=2, use_cudnn=False) + conv2 = fluid.layers.conv2d( + input=conv1, num_filters=3, filter_size=3, stride=2, use_cudnn=False) + conv3 = fluid.layers.conv2d( + input=conv2, num_filters=3, filter_size=3, stride=2, use_cudnn=False) + conv4 = fluid.layers.conv2d( + input=conv3, num_filters=3, filter_size=3, stride=2, use_cudnn=False) + conv5 = fluid.layers.conv2d( + input=conv4, num_filters=3, filter_size=3, stride=2, use_cudnn=False) + + box, var = detection.prior_boxes( + inputs=[conv1, conv2, conv3, conv4, conv5, conv5], + image=images, + min_ratio=20, + max_ratio=90, + # steps=[8, 16, 32, 64, 100, 300], + aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]], + base_size=300, + offset=0.5, + flip=True, + clip=True) + return box, var + + +def main(use_cuda): + if use_cuda: # prior_box only support CPU. 
+        return
+
+    data_shape = [3, 224, 224]
+    box, var = prior_box_output(data_shape=data_shape)
+
+    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+    exe = fluid.Executor(place)
+    exe.run(fluid.default_startup_program())
+    batch = [128]
+
+    for i in range(1):
+        # print("iteration : %d" % i)
+        x = np.random.random(batch + data_shape).astype("float32")
+        tensor_x = core.LoDTensor()
+        tensor_x.set(x, place)
+        box, var = exe.run(fluid.default_main_program(),
+                           feed={'pixel': tensor_x},
+                           fetch_list=[box, var])
+        box_arr = np.array(box)
+        var_arr = np.array(var)
+        assert box_arr.shape[1] == 4
+        assert var_arr.shape[1] == 4
+        assert box_arr.shape[0] == var_arr.shape[0]
+
+
+class TestPriorBoxes(unittest.TestCase):
+    def program_scope_guard(self):
+        # This helper is used below but was missing; a minimal definition,
+        # assuming fluid.program_guard is available, so that each test
+        # builds into fresh main/startup programs.
+        from contextlib import contextmanager
+
+        @contextmanager
+        def guard():
+            with fluid.program_guard(fluid.Program(), fluid.Program()):
+                yield
+
+        return guard()
+
+    def test_cpu(self):
+        with self.program_scope_guard():
+            main(use_cuda=False)
+
+    def test_cuda(self):
+        with self.program_scope_guard():
+            main(use_cuda=True)
+
+
+if __name__ == '__main__':
+    unittest.main()
--
GitLab

From 01f4bcb57ee99a9d5c2d52cbc7cbce4c4a0454c8 Mon Sep 17 00:00:00 2001
From: typhoonzero
Date: Sun, 11 Feb 2018 16:54:01 +0800
Subject: [PATCH 062/217] remove inputs/outputs from Operator

---
 python/paddle/v2/fluid/distribute_transpiler.py | 2 +-
 python/paddle/v2/fluid/framework.py             | 3 ---
 2 files changed, 1 insertion(+), 4 deletions(-)

diff --git a/python/paddle/v2/fluid/distribute_transpiler.py b/python/paddle/v2/fluid/distribute_transpiler.py
index ff84e609e2..f84481adf7 100644
--- a/python/paddle/v2/fluid/distribute_transpiler.py
+++ b/python/paddle/v2/fluid/distribute_transpiler.py
@@ -424,7 +424,7 @@ class DistributeTranspiler:
         # change output's ParamOut variable
         outputs = self._get_output_map_from_op(self.program.global_block().vars,
                                                opt_op)
-        opt_op.outputs["ParamOut"] = new_inputs["Param"]
+        outputs["ParamOut"] = new_inputs["Param"]
 
         optimize_block.append_op(
             type=opt_op.type,
diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/v2/fluid/framework.py
index a517db68c5..35d3df785b 100644
--- a/python/paddle/v2/fluid/framework.py
+++ b/python/paddle/v2/fluid/framework.py
@@ -400,9 +400,6 @@ class Operator(object):
         """
         self.block = block
         self.desc = desc
-        # for clone a new operator
-        self.inputs = inputs
-        self.outputs = outputs
         self.attrs = attrs
         if len(self.desc.type()) != 0:
             return
--
GitLab

From 07bb4139776cafecbdcc11d663e38e22a2163a96 Mon Sep 17 00:00:00 2001
From: Yu Yang
Date: Sun, 11 Feb 2018 16:59:57 +0800
Subject: [PATCH 063/217] Revert changes

---
 paddle/fluid/framework/mixed_vector.h | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/paddle/fluid/framework/mixed_vector.h b/paddle/fluid/framework/mixed_vector.h
index 837b5fa7f6..a06e34d551 100644
--- a/paddle/fluid/framework/mixed_vector.h
+++ b/paddle/fluid/framework/mixed_vector.h
@@ -110,10 +110,6 @@ class Vector {
   T* end() { return size() == 0 ? &EmptyDummy() : &this->operator[](size()); }
 
-  const T* cbegin() const { return begin(); }
-
-  const T* cend() const { return end(); }
-
   T& front() { return *begin(); }
 
   T& back() {
@@ -125,10 +121,15 @@ class Vector {
   const T* begin() const {
     return size() == 0 ? &EmptyDummy() : &this->operator[](0);
   }
+
   const T* end() const {
     return size() == 0 ? &EmptyDummy() : &this->operator[](size());
   }
 
+  const T* cbegin() const { return begin(); }
+
+  const T* cend() const { return end(); }
+
   const T& back() const {
     auto it = end();
     --it;
@@ -244,10 +245,8 @@ class Vector {
   // implicit cast operator. Vector can be cast to std::vector implicitly.
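  // Note: the unconditional resize/copy restored below is also safe for an
  // empty Vector. resize(0) is a no-op, and std::copy over an empty range
  // (begin() == end()) copies nothing, so no size() guard is needed.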
operator std::vector() const { std::vector result; - if (size() != 0) { - result.resize(size()); - std::copy(begin(), end(), result.begin()); - } + result.resize(size()); + std::copy(begin(), end(), result.begin()); return result; } -- GitLab From 42912e48cb0ce5c62c450e42373de09b04c30513 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sun, 11 Feb 2018 17:28:01 +0800 Subject: [PATCH 064/217] rename switch_kernel.md to kernel_selection.md --- doc/design/{switch_kernel.md => kernel_selection.md} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename doc/design/{switch_kernel.md => kernel_selection.md} (100%) diff --git a/doc/design/switch_kernel.md b/doc/design/kernel_selection.md similarity index 100% rename from doc/design/switch_kernel.md rename to doc/design/kernel_selection.md -- GitLab From 5d5dcedc841452e4135275087ffc0ad03ebf47f4 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Sun, 11 Feb 2018 18:45:25 +0800 Subject: [PATCH 065/217] merge build docs with build using docker --- doc/build_and_install/build_cn.md | 124 ------------------ doc/build_and_install/build_en.md | 124 ------------------ .../build_from_source_cn.rst | 111 +++++++++++++--- .../build_from_source_en.rst | 120 +++++++++++++---- 4 files changed, 182 insertions(+), 297 deletions(-) delete mode 100644 doc/build_and_install/build_cn.md delete mode 100644 doc/build_and_install/build_en.md diff --git a/doc/build_and_install/build_cn.md b/doc/build_and_install/build_cn.md deleted file mode 100644 index 4a80a52451..0000000000 --- a/doc/build_and_install/build_cn.md +++ /dev/null @@ -1,124 +0,0 @@ -# 用Docker编译和测试PaddlePaddle - -## 需要的软硬件 - -为了开发PaddlePaddle,我们需要 - -1. 一台电脑,可以装的是 Linux, BSD, Windows 或者 MacOS 操作系统,以及 -1. Docker。 - -不需要依赖其他任何软件了。即便是 Python 和 GCC 都不需要,因为我们会把所有编译工具都安装进一个 Docker image 里。 - -## 总体流程 - -1. 获取源码 - - ```bash - git clone https://github.com/paddlepaddle/paddle - ``` - -2. 安装开发工具到 Docker image 里 - - ```bash - cd paddle; docker build -t paddle:dev . - ``` - - 请注意这个命令结尾处的 `.`;它表示 `docker build` 应该读取当前目录下的 [`Dockerfile`文件](https://github.com/PaddlePaddle/Paddle/blob/develop/Dockerfile),按照其内容创建一个名为 `paddle:dev` 的 Docker image,并且把各种开发工具安装进去。 - -3. 编译 - - 以下命令启动一个 Docker container 来执行 `paddle:dev` 这个 Docker image,同时把当前目录(源码树根目录)映射为 container 里的 `/paddle` 目录,并且运行 `Dockerfile` 描述的默认入口程序 [`build.sh`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/scripts/docker/build.sh)。这个脚本调用 `cmake` 和 `make` 来编译 `/paddle` 里的源码,结果输出到 `/paddle/build`,也就是本地的源码树根目录里的 `build` 子目录。 - - ```bash - docker run --rm -v $PWD:/paddle paddle:dev - ``` - - 上述命令编译出一个 CUDA-enabled 版本。如果我们只需要编译一个只支持 CPU 的版本,可以用 - - ```bash - docker run --rm -e WITH_GPU=OFF -v $PWD:/paddle paddle:dev - ``` - -4. 运行单元测试 - - 用本机的第一个 GPU 来运行包括 GPU 单元测试在内的所有单元测试: - - ```bash - NV_GPU=0 nvidia-docker run --rm -v $PWD:/paddle paddle:dev bash -c "cd /paddle/build; ctest" - ``` - - 如果编译的时候我们用了 `WITH_GPU=OFF` 选项,那么编译过程只会产生 CPU-based 单元测试,那么我们也就不需要 nvidia-docker 来运行单元测试了。我们只需要: - - ```bash - docker run --rm -v $PWD:/paddle paddle:dev bash -c "cd /paddle/build; ctest" - ``` - - 有时候我们只想运行一个特定的单元测试,比如 `memory_test`,我们可以 - - ```bash - nvidia-docker run --rm -v $PWD:/paddle paddle:dev bash -c "cd /paddle/build; ctest -V -R memory_test" - ``` - -5. 清理 - - 有时候我们会希望清理掉已经下载的第三方依赖以及已经编译的二进制文件。此时只需要: - - ```bash - rm -rf build - ``` - -## 为什么要 Docker 呀? - -- 什么是 Docker? - - 如果您没有听说 Docker,可以把它想象为一个类似 virtualenv 的系统,但是虚拟的不仅仅是 Python 的运行环境。 - -- Docker 还是虚拟机? 
- - 有人用虚拟机来类比 Docker。需要强调的是:Docker 不会虚拟任何硬件,Docker container 里运行的编译工具实际上都是在本机的 CPU 和操作系统上直接运行的,性能和把编译工具安装在本机运行一样。 - -- 为什么用 Docker? - - 把工具和配置都安装在一个 Docker image 里可以标准化编译环境。这样如果遇到问题,其他人可以复现问题以便帮助。 - - 另外,对于习惯使用Windows和MacOS的开发者来说,使用Docker就不用配置交叉编译环境了。 - -- 我可以选择不用Docker吗? - - 当然可以。大家可以用把开发工具安装进入 Docker image 一样的方式,把这些工具安装到本机。这篇文档介绍基于 Docker 的开发流程,是因为这个流程比其他方法都更简便。 - -- 学习 Docker 有多难? - - 理解 Docker 并不难,大概花十分钟看一下[这篇文章](https://zhuanlan.zhihu.com/p/19902938)。这可以帮您省掉花一小时安装和配置各种开发工具,以及切换机器时需要新安装的辛苦。别忘了 PaddlePaddle 更新可能导致需要新的开发工具。更别提简化问题复现带来的好处了。 - -- 我可以用 IDE 吗? - - 当然可以,因为源码就在本机上。IDE 默认调用 make 之类的程序来编译源码,我们只需要配置 IDE 来调用 Docker 命令编译源码即可。 - - 很多 PaddlePaddle 开发者使用 Emacs。他们在自己的 `~/.emacs` 配置文件里加两行 - - ```emacs - (global-set-key "\C-cc" 'compile) - (setq compile-command - "docker run --rm -it -v $(git rev-parse --show-toplevel):/paddle paddle:dev") - ``` - - 就可以按 `Ctrl-C` 和 `c` 键来启动编译了。 - -- 可以并行编译吗? - - 是的。我们的 Docker image 运行一个 [Bash 脚本](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/scripts/docker/build.sh)。这个脚本调用 `make -j$(nproc)` 来启动和 CPU 核一样多的进程来并行编译。 - -## 可能碰到的问题 - -- Docker 需要 sudo - - 如果用自己的电脑开发,自然也就有管理员权限(sudo)了。如果用公用的电脑开发,需要请管理员安装和配置好 Docker。此外,PaddlePaddle 项目在努力开始支持其他不需要 sudo 的集装箱技术,比如 rkt。 - -- 在 Windows/MacOS 上编译很慢 - - Docker 在 Windows 和 MacOS 都可以运行。不过实际上是运行在一个 Linux 虚拟机上。可能需要注意给这个虚拟机多分配一些 CPU 和内存,以保证编译高效。具体做法请参考[这个issue](https://github.com/PaddlePaddle/Paddle/issues/627)。 - -- 磁盘不够 - - 本文中的例子里,`docker run` 命令里都用了 `--rm` 参数,这样保证运行结束之后的 containers 不会保留在磁盘上。可以用 `docker ps -a` 命令看到停止后但是没有删除的 containers。`docker build` 命令有时候会产生一些中间结果,是没有名字的 images,也会占用磁盘。可以参考[这篇文章](https://zaiste.net/posts/removing_docker_containers/)来清理这些内容。 diff --git a/doc/build_and_install/build_en.md b/doc/build_and_install/build_en.md deleted file mode 100644 index 91c41ef8ce..0000000000 --- a/doc/build_and_install/build_en.md +++ /dev/null @@ -1,124 +0,0 @@ -# Build using Docker - -## What Developers Need - -To contribute to PaddlePaddle, you need - -1. A computer -- Linux, BSD, Windows, MacOS, and -1. Docker. - -Nothing else. Not even Python and GCC, because you can install all build tools into a Docker image. We run all the tools by running this image. - -## General Process - -1. Retrieve source code. - - ```bash - git clone https://github.com/paddlepaddle/paddle - ``` - -2. Install build tools into a Docker image. - - ```bash - cd paddle; docker build -t paddle:dev . - ``` - - Please be aware of the `.` at the end of the command, which refers to the [`./Dockerfile` file](https://github.com/PaddlePaddle/Paddle/blob/develop/Dockerfile). `docker build` follows instructions in this file to create a Docker image named `paddle:dev`, and installs building tools into it. - -3. Build from source. - - This following command starts a Docker container that executes the Docker image `paddle:dev`, mapping the current directory to `/paddle/` in the container, and runs the default entry-point [`build.sh`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/scripts/docker/build.sh) as specified in the Dockefile. `build.sh` invokes `cmake` and `make` to build PaddlePaddle source code, which had been mapped to `/paddle`, and writes outputs to `/paddle/build`, which maps to `build` in the current source directory on the computer. - - ```bash - docker run -v $PWD:/paddle paddle:dev - ``` - - Above command builds a CUDA-enabled version. If we want to build a CPU-only version, we can type - - ```bash - docker run -e WITH_GPU=OFF -v $PWD:/paddle paddle:dev - ``` - -4. Run unit tests. 
- - To run all unit tests using the first GPU of a node: - - ```bash - NV_GPU=0 nvidia-docker run -v $PWD:/paddle paddle:dev bash -c "cd /paddle/build; ctest" - ``` - - If we used `WITH_GPU=OFF` at build time, it generates only CPU-based unit tests, and we don't need nvidia-docker to run them. We can just run - - ```bash - docker run -v $PWD:/paddle paddle:dev bash -c "cd /paddle/build; ctest" - ``` - - Sometimes we want to run a specific unit test, say `memory_test`, we can run - - ```bash - nvidia-docker run -v $PWD:/paddle paddle:dev bash -c "cd /paddle/build; ctest -V -R memory_test" - ``` - -5. Clean Build. - - Sometimes, we might want to clean all thirt-party dependents and built binaries. To do so, just - - ```bash - rm -rf build - ``` - -## Docker, Or Not? - -- What is Docker? - - If you haven't heard of it, consider it something like Python's virtualenv. - -- Docker or virtual machine? - - Some people compare Docker with VMs, but Docker doesn't virtualize any hardware nor running a guest OS, which means there is no compromise on the performance. - -- Why Docker? - - Using a Docker image of build tools standardizes the building environment, which makes it easier for others to reproduce your problems and to help. - - Also, some build tools don't run on Windows or Mac or BSD, but Docker runs almost everywhere, so developers can use whatever computer they want. - -- Can I choose not to use Docker? - - Sure, you don't have to install build tools into a Docker image; instead, you can install them in your local computer. This document exists because Docker would make the development way easier. - -- How difficult is it to learn Docker? - - It takes you ten minutes to read [an introductory article](https://docs.docker.com/get-started) and saves you more than one hour to install all required build tools, configure them, especially when new versions of PaddlePaddle require some new tools. Not even to mention the time saved when other people trying to reproduce the issue you have. - -- Can I use my favorite IDE? - - Yes, of course. The source code resides on your local computer, and you can edit it using whatever editor you like. - - Many PaddlePaddle developers are using Emacs. They add the following few lines into their `~/.emacs` configure file: - - ```emacs - (global-set-key "\C-cc" 'compile) - (setq compile-command - "docker run --rm -it -v $(git rev-parse --show-toplevel):/paddle paddle:dev") - ``` - - so they could type `Ctrl-C` and `c` to build PaddlePaddle from source. - -- Does Docker do parallel building? - - Our building Docker image runs a [Bash script](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/scripts/docker/build.sh), which calls `make -j$(nproc)` to starts as many processes as the number of your CPU cores. - -## Some Gotchas - -- Docker requires sudo - - An owner of a computer has the administrative privilege, a.k.a., sudo, and Docker requires this privilege to work properly. If you use a shared computer for development, please ask the administrator to install and configure Docker. We will do our best to support rkt, another container technology that doesn't require sudo. - -- Docker on Windows/MacOS builds slowly - - On Windows and MacOS, Docker containers run in a Linux VM. You might want to give this VM some more memory and CPUs so to make the building efficient. Please refer to [this issue](https://github.com/PaddlePaddle/Paddle/issues/627) for details. 
- -- Not enough disk space - - Examples in this article uses option `--rm` with the `docker run` command. This option ensures that stopped containers do not exist on hard disks. We can use `docker ps -a` to list all containers, including stopped. Sometimes `docker build` generates some intermediate dangling images, which also take disk space. To clean them, please refer to [this article](https://zaiste.net/posts/removing_docker_containers/). diff --git a/doc/build_and_install/build_from_source_cn.rst b/doc/build_and_install/build_from_source_cn.rst index ff904b1022..fec2d412f0 100644 --- a/doc/build_and_install/build_from_source_cn.rst +++ b/doc/build_and_install/build_from_source_cn.rst @@ -1,14 +1,26 @@ 从源码编译 ====================== +.. _requirements: + +需要的软硬件 +---------------- + +为了编译PaddlePaddle,我们需要 + +1. 一台电脑,可以装的是 Linux, Windows 或者 MacOS 操作系统 +1. Docker + +不需要依赖其他任何软件了。即便是 Python 和 GCC 都不需要,因为我们会把所有编译工具都安装进一个 Docker 镜像里。 + .. _build_step: 编译方法 ---------------- -PaddlePaddle主要使用 `CMake `_ 以及GCC, G++作为编译工具。 -我们推荐您使用PaddlePaddle Docker编译环境镜像完成编译,这样可以免去单独安装编译依赖的步骤,可选的不同编译环境Docker镜像 -可以在 `这里 `_ 找到。 +PaddlePaddle需要使用Docker环境完成编译,这样可以免去单独安装编译依赖的步骤,可选的不同编译环境Docker镜像 +可以在 `这里 `_ 找到。或者 +参考下述可选步骤,从源码中构建用于编译PaddlePaddle的Docker镜像。 如果您选择不使用Docker镜像,则需要在本机安装下面章节列出的 `编译依赖`_ 之后才能开始编译的步骤。 @@ -16,15 +28,19 @@ PaddlePaddle主要使用 `CMake `_ 以及GCC, G++作为编译 .. code-block:: bash + # 1. 获取源码 git clone https://github.com/PaddlePaddle/Paddle.git cd Paddle - # 如果使用Docker编译环境,执行下面的命令编译CPU-Only的二进制 + # 2. 可选步骤:源码中构建用于编译PaddlePaddle的Docker镜像 + docker build -t paddle:dev . + # 3. 执行下面的命令编译CPU-Only的二进制 docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=OFF" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 bash -x /paddle/paddle/scripts/docker/build.sh - # 如果不使用Docker编译环境,执行下面的命令 - mkdir build - cd build - cmake -DWITH_GPU=OFF -DWITH_TESTING=OFF .. - make + # 4. 或者也可以使用为上述可选步骤构建的镜像(必须先执行第2步) + docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=OFF" paddle:dev + +注:上述命令把当前目录(源码树根目录)映射为 container 里的 :code:`/paddle` 目录。如果使用自行 +构建的镜像(上述第4步)会执行 :code:`Dockerfile` 描述的默认入口程序 :code:`build.sh` 可以省略步骤3中 +最后的执行脚本的命令。 编译完成后会在build/python/dist目录下生成输出的whl包,可以选在在当前机器安装也可以拷贝到目标机器安装: @@ -50,28 +66,83 @@ PaddlePaddle主要使用 `CMake `_ 以及GCC, G++作为编译 如果您期望在编译完成后立即执行所有的单元测试,可以按照下面的方法: -使用Docker的情况下,设置 :code:`RUN_TEST=ON` 和 :code:`WITH_TESTING=ON` 就会在完成编译之后,立即执行单元测试。 +设置 :code:`RUN_TEST=ON` 和 :code:`WITH_TESTING=ON` 就会在完成编译之后,立即执行单元测试。 开启 :code:`WITH_GPU=ON` 可以指定同时执行GPU上的单元测试。 .. code-block:: bash docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=ON" -e "RUN_TEST=ON" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 bash -x /paddle/paddle/scripts/docker/build.sh -如果不使用Docker,可以执行ctest命令即可: +如果期望执行其中一个单元测试,(比如 :code:`test_sum_op` ): .. code-block:: bash - mkdir build - cd build - cmake -DWITH_GPU=OFF -DWITH_TESTING=OFF .. - make - ctest - # 指定执行其中一个单元测试 test_mul_op - ctest -R test_mul_op + docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=ON" -e "RUN_TEST=OFF" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 /bin/bash + bash /paddle/paddle/scripts/docker/build.sh + cd /paddle/build + ctest -R test_sum_op -V + +.. _faq_docker: + +常见问题 +---------------- + +- 什么是 Docker? + + 如果您没有听说 Docker,可以把它想象为一个类似 virtualenv 的系统,但是虚拟的不仅仅是 Python 的运行环境。 + +- Docker 还是虚拟机? + + 有人用虚拟机来类比 Docker。需要强调的是:Docker 不会虚拟任何硬件,Docker container 里运行的编译工具实际上都是在本机的 CPU 和操作系统上直接运行的,性能和把编译工具安装在本机运行一样。 + +- 为什么用 Docker? 
+ + 把工具和配置都安装在一个 Docker image 里可以标准化编译环境。这样如果遇到问题,其他人可以复现问题以便帮助。 + + 另外,对于习惯使用Windows和MacOS的开发者来说,使用Docker就不用配置交叉编译环境了。 + +- 我可以选择不用Docker吗? + + 当然可以。大家可以用把开发工具安装进入 Docker image 一样的方式,把这些工具安装到本机。这篇文档介绍基于 Docker 的开发流程,是因为这个流程比其他方法都更简便。 + +- 学习 Docker 有多难? + + 理解 Docker 并不难,大概花十分钟看一下[这篇文章](https://zhuanlan.zhihu.com/p/19902938)。这可以帮您省掉花一小时安装和配置各种开发工具,以及切换机器时需要新安装的辛苦。别忘了 PaddlePaddle 更新可能导致需要新的开发工具。更别提简化问题复现带来的好处了。 + +- 我可以用 IDE 吗? + + 当然可以,因为源码就在本机上。IDE 默认调用 make 之类的程序来编译源码,我们只需要配置 IDE 来调用 Docker 命令编译源码即可。 + + 很多 PaddlePaddle 开发者使用 Emacs。他们在自己的 `~/.emacs` 配置文件里加两行 + + ```emacs + (global-set-key "\C-cc" 'compile) + (setq compile-command + "docker run --rm -it -v $(git rev-parse --show-toplevel):/paddle paddle:dev") + ``` + + 就可以按 `Ctrl-C` 和 `c` 键来启动编译了。 + +- 可以并行编译吗? + + 是的。我们的 Docker image 运行一个 [Bash 脚本](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/scripts/docker/build.sh)。这个脚本调用 `make -j$(nproc)` 来启动和 CPU 核一样多的进程来并行编译。 + +- Docker 需要 sudo + + 如果用自己的电脑开发,自然也就有管理员权限(sudo)了。如果用公用的电脑开发,需要请管理员安装和配置好 Docker。此外,PaddlePaddle 项目在努力开始支持其他不需要 sudo 的集装箱技术,比如 rkt。 + +- 在 Windows/MacOS 上编译很慢 + + Docker 在 Windows 和 MacOS 都可以运行。不过实际上是运行在一个 Linux 虚拟机上。可能需要注意给这个虚拟机多分配一些 CPU 和内存,以保证编译高效。具体做法请参考[这个issue](https://github.com/PaddlePaddle/Paddle/issues/627)。 + +- 磁盘不够 + + 本文中的例子里,`docker run` 命令里都用了 `--rm` 参数,这样保证运行结束之后的 containers 不会保留在磁盘上。可以用 `docker ps -a` 命令看到停止后但是没有删除的 containers。`docker build` 命令有时候会产生一些中间结果,是没有名字的 images,也会占用磁盘。可以参考[这篇文章](https://zaiste.net/posts/removing_docker_containers/)来清理这些内容。 + .. _compile_deps: -编译依赖 +附录:编译依赖 ---------------- PaddlePaddle编译需要使用到下面的依赖(包含但不限于),其他的依赖软件,会自动在编译时下载。 @@ -91,7 +162,7 @@ PaddlePaddle编译需要使用到下面的依赖(包含但不限于),其 .. _build_options: -编译选项 +附录:编译选项 ---------------- PaddlePaddle的编译选项,包括生成CPU/GPU二进制文件、链接何种BLAS库等。 diff --git a/doc/build_and_install/build_from_source_en.rst b/doc/build_and_install/build_from_source_en.rst index 718fb869c2..29a1439e4c 100644 --- a/doc/build_and_install/build_from_source_en.rst +++ b/doc/build_and_install/build_from_source_en.rst @@ -1,32 +1,45 @@ Build from Sources ========================== -.. _build_step: +.. _requirements: -How To Build +Requirements ---------------- -PaddlePaddle mainly uses `CMake `_ and GCC, G++ as compile -tools. We recommend you to use our pre-built Docker image to run the build -to avoid installing dependencies by yourself. We have several build environment -Docker images `here `_ . +To build PaddlePaddle, you need + +1. A computer -- Linux, Windows, MacOS. +1. Docker. + +Nothing else. Not even Python and GCC, because you can install all build tools into a Docker image. +We run all the tools by running this image. + +.. _build_step: -If you choose not to use Docker image for your build, you need to install the -below `Compile Dependencies`_ before run the build. +How To Build +---------------- -Then run: +You need to use Docker to build PaddlePaddle +to avoid installing dependencies by yourself. We have several pre-built +Docker images `here `_ , +Or you can build your own image from source as the optional step below: .. code-block:: bash + # 1. clone the source code git clone https://github.com/PaddlePaddle/Paddle.git cd Paddle - # run the following command to build a CPU-Only binaries if you are using docker + # 2. Optional: build development docker image from source + docker build -t paddle:dev . + # 3. 
Run the following command to build CPU-only binaries
   docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=OFF" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 bash -x /paddle/paddle/scripts/docker/build.sh
-  # else run these commands
-  mkdir build
-  cd build
-  cmake -DWITH_GPU=OFF -DWITH_TESTING=OFF ..
-  make
+  # 4. Or, use your built Docker image to build PaddlePaddle (must run step 2)
+  docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=OFF" paddle:dev
+
+NOTE: The above command tries to mount the current working directory (the root
+directory of the source code) into :code:`/paddle` inside the Docker container.
+If you are using your own image (step 4), it will run the default entry-point
+:code:`build.sh`, so you can omit the last command in step 3.
 
 When the compile finishes, you can get the output whl package under
 build/python/dist, then you can choose to install
 the whl on local
@@ -61,22 +74,75 @@ Set :code:`WITH_GPU=ON` Can also run tests on GPU.
 
    docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=ON" -e "RUN_TEST=ON" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 bash -x paddle/paddle/scripts/docker/build.sh
 
-If you don't use Docker, just run ctest will start the tests:
+If you wish to run only one unit test, like :code:`test_sum_op`:
 
 .. code-block:: bash
 
-   mkdir build
-   cd build
-   cmake -DWITH_GPU=OFF -DWITH_TESTING=ON ..
-   make
-   ctest
-   # run a single test like test_mul_op
-   ctest -R test_mul_op
+   docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=ON" -e "RUN_TEST=OFF" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 /bin/bash
+   bash /paddle/paddle/scripts/docker/build.sh
+   cd /paddle/build
+   ctest -R test_sum_op -V
+
+.. _faq_docker:
+
+Frequently Asked Questions
+--------------------------
+
+- What is Docker?
+
+  If you haven't heard of it, consider it something like Python's virtualenv.
+
+- Docker or virtual machine?
+
+  Some people compare Docker with VMs, but Docker doesn't virtualize any
+  hardware nor run a guest OS, which means there is no compromise on the
+  performance.
+
+- Why Docker?
+
+  Using a Docker image of build tools standardizes the building environment,
+  which makes it easier for others to reproduce your problems and to help.
+
+  Also, some build tools don't run on Windows or Mac or BSD, but Docker runs
+  almost everywhere, so developers can use whatever computer they want.
+
+- Can I choose not to use Docker?
+
+  Sure, you don't have to install build tools into a Docker image; instead,
+  you can install them on your local computer. This document exists because
+  Docker makes development much easier.
+
+- How difficult is it to learn Docker?
+
+  It takes you ten minutes to read an introductory article
+  (https://docs.docker.com/get-started) and saves you more than one hour of
+  installing and configuring the required build tools, especially when new
+  versions of PaddlePaddle require some new tools. Not to mention the time
+  saved when other people try to reproduce an issue you reported.
+
+- Can I use my favorite IDE?
+
+  Yes, of course. The source code resides on your local computer, and you can
+  edit it using whatever editor you like.
+
+  Many PaddlePaddle developers are using Emacs. They add the following few
+  lines into their `~/.emacs` configuration file:
+
+  ```emacs
+  (global-set-key "\C-cc" 'compile)
+  (setq compile-command
+        "docker run --rm -it -v $(git rev-parse --show-toplevel):/paddle paddle:dev")
+  ```
+
+  so they could type `Ctrl-C` and `c` to build PaddlePaddle from source.
+ +- Does Docker do parallel building? + + Our building Docker image runs a [Bash script](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/scripts/docker/build.sh), which calls `make -j$(nproc)` to starts as many processes as the number of your CPU cores. + +- Docker requires sudo + + An owner of a computer has the administrative privilege, a.k.a., sudo, and Docker requires this privilege to work properly. If you use a shared computer for development, please ask the administrator to install and configure Docker. We will do our best to support rkt, another container technology that doesn't require sudo. + +- Docker on Windows/MacOS builds slowly + + On Windows and MacOS, Docker containers run in a Linux VM. You might want to give this VM some more memory and CPUs so to make the building efficient. Please refer to [this issue](https://github.com/PaddlePaddle/Paddle/issues/627) for details. + +- Not enough disk space + + Examples in this article use option `--rm` with the `docker run` command. This option ensures that stopped containers do not exist on hard disks. We can use `docker ps -a` to list all containers, including stopped. Sometimes `docker build` generates some intermediate dangling images, which also take disk space. To clean them, please refer to [this article](https://zaiste.net/posts/removing_docker_containers/). .. _compile_deps: -Compile Dependencies +Appendix: Compile Dependencies ---------------- PaddlePaddle need the following dependencies when compiling, other dependencies @@ -97,17 +163,13 @@ will be downloaded automatically. .. _build_options: -Build Options +Appendix: Build Options ---------------- Build options include whether build binaries for CPU or GPU, which BLAS library to use etc. You may pass these settings when running cmake. For detailed cmake tutorial please refer to `here `_ 。 -.. _build_options_bool: - -Bool Type Options ----------------- You can add :code:`-D` argument to pass such options, like: -- GitLab From d641d5ac336d29471fe5206d45f717ef9cc62f4e Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Sun, 11 Feb 2018 17:09:53 +0800 Subject: [PATCH 066/217] follow comments --- python/paddle/v2/fluid/layers/detection.py | 212 ++++++++---------- python/paddle/v2/fluid/layers/nn.py | 87 ------- .../object_detection/test_prior_boxes.py | 12 +- 3 files changed, 93 insertions(+), 218 deletions(-) diff --git a/python/paddle/v2/fluid/layers/detection.py b/python/paddle/v2/fluid/layers/detection.py index b0c25c11de..cc38796042 100644 --- a/python/paddle/v2/fluid/layers/detection.py +++ b/python/paddle/v2/fluid/layers/detection.py @@ -17,11 +17,8 @@ All layers just related to the detection neural network. from ..layer_helper import LayerHelper from ..framework import Variable -from ..param_attr import ParamAttr -from ..framework import Variable -from layer_function_generator import autodoc from tensor import concat -from nn import flatten +from ops import reshape import math __all__ = [ @@ -30,91 +27,6 @@ __all__ = [ ] -def prior_box(input, - image, - min_sizes, - max_sizes, - aspect_ratios, - variance, - flip=False, - clip=False, - step_w=0.0, - step_h=0.0, - offset=0.5, - name=None): - """ - **Prior_box** - - Generate prior boxes for SSD(Single Shot MultiBox Detector) algorithm. - Each position of the input produce N prior boxes, N is determined by - the count of min_sizes, max_sizes and aspect_ratios, The size of the - box is in range(min_size, max_size) interval, which is generated in - sequence according to the aspect_ratios. 
-
-    Args:
-       input(variable): The input feature data of PriorBox,
-            the layout is NCHW.
-       image(variable): The input image data of PriorBox, the
-            layout is NCHW.
-       min_sizes(list): the min sizes of generated prior boxes.
-       max_sizes(list): the max sizes of generated prior boxes.
-       aspect_ratios(list): the aspect ratios of generated prior boxes.
-       variance(list): the variances to be encoded in prior boxes.
-       flip(bool, optional, default=False): Whether to flip aspect ratios.
-       clip(bool, optional, default=False)): Whether to clip
-            out-of-boundary boxes.
-       step_w(int, optional, default=0.0): Prior boxes step across
-            width, 0.0 for auto calculation.
-       step_h(int, optional, default=0.0): Prior boxes step across
-            height, 0.0 for auto calculation.
-       offset(float, optional, default=0.5): Prior boxes center offset.
-       name(str, optional, default=None): Name of the prior box layer.
-
-    Returns:
-        boxes(variable): the output prior boxes of PriorBoxOp. The layout is
-            [H, W, num_priors, 4]. H is the height of input, W is the width
-            of input, num_priors is the box count of each position. Where num_priors =
-            len(aspect_ratios) * len(min_sizes) + len(max_sizes)
-        Variances(variable): the expanded variances of PriorBoxOp. The layout
-            is [H, W, num_priors, 4]. H is the height of input, W is the width
-            of input, num_priors is the box count of each position. Where num_priors =
-            len(aspect_ratios) * len(min_sizes) + len(max_sizes)
-    Examples:
-        .. code-block:: python
-
-            data = fluid.layers.data(name="data", shape=[3, 32, 32], dtype="float32")
-            conv2d = fluid.layers.conv2d(
-                input=data, num_filters=2, filter_size=3)
-            box, var = fluid.layers.prior_box(conv2d, data,
-                min_size, max_size, aspect_ratio,
-                variance, flip, clip,
-                step_w, step_h, offset)
-    """
-    helper = LayerHelper("prior_box", **locals())
-    dtype = helper.input_dtype()
-
-    box = helper.create_tmp_variable(dtype)
-    var = helper.create_tmp_variable(dtype)
-    helper.append_op(
-        type="prior_box",
-        inputs={"Input": input,
-                "Image": image},
-        outputs={"Boxes": box,
-                 "Variances": var},
-        attrs={
-            'min_sizes': min_sizes,
-            'max_sizes': max_sizes,
-            'aspect_ratios': aspect_ratios,
-            'variances': variance,
-            'flip': flip,
-            'clip': clip,
-            'step_w': step_w,
-            'step_h': step_h,
-            'offset': offset
-        })
-    return box, var
-
-
 def prior_boxes(inputs,
                 image,
                 min_ratio,
@@ -128,20 +40,19 @@ def prior_boxes(inputs,
                 variance=[0.1, 0.1, 0.1, 0.1],
                 flip=False,
                 clip=False,
+                min_sizes=None,
+                max_sizes=None,
                 name=None):
     """
     **Prior_boxes**
 
     Generate prior boxes for SSD(Single Shot MultiBox Detector) algorithm.
-    Each position of the inputs produces many prior boxes respectly, the number
-    of prior boxes which is produced by inputs respectly is determined by
-    the count of min_ratio, max_ratio and aspect_ratios, The size of the
-    box is in range(min_ratio, max_ratio) interval, which is generated in
-    sequence according to the aspect_ratios.
+    For the details of this algorithm, please refer to section 2.2 of the
+    SSD paper (SSD: Single Shot MultiBox Detector).
 
     Args:
-       inputs(list): The list of input variables, the format of all variables is NCHW.
-       image(variable): The input image data of PriorBoxOp, the layout is NCHW.
+       inputs(list): The list of input Variables, the format of all Variables is NCHW.
+       image(Variable): The input image data of PriorBoxOp, the layout is NCHW.
        min_ratio(int): the min ratio of generated prior boxes.
        max_ratio(int): the max ratio of generated prior boxes.
        aspect_ratios(list): the aspect ratios of generated prior boxes.
@@ -159,13 +70,17 @@ def prior_boxes(inputs,
                        to be encoded in prior boxes.
        flip(bool, optional, default=False): Whether to flip aspect ratios.
        clip(bool, optional, default=False): Whether to clip out-of-boundary boxes.
+       min_sizes(list, optional, default=None): If `len(inputs) <= 2`, min_sizes must
+           be provided, and the length of min_sizes should equal the length of inputs.
+       max_sizes(list, optional, default=None): If `len(inputs) <= 2`, max_sizes must
+           be provided, and the length of max_sizes should equal the length of inputs.
        name(str, optional, None): Name of the prior box layer.
 
     Returns:
-        boxes(variable): the output prior boxes of PriorBoxOp. The layout is
+        boxes(Variable): the output prior boxes of PriorBoxOp. The layout is
             [num_priors, 4]. num_priors is the total box count of each
             position of inputs.
-        Variances(variable): the expanded variances of PriorBoxOp. The layout
+        Variances(Variable): the expanded variances of PriorBoxOp. The layout
             is [num_priors, 4]. num_priors is the total box count of each
             position of inputs
@@ -185,13 +100,60 @@ def prior_boxes(inputs,
                  flip=True,
                  clip=True)
     """
+
+    def _prior_box_(input,
+                    image,
+                    min_sizes,
+                    max_sizes,
+                    aspect_ratios,
+                    variance,
+                    flip=False,
+                    clip=False,
+                    step_w=0.0,
+                    step_h=0.0,
+                    offset=0.5,
+                    name=None):
+        helper = LayerHelper("prior_box", **locals())
+        dtype = helper.input_dtype()
+
+        box = helper.create_tmp_variable(dtype)
+        var = helper.create_tmp_variable(dtype)
+        helper.append_op(
+            type="prior_box",
+            inputs={"Input": input,
+                    "Image": image},
+            outputs={"Boxes": box,
+                     "Variances": var},
+            attrs={
+                'min_sizes': min_sizes,
+                'max_sizes': max_sizes,
+                'aspect_ratios': aspect_ratios,
+                'variances': variance,
+                'flip': flip,
+                'clip': clip,
+                'step_w': step_w,
+                'step_h': step_h,
+                'offset': offset
+            })
+        return box, var
+
+    def _reshape_with_axis_(input, axis=1):
+        if not (axis > 0 and axis < len(input.shape)):
+            raise ValueError(
+                "The axis should be smaller than the arity of input's shape.")
+        new_shape = [-1, reduce(mul, input.shape[axis:len(input.shape)], 1)]
+        out = reshape([input], shape=new_shape)
+        return out
+
     assert isinstance(inputs, list), 'inputs should be a list.'
     num_layer = len(inputs)
-    assert num_layer > 2  # TODO(zcd): currently, num_layer must be bigger than two.
 
-    min_sizes = []
-    max_sizes = []
-    if num_layer > 2:
+    if num_layer <= 2:
+        assert min_sizes is not None and max_sizes is not None
+        assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
+    else:
+        min_sizes = []
+        max_sizes = []
         step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
         for ratio in xrange(min_ratio, max_ratio + 1, step):
             min_sizes.append(base_size * ratio / 100.)
@@ -199,21 +161,29 @@ def prior_boxes(inputs,
         min_sizes = [base_size * .10] + min_sizes
         max_sizes = [base_size * .20] + max_sizes
 
+    if aspect_ratios:
+        if not (isinstance(aspect_ratios, list) and
+                len(aspect_ratios) == num_layer):
+            raise ValueError(
+                'aspect_ratios should be list and the length of inputs '
+                'and aspect_ratios should be the same.')
     if step_h:
-        assert isinstance(step_h,list) and len(step_h) == num_layer, \
-            'step_h should be list and inputs and step_h should have same length'
+        if not (isinstance(step_h, list) and len(step_h) == num_layer):
+            raise ValueError(
+                'step_h should be list and the length of inputs and '
+                'step_h should be the same.')
     if step_w:
-        assert isinstance(step_w,list) and len(step_w) == num_layer, \
-            'step_w should be list and inputs and step_w should have same length'
+        if not (isinstance(step_w, list) and len(step_w) == num_layer):
+            raise ValueError(
+                'step_w should be list and the length of inputs and '
+                'step_w should be the same.')
     if steps:
-        assert isinstance(steps,list) and len(steps) == num_layer, \
-            'steps should be list and inputs and step_w should have same length'
+        if not (isinstance(steps, list) and len(steps) == num_layer):
+            raise ValueError(
+                'steps should be list and the length of inputs and '
+                'steps should be the same.')
         step_w = steps
         step_h = steps
-    if aspect_ratios:
-        assert isinstance(aspect_ratios, list) and len(aspect_ratios) == num_layer, \
-            'aspect_ratios should be list and inputs and aspect_ratios should ' \
-            'have same length'
 
     box_results = []
     var_results = []
@@ -230,10 +200,10 @@ def prior_boxes(inputs,
         if not isinstance(aspect_ratio, list):
             aspect_ratio = [aspect_ratio]
 
-        box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
-                             variance, flip, clip, step_w[i]
-                             if step_w else 0.0, step_h[i]
-                             if step_w else 0.0, offset)
+        box, var = _prior_box_(input, image, min_size, max_size, aspect_ratio,
+                               variance, flip, clip, step_w[i]
+                               if step_w else 0.0, step_h[i]
+                               if step_h else 0.0, offset)
         box_results.append(box)
         var_results.append(var)
 
@@ -242,17 +212,11 @@ def prior_boxes(inputs,
         box = box_results[0]
         var = var_results[0]
     else:
-        axis = 3
         reshaped_boxes = []
         reshaped_vars = []
         for i in range(len(box_results)):
-            reshaped_boxes += [flatten(box_results[i], axis=3)]
-            reshaped_vars += [flatten(var_results[i], axis=3)]
-
-        helper = LayerHelper("concat", **locals())
-        dtype = helper.input_dtype()
-        box = helper.create_tmp_variable(dtype)
-        var = helper.create_tmp_variable(dtype)
+            reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
+            reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))
 
         box = concat(reshaped_boxes)
         var = concat(reshaped_vars)
diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py
index 4d2de38c35..5ebd329fc0 100644
--- a/python/paddle/v2/fluid/layers/nn.py
+++ b/python/paddle/v2/fluid/layers/nn.py
@@ -21,8 +21,6 @@ from ..framework import Variable
 from ..param_attr import ParamAttr
 from layer_function_generator import autodoc
 from tensor import concat
-import math
-from operator import mul
 
 __all__ = [
     'fc',
@@ -66,8 +64,6 @@ __all__ = [
     'nce',
     'beam_search',
     'row_conv',
-    'reshape_with_axis',
-    'flatten',
     'multiplex',
     'layer_norm',
 ]
@@ -3095,86 +3091,3 @@ def multiplex(inputs, index):
             'Ids': index},
         outputs={'Out': [out]})
     return out
-
-
-def reshape_with_axis(input, axis):
-    """
-    **ReshapeWithAxis Layer**
-
-    ReshapeWithAxis is used to merge adjacent dimensions according to axis.
- - Args: - input(variable): The input tensor. - axis(list): The axis which is used to merge the adjacent dimensions. - - Returns: - Variable: A tensor variable. - - Examples: - .. code-block:: python - - x = fluid.layers.data(name="data", shape=[3, 32, 32], dtype="float32") - reshaped = fluid.layers.reshape_with_axis(input=x, axis=[2]) - reshaped.shape - >> [-1, 1024] - reshaped = fluid.layers.reshape_with_axis(input=x, axis=[1,3]) - reshaped.shape - >> [-1, 96, 32] - """ - assert isinstance(axis, list), "axis should be list." - assert len(input.shape) > len( - axis), "the length of axis should be litter than input.shape's." - input_shape = input.shape - temp = 0 - for ax in axis: - assert ax < len(input.shape) and ax > 0, \ - 'The data of Axis should be between 1 and len(input.shape)' - assert ax > temp, 'Axis should be incremented sequence' - temp = ax - axis += [len(input.shape)] - - new_shape = [] - for i in range(len(axis) - 1): - new_shape += [reduce(mul, input_shape[axis[i]:axis[i + 1]], 1)] - new_shape = [-1] + new_shape - - helper = LayerHelper('reshape', **locals()) - out = helper.create_tmp_variable(helper.input_dtype()) - helper.append_op( - type='reshape', - inputs={'X': [input]}, - outputs={'Out': [out]}, - attrs={'shape': new_shape}) - return out - - -def flatten(input, axis=1): - """ - **Flatten Layer** - ReshapeWithAxis is used to merge adjacent dimensions according to axis. - Args: - input(variable): The input tensor. - axis(int): - Returns: - Variable: A tensor variable. - Examples: - .. code-block:: python - x = fluid.layers.data(name="data", shape=[3, 32, 32], dtype="float32") - reshaped = fluid.layers.reshape_with_axis(input=x, axis=2) - reshaped.shape - >> [-1, 1024] - """ - assert len(input.shape) > axis and axis > 0, \ - "the axis should be litter than input.shape's." - input_shape = input.shape - - new_shape = [-1, reduce(mul, input_shape[axis:len(input_shape)], 1)] - - helper = LayerHelper('reshape', **locals()) - out = helper.create_tmp_variable(helper.input_dtype()) - helper.append_op( - type='reshape', - inputs={'X': [input]}, - outputs={'Out': [out]}, - attrs={'shape': new_shape}) - return out diff --git a/python/paddle/v2/fluid/tests/object_detection/test_prior_boxes.py b/python/paddle/v2/fluid/tests/object_detection/test_prior_boxes.py index 50b5249d98..1b093c6463 100644 --- a/python/paddle/v2/fluid/tests/object_detection/test_prior_boxes.py +++ b/python/paddle/v2/fluid/tests/object_detection/test_prior_boxes.py @@ -51,15 +51,15 @@ def main(use_cuda): if use_cuda: # prior_box only support CPU. 
return - box, var = prior_box_output(data_shape=[3, 224, 224]) + data_shape = [3, 224, 224] + box, var = prior_box_output(data_shape) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) batch = [128] - for i in range(1): - # print("iteration : %d" % i) + for _ in range(1): x = np.random.random(batch + data_shape).astype("float32") tensor_x = core.LoDTensor() tensor_x.set(x, place) @@ -75,12 +75,10 @@ def main(use_cuda): class TestFitALine(unittest.TestCase): def test_cpu(self): - with self.program_scope_guard(): - main(use_cuda=False) + main(use_cuda=False) def test_cuda(self): - with self.program_scope_guard(): - main(use_cuda=True) + main(use_cuda=True) if __name__ == '__main__': -- GitLab From 892cc28c7b7c77ead20f17d5644a4e9482906404 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sun, 11 Feb 2018 20:03:07 +0800 Subject: [PATCH 067/217] Fix bug --- paddle/fluid/framework/mixed_vector.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/paddle/fluid/framework/mixed_vector.h b/paddle/fluid/framework/mixed_vector.h index a06e34d551..6e5ceefadd 100644 --- a/paddle/fluid/framework/mixed_vector.h +++ b/paddle/fluid/framework/mixed_vector.h @@ -106,9 +106,11 @@ class Vector { // std::vector iterator methods. Based on CPU data access method size_t size() const { return size_; } - T* begin() { return size() == 0 ? &EmptyDummy() : &this->operator[](0); } + T* begin() { return capacity() == 0 ? &EmptyDummy() : &this->operator[](0); } - T* end() { return size() == 0 ? &EmptyDummy() : &this->operator[](size()); } + T* end() { + return capacity() == 0 ? &EmptyDummy() : &this->operator[](size()); + } T& front() { return *begin(); } @@ -119,11 +121,11 @@ class Vector { } const T* begin() const { - return size() == 0 ? &EmptyDummy() : &this->operator[](0); + return capacity() == 0 ? &EmptyDummy() : &this->operator[](0); } const T* end() const { - return size() == 0 ? &EmptyDummy() : &this->operator[](size()); + return capacity() == 0 ? &EmptyDummy() : &this->operator[](size()); } const T* cbegin() const { return begin(); } -- GitLab From 1dceb99e86052355fea7c83d9f636ea681aa8d18 Mon Sep 17 00:00:00 2001 From: Yuan Gao Date: Sun, 11 Feb 2018 20:17:26 +0800 Subject: [PATCH 068/217] add detection output python api (#8389) --- python/paddle/v2/fluid/layers/__init__.py | 3 + python/paddle/v2/fluid/layers/detection.py | 116 ++++++++++++++++++ .../paddle/v2/fluid/tests/test_detection.py | 53 ++++++++ 3 files changed, 172 insertions(+) create mode 100644 python/paddle/v2/fluid/layers/detection.py create mode 100644 python/paddle/v2/fluid/tests/test_detection.py diff --git a/python/paddle/v2/fluid/layers/__init__.py b/python/paddle/v2/fluid/layers/__init__.py index a83dd3db74..89b9f30668 100644 --- a/python/paddle/v2/fluid/layers/__init__.py +++ b/python/paddle/v2/fluid/layers/__init__.py @@ -16,6 +16,8 @@ import ops from ops import * import nn from nn import * +import detection +from detection import * import io from io import * import tensor @@ -28,6 +30,7 @@ import math_op_patch from math_op_patch import * __all__ = [] +__all__ += detection.__all__ __all__ += nn.__all__ __all__ += io.__all__ __all__ += tensor.__all__ diff --git a/python/paddle/v2/fluid/layers/detection.py b/python/paddle/v2/fluid/layers/detection.py new file mode 100644 index 0000000000..054443cb43 --- /dev/null +++ b/python/paddle/v2/fluid/layers/detection.py @@ -0,0 +1,116 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+All layers related to the detection neural network.
+"""
+
+from ..layer_helper import LayerHelper
+
+__all__ = ['detection_output', ]
+
+
+def detection_output(scores,
+                     loc,
+                     prior_box,
+                     prior_box_var,
+                     background_label=0,
+                     nms_threshold=0.3,
+                     nms_top_k=400,
+                     keep_top_k=200,
+                     score_threshold=0.01,
+                     nms_eta=1.0):
+    """
+    **Detection Output Layer**
+
+    This layer applies NMS to the output of the network and computes the
+    predicted bounding box locations. The output of this layer could be
+    empty if there is no valid bounding box.
+
+    Args:
+        scores(Variable): A 3-D Tensor with shape [N, C, M] represents the
+            predicted confidence predictions. N is the batch size, C is the
+            class number, M is the number of bounding boxes. For each category
+            there are in total M scores, corresponding to the M bounding boxes.
+        loc(Variable): A 3-D Tensor with shape [N, M, 4] represents the
+            predicted locations of M bounding boxes. N is the batch size,
+            and each bounding box has four coordinate values and the layout
+            is [xmin, ymin, xmax, ymax].
+        prior_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
+            each box is represented as [xmin, ymin, xmax, ymax],
+            [xmin, ymin] is the left top coordinate of the anchor box,
+            if the input is an image feature map, they are close to the origin
+            of the coordinate system. [xmax, ymax] is the right bottom
+            coordinate of the anchor box.
+        prior_box_var(Variable): A 2-D Tensor with shape [M, 4] holds M groups
+            of variance.
+        background_label(int): The index of the background label; the
+            background label will be ignored. If set to -1, then all
+            categories will be considered.
+        nms_threshold(float): The threshold to be used in NMS.
+        nms_top_k(int): Maximum number of detections to be kept according
+            to the confidences after filtering detections based on
+            score_threshold.
+        keep_top_k(int): Number of total bboxes to be kept per image after
+            the NMS step. -1 means keeping all bboxes after the NMS step.
+        score_threshold(float): Threshold to filter out bounding boxes with
+            a low confidence score. If not provided, consider all boxes.
+        nms_eta(float): The parameter for adaptive NMS.
+
+    Returns:
+        Variable: The detected bounding boxes, stored in a Tensor.
+
+    Examples:
+        .. code-block:: python
+
+            pb = layers.data(name='prior_box', shape=[10, 4],
+                             append_batch_size=False, dtype='float32')
+            pbv = layers.data(name='prior_box_var', shape=[10, 4],
+                              append_batch_size=False, dtype='float32')
+            loc = layers.data(name='target_box', shape=[21, 4],
+                              append_batch_size=False, dtype='float32')
+            scores = layers.data(name='scores', shape=[2, 21, 10],
+                                 append_batch_size=False, dtype='float32')
+            nmsed_outs = fluid.layers.detection_output(scores=scores,
+                                                       loc=loc,
+                                                       prior_box=pb,
+                                                       prior_box_var=pbv)
+    """
+
+    helper = LayerHelper("detection_output", **locals())
+    decoded_box = helper.create_tmp_variable(dtype=loc.dtype)
+    helper.append_op(
+        type="box_coder",
+        inputs={
+            'PriorBox': prior_box,
+            'PriorBoxVar': prior_box_var,
+            'TargetBox': loc
+        },
+        outputs={'OutputBox': decoded_box},
+        attrs={'code_type': 'decode_center_size'})
+    nmsed_outs = helper.create_tmp_variable(dtype=decoded_box.dtype)
+
+    helper.append_op(
+        type="multiclass_nms",
+        inputs={'Scores': scores,
+                'BBoxes': decoded_box},
+        outputs={'Out': nmsed_outs},
+        attrs={
+            'background_label': background_label,
+            'nms_threshold': nms_threshold,
+            'nms_top_k': nms_top_k,
+            'keep_top_k': keep_top_k,
+            'score_threshold': score_threshold,
+            'nms_eta': nms_eta
+        })
+    return nmsed_outs
diff --git a/python/paddle/v2/fluid/tests/test_detection.py b/python/paddle/v2/fluid/tests/test_detection.py
new file mode 100644
index 0000000000..75498ad770
--- /dev/null
+++ b/python/paddle/v2/fluid/tests/test_detection.py
@@ -0,0 +1,53 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
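A side note on the attributes above: candidates are first thresholded by score_threshold, then only the nms_top_k highest-scoring ones enter NMS, and keep_top_k caps the final per-image count. The following standalone sketch illustrates that pre-NMS ordering with plain numpy; it is illustrative only and not part of the patch:

```python
import numpy as np

def pre_nms_filter(scores, score_threshold=0.01, nms_top_k=400):
    # Drop low-confidence candidates first, then keep the nms_top_k best,
    # mirroring the attribute semantics documented for detection_output.
    keep = np.where(scores > score_threshold)[0]
    keep = keep[np.argsort(-scores[keep])][:nms_top_k]
    return keep

scores = np.array([0.90, 0.005, 0.40, 0.75])
print(pre_nms_filter(scores))  # -> [0 3 2]
```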
+ +from __future__ import print_function +import unittest + +import paddle.v2.fluid.layers as layers +from paddle.v2.fluid.framework import Program, program_guard + + +class TestBook(unittest.TestCase): + def test_detection_output(self): + program = Program() + with program_guard(program): + pb = layers.data( + name='prior_box', + shape=[10, 4], + append_batch_size=False, + dtype='float32') + pbv = layers.data( + name='prior_box_var', + shape=[10, 4], + append_batch_size=False, + dtype='float32') + loc = layers.data( + name='target_box', + shape=[20, 4], + append_batch_size=False, + dtype='float32') + scores = layers.data( + name='scores', + shape=[2, 20, 10], + append_batch_size=False, + dtype='float32') + out = layers.detection_output( + scores=scores, loc=loc, prior_box=pb, prior_box_var=pbv) + self.assertIsNotNone(out) + print(str(program)) + + +if __name__ == '__main__': + unittest.main() -- GitLab From 77a6e1c670aece73d208222e483a76fbfe361cd6 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sun, 11 Feb 2018 21:28:14 +0800 Subject: [PATCH 069/217] Disable unstable tests --- .../{test_rnn_encoder_decoder.py => notest_rnn_encoder_decoer.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename python/paddle/v2/fluid/tests/book/{test_rnn_encoder_decoder.py => notest_rnn_encoder_decoer.py} (100%) diff --git a/python/paddle/v2/fluid/tests/book/test_rnn_encoder_decoder.py b/python/paddle/v2/fluid/tests/book/notest_rnn_encoder_decoer.py similarity index 100% rename from python/paddle/v2/fluid/tests/book/test_rnn_encoder_decoder.py rename to python/paddle/v2/fluid/tests/book/notest_rnn_encoder_decoer.py -- GitLab From 6f625f9c2f8861c1ec4c345e6abc33b3936cc080 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sun, 11 Feb 2018 22:35:33 +0800 Subject: [PATCH 070/217] Disable unstable unittest --- paddle/fluid/inference/tests/book/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/inference/tests/book/CMakeLists.txt b/paddle/fluid/inference/tests/book/CMakeLists.txt index 9fe76afb58..cddd5a786c 100644 --- a/paddle/fluid/inference/tests/book/CMakeLists.txt +++ b/paddle/fluid/inference/tests/book/CMakeLists.txt @@ -29,6 +29,6 @@ inference_test(image_classification ARGS vgg resnet) inference_test(label_semantic_roles) inference_test(recognize_digits ARGS mlp) inference_test(recommender_system) -inference_test(rnn_encoder_decoder) +#inference_test(rnn_encoder_decoder) inference_test(understand_sentiment) inference_test(word2vec) -- GitLab From bbff442eee03df799edc74bc354ff16ad77684ca Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Sun, 11 Feb 2018 22:19:14 +0800 Subject: [PATCH 071/217] follow comments of qingqing --- python/paddle/v2/fluid/layers/detection.py | 47 +++++++++---------- .../test_prior_boxes.py | 22 +++++---- 2 files changed, 35 insertions(+), 34 deletions(-) rename python/paddle/v2/fluid/tests/{object_detection => }/test_prior_boxes.py (84%) diff --git a/python/paddle/v2/fluid/layers/detection.py b/python/paddle/v2/fluid/layers/detection.py index cc38796042..657f3e22fb 100644 --- a/python/paddle/v2/fluid/layers/detection.py +++ b/python/paddle/v2/fluid/layers/detection.py @@ -19,30 +19,28 @@ from ..layer_helper import LayerHelper from ..framework import Variable from tensor import concat from ops import reshape +from operator import mul import math -__all__ = [ - 'prior_box', - 'prior_boxes', -] - - -def prior_boxes(inputs, - image, - min_ratio, - max_ratio, - aspect_ratios, - base_size, - steps=None, - step_w=None, - step_h=None, - offset=0.5, - 
variance=[0.1, 0.1, 0.1, 0.1], - flip=False, - clip=False, - min_sizes=None, - max_sizes=None, - name=None): +__all__ = ['prior_box', ] + + +def prior_box(inputs, + image, + min_ratio, + max_ratio, + aspect_ratios, + base_size, + steps=None, + step_w=None, + step_h=None, + offset=0.5, + variance=[0.1, 0.1, 0.1, 0.1], + flip=False, + clip=False, + min_sizes=None, + max_sizes=None, + name=None): """ **Prior_boxes** @@ -140,9 +138,10 @@ def prior_boxes(inputs, def _reshape_with_axis_(input, axis=1): if not (axis > 0 and axis < len(input.shape)): raise ValueError( - "The axis should be smaller than the arity of input's shape.") + "The axis should be smaller than the arity of input and bigger than 0." + ) new_shape = [-1, reduce(mul, input.shape[axis:len(input.shape)], 1)] - out = reshape([input], shape=new_shape) + out = reshape(x=input, shape=new_shape) return out assert isinstance(inputs, list), 'inputs should be a list.' diff --git a/python/paddle/v2/fluid/tests/object_detection/test_prior_boxes.py b/python/paddle/v2/fluid/tests/test_prior_boxes.py similarity index 84% rename from python/paddle/v2/fluid/tests/object_detection/test_prior_boxes.py rename to python/paddle/v2/fluid/tests/test_prior_boxes.py index 1b093c6463..74d292020c 100644 --- a/python/paddle/v2/fluid/tests/object_detection/test_prior_boxes.py +++ b/python/paddle/v2/fluid/tests/test_prior_boxes.py @@ -33,7 +33,7 @@ def prior_box_output(data_shape): conv5 = fluid.layers.conv2d( input=conv4, num_filters=3, filter_size=3, stride=2, use_cudnn=False) - box, var = detection.prior_boxes( + box, var = detection.prior_box( inputs=[conv1, conv2, conv3, conv4, conv5, conv5], image=images, min_ratio=20, @@ -57,20 +57,22 @@ def main(use_cuda): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - batch = [128] + batch = [4] # batch is not used in the prior_box. 
+ + assert box.shape[1] == 4 + assert var.shape[1] == 4 + assert box.shape == var.shape + assert len(box.shape) == 2 for _ in range(1): x = np.random.random(batch + data_shape).astype("float32") tensor_x = core.LoDTensor() tensor_x.set(x, place) - box, var = exe.run(fluid.default_main_program(), - feed={'pixel': tensor_x}, - fetch_list=[box, var]) - box_arr = np.array(box) - var_arr = np.array(var) - assert box_arr.shape[1] == 4 - assert var_arr.shape[1] == 4 - assert box_arr.shape[0] == var_arr.shape[0] + boxes, vars = exe.run(fluid.default_main_program(), + feed={'pixel': tensor_x}, + fetch_list=[box, var]) + assert vars.shape == var.shape + assert boxes.shape == box.shape class TestFitALine(unittest.TestCase): -- GitLab From afe63228682aa43518f6df5d183a62fa79fbcce7 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Mon, 12 Feb 2018 09:50:13 +0800 Subject: [PATCH 072/217] follow comment --- doc/build_and_install/index_cn.rst | 1 - doc/build_and_install/index_en.rst | 2 -- 2 files changed, 3 deletions(-) diff --git a/doc/build_and_install/index_cn.rst b/doc/build_and_install/index_cn.rst index 4220ff2279..c0b60f5589 100644 --- a/doc/build_and_install/index_cn.rst +++ b/doc/build_and_install/index_cn.rst @@ -13,7 +13,6 @@ PaddlePaddle提供pip和Docker的安装方式: pip_install_cn.rst docker_install_cn.rst - build_cn.md 编译流程 ++++++++ diff --git a/doc/build_and_install/index_en.rst b/doc/build_and_install/index_en.rst index db6b5be742..7e0ca5bcbd 100644 --- a/doc/build_and_install/index_en.rst +++ b/doc/build_and_install/index_en.rst @@ -13,8 +13,6 @@ You can choose either pip or Docker to complete your install: pip_install_en.rst docker_install_en.rst - build_en.md - Build from Source ----------------- -- GitLab From 9a05c9075043345e34b4461ded2ce92ba6501ae4 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Mon, 12 Feb 2018 10:38:31 +0800 Subject: [PATCH 073/217] fix StridedNumelCopyWithAxis --- paddle/fluid/operators/strided_memcpy.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/operators/strided_memcpy.h b/paddle/fluid/operators/strided_memcpy.h index 385124305e..4036d1091d 100644 --- a/paddle/fluid/operators/strided_memcpy.h +++ b/paddle/fluid/operators/strided_memcpy.h @@ -58,6 +58,7 @@ inline void StridedNumelCopyWithAxis(const platform::DeviceContext& ctx, int64_t before = dst_stride_numel[0] / dst_stride_numel[axis]; int64_t src_after = src_stride_numel[axis]; int64_t dst_after = dst_stride_numel[axis]; + int64_t copy_size = std::min(src_after, dst_after); auto place = ctx.GetPlace(); PADDLE_ENFORCE_EQ(src_stride_numel.size(), dst_stride_numel.size(), @@ -82,14 +83,14 @@ inline void StridedNumelCopyWithAxis(const platform::DeviceContext& ctx, if (platform::is_cpu_place(place)) { auto& cpu_place = boost::get(place); memory::Copy(cpu_place, dst + i * dst_after, cpu_place, - src + i * src_after, sizeof(T) * src_after); + src + i * src_after, sizeof(T) * copy_size); } else { #ifdef PADDLE_WITH_CUDA auto& gpu_place = boost::get(place); auto& cuda_ctx = reinterpret_cast(ctx); memory::Copy(gpu_place, dst + i * dst_after, gpu_place, - src + i * src_after, sizeof(T) * src_after, + src + i * src_after, sizeof(T) * copy_size, cuda_ctx.stream()); #else PADDLE_THROW("Paddle is not compiled with GPU"); -- GitLab From 91a2188301b82151560c59501cca45785d34cfcb Mon Sep 17 00:00:00 2001 From: wanghaox Date: Mon, 12 Feb 2018 10:39:59 +0800 Subject: [PATCH 074/217] update detection_map --- paddle/fluid/operators/detection_map_op.cc | 98 ++++++++++++------- 
paddle/fluid/operators/detection_map_op.h | 44 ++++----- .../v2/fluid/tests/test_detection_map_op.py | 14 ++- 3 files changed, 87 insertions(+), 69 deletions(-) diff --git a/paddle/fluid/operators/detection_map_op.cc b/paddle/fluid/operators/detection_map_op.cc index cc4b6202c0..48308a11b4 100644 --- a/paddle/fluid/operators/detection_map_op.cc +++ b/paddle/fluid/operators/detection_map_op.cc @@ -24,25 +24,28 @@ class DetectionMAPOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Detection"), - "Input(Detection) of DetectionMAPOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("DetectRes"), + "Input(DetectRes) of DetectionMAPOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) of DetectionMAPOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("OutPosCount"), - "Output(OutPosCount) of DetectionMAPOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("OutTruePos"), - "Output(OutTruePos) of DetectionMAPOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("OutFalsePos"), - "Output(OutFalsePos) of DetectionMAPOp should not be null."); + PADDLE_ENFORCE( + ctx->HasOutput("AccumPosCount"), + "Output(AccumPosCount) of DetectionMAPOp should not be null."); + PADDLE_ENFORCE( + ctx->HasOutput("AccumTruePos"), + "Output(AccumTruePos) of DetectionMAPOp should not be null."); + PADDLE_ENFORCE( + ctx->HasOutput("AccumFalsePos"), + "Output(AccumFalsePos) of DetectionMAPOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("MAP"), "Output(MAP) of DetectionMAPOp should not be null."); - auto det_dims = ctx->GetInputDim("Detection"); + auto det_dims = ctx->GetInputDim("DetectRes"); PADDLE_ENFORCE_EQ(det_dims.size(), 2UL, - "The rank of Input(Detection) must be 2, " + "The rank of Input(DetectRes) must be 2, " "the shape is [N, 6]."); PADDLE_ENFORCE_EQ(det_dims[1], 6UL, - "The shape is of Input(Detection) [N, 6]."); + "The shape is of Input(DetectRes) [N, 6]."); auto label_dims = ctx->GetInputDim("Label"); PADDLE_ENFORCE_EQ(label_dims.size(), 2UL, "The rank of Input(Label) must be 2, " @@ -50,8 +53,17 @@ class DetectionMAPOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(label_dims[1], 6UL, "The shape is of Input(Label) [N, 6]."); - auto map_dim = framework::make_ddim({1}); - ctx->SetOutputDim("MAP", map_dim); + if (ctx->HasInput("PosCount")) { + PADDLE_ENFORCE(ctx->HasInput("TruePos"), + "Input(TruePos) of DetectionMAPOp should not be null when " + "Input(TruePos) is not null."); + PADDLE_ENFORCE( + ctx->HasInput("FalsePos"), + "Input(FalsePos) of DetectionMAPOp should not be null when " + "Input(FalsePos) is not null."); + } + + ctx->SetOutputDim("MAP", framework::make_ddim({1})); } protected: @@ -59,7 +71,7 @@ class DetectionMAPOp : public framework::OperatorWithKernel { const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType( - ctx.Input("Detection")->type()), + ctx.Input("DetectRes")->type()), ctx.device_context()); } }; @@ -68,6 +80,14 @@ class DetectionMAPOpMaker : public framework::OpProtoAndCheckerMaker { public: DetectionMAPOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("DetectRes", + "(LoDTensor) A 2-D LoDTensor with shape [M, 6] represents the " + "detections. 
Each row has 6 values: " + "[label, confidence, xmin, ymin, xmax, ymax], M is the total " + "number of detect results in this mini-batch. For each instance, " + "the offsets in first dimension are called LoD, the number of " + "offset is N + 1, if LoD[i + 1] - LoD[i] == 0, means there is " + "no detected data."); AddInput("Label", "(LoDTensor) A 2-D LoDTensor with shape[N, 6] represents the" "Labeled ground-truth data. Each row has 6 values: " @@ -76,38 +96,43 @@ class DetectionMAPOpMaker : public framework::OpProtoAndCheckerMaker { "instance, the offsets in first dimension are called LoD, " "the number of offset is N + 1, if LoD[i + 1] - LoD[i] == 0, " "means there is no ground-truth data."); - AddInput("Detection", - "(LoDTensor) A 2-D LoDTensor with shape [M, 6] represents the " - "detections. Each row has 6 values: " - "[label, confidence, xmin, ymin, xmax, ymax], M is the total " - "number of detections in this mini-batch. For each instance, " - "the offsets in first dimension are called LoD, the number of " - "offset is N + 1, if LoD[i + 1] - LoD[i] == 0, means there is " - "no detected data."); AddInput("PosCount", "(Tensor) A tensor with shape [Ncls, 1], store the " - "input positive example count of each class.") + "input positive example count of each class, Ncls is the count of " + "input classification. " + "This input is used to pass the AccumPosCount generated by the " + "previous mini-batch when the multi mini-batches cumulative " + "calculation carried out. " + "When the input(PosCount) is empty, the cumulative " + "calculation is not carried out, and only the results of the " + "current mini-batch are calculated.") .AsDispensable(); AddInput("TruePos", - "(LodTensor) A 2-D LodTensor with shape [Ntp, 2], store the " - "input true positive example of each class.") + "(LoDTensor) A 2-D LoDTensor with shape [Ntp, 2], store the " + "input true positive example of each class." + "This input is used to pass the AccumTruePos generated by the " + "previous mini-batch when the multi mini-batches cumulative " + "calculation carried out. ") .AsDispensable(); AddInput("FalsePos", - "(LodTensor) A 2-D LodTensor with shape [Nfp, 2], store the " - "input false positive example of each class.") + "(LoDTensor) A 2-D LoDTensor with shape [Nfp, 2], store the " + "input false positive example of each class." + "This input is used to pass the AccumFalsePos generated by the " + "previous mini-batch when the multi mini-batches cumulative " + "calculation carried out. ") .AsDispensable(); - AddOutput("OutPosCount", + AddOutput("AccumPosCount", "(Tensor) A tensor with shape [Ncls, 1], store the " "positive example count of each class. It combines the input " "input(PosCount) and the positive example count computed from " "input(Detection) and input(Label)."); - AddOutput("OutTruePos", - "(LodTensor) A LodTensor with shape [Ntp', 2], store the " + AddOutput("AccumTruePos", + "(LoDTensor) A LoDTensor with shape [Ntp', 2], store the " "true positive example of each class. It combines the " "input(TruePos) and the true positive examples computed from " "input(Detection) and input(Label)."); - AddOutput("OutFalsePos", - "(LodTensor) A LodTensor with shape [Nfp', 2], store the " + AddOutput("AccumFalsePos", + "(LoDTensor) A LoDTensor with shape [Nfp', 2], store the " "false positive example of each class. 
It combines the " "input(FalsePos) and the false positive examples computed from " "input(Detection) and input(Label)."); @@ -115,10 +140,11 @@ class DetectionMAPOpMaker : public framework::OpProtoAndCheckerMaker { "(Tensor) A tensor with shape [1], store the mAP evaluate " "result of the detection."); - AddAttr("overlap_threshold", - "(float) " - "The jaccard overlap threshold of detection output and " - "ground-truth data.") + AddAttr( + "overlap_threshold", + "(float) " + "The lower bound jaccard overlap threshold of detection output and " + "ground-truth data.") .SetDefault(.3f); AddAttr("evaluate_difficult", "(bool, default true) " diff --git a/paddle/fluid/operators/detection_map_op.h b/paddle/fluid/operators/detection_map_op.h index 0379a3328a..0f5f588e9c 100644 --- a/paddle/fluid/operators/detection_map_op.h +++ b/paddle/fluid/operators/detection_map_op.h @@ -54,7 +54,7 @@ template class DetectionMAPOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - auto* in_detect = ctx.Input("Detection"); + auto* in_detect = ctx.Input("DetectRes"); auto* in_label = ctx.Input("Label"); auto* out_map = ctx.Output("MAP"); @@ -62,9 +62,9 @@ class DetectionMAPOpKernel : public framework::OpKernel { auto* in_true_pos = ctx.Input("TruePos"); auto* in_false_pos = ctx.Input("FalsePos"); - auto* out_pos_count = ctx.Output("OutPosCount"); - auto* out_true_pos = ctx.Output("OutTruePos"); - auto* out_false_pos = ctx.Output("OutFalsePos"); + auto* out_pos_count = ctx.Output("AccumPosCount"); + auto* out_true_pos = ctx.Output("AccumTruePos"); + auto* out_false_pos = ctx.Output("AccumFalsePos"); float overlap_threshold = ctx.Attr("overlap_threshold"); float evaluate_difficult = ctx.Attr("evaluate_difficult"); @@ -265,28 +265,22 @@ class DetectionMAPOpKernel : public framework::OpKernel { label_pos_count[i] = pos_count_data[i]; } - const T* true_pos_data = input_true_pos.data(); - auto true_pos_data_lod = input_true_pos.lod(); - for (int i = 0; i < true_pos_data_lod.size(); ++i) { - for (int j = true_pos_data_lod[0][i]; j < true_pos_data_lod[0][i + 1]; - ++j) { - T score = true_pos_data[j * 2]; - int flag = 1; - if (true_pos_data[j * 2 + 1] < kEPS) flag = 0; - true_pos[i].push_back(std::make_pair(score, flag)); - } - } - const T* false_pos_data = input_false_pos.data(); - auto false_pos_data_lod = input_false_pos.lod(); - for (int i = 0; i < false_pos_data_lod.size(); ++i) { - for (int j = false_pos_data_lod[0][i]; j < false_pos_data_lod[0][i + 1]; - ++j) { - T score = false_pos_data[j * 2]; - int flag = 1; - if (false_pos_data[j * 2 + 1] < kEPS) flag = 0; - false_pos[i].push_back(std::make_pair(score, flag)); + auto SetData = [](const framework::LoDTensor& pos_tensor, + std::map>>& pos) { + const T* pos_data = pos_tensor.data(); + auto pos_data_lod = pos_tensor.lod(); + for (int i = 0; i < pos_data_lod.size(); ++i) { + for (int j = pos_data_lod[0][i]; j < pos_data_lod[0][i + 1]; ++j) { + T score = pos_data[j * 2]; + int flag = 1; + if (pos_data[j * 2 + 1] < kEPS) flag = 0; + pos[i].push_back(std::make_pair(score, flag)); + } } - } + }; + + SetData(input_true_pos, true_pos); + SetData(input_false_pos, false_pos); return; } diff --git a/python/paddle/v2/fluid/tests/test_detection_map_op.py b/python/paddle/v2/fluid/tests/test_detection_map_op.py index ec57ca4ad5..70ccd885d8 100644 --- a/python/paddle/v2/fluid/tests/test_detection_map_op.py +++ b/python/paddle/v2/fluid/tests/test_detection_map_op.py @@ -37,7 +37,7 @@ class 
TestDetectionMAPOp(OpTest):
         self.inputs = {
             'Label': (self.label, self.label_lod),
-            'Detection': (self.detect, self.detect_lod),
+            'DetectRes': (self.detect, self.detect_lod),
             'PosCount': self.class_pos_count,
             'TruePos': (self.true_pos, self.true_pos_lod),
             'FalsePos': (self.false_pos, self.false_pos_lod)
@@ -45,7 +45,7 @@ class TestDetectionMAPOp(OpTest):
         else:
             self.inputs = {
                 'Label': (self.label, self.label_lod),
-                'Detection': (self.detect, self.detect_lod),
+                'DetectRes': (self.detect, self.detect_lod),
             }
 
         self.attrs = {
@@ -61,9 +61,9 @@ class TestDetectionMAPOp(OpTest):
 
         self.outputs = {
             'MAP': self.mAP,
-            'OutPosCount': self.out_class_pos_count,
-            'OutTruePos': (self.out_true_pos, self.out_true_pos_lod),
-            'OutFalsePos': (self.out_false_pos, self.out_false_pos_lod)
+            'AccumPosCount': self.out_class_pos_count,
+            'AccumTruePos': (self.out_true_pos, self.out_true_pos_lod),
+            'AccumFalsePos': (self.out_false_pos, self.out_false_pos_lod)
         }
 
     def init_test_case(self):
@@ -175,9 +175,7 @@ class TestDetectionMAPOp(OpTest):
                 false_pos[label].append([score, fp])
 
         for (label, label_pos_num) in label_count.items():
-            if label_pos_num == 0 or label not in true_pos:
-                continue
-
+            if label_pos_num == 0 or label not in true_pos: continue
             label_true_pos = true_pos[label]
             label_false_pos = false_pos[label]
-- 
GitLab


From 49c50c9fda76f0e3ca7b157ab84cf03919b23166 Mon Sep 17 00:00:00 2001
From: chengduoZH
Date: Sun, 11 Feb 2018 15:22:00 +0800
Subject: [PATCH 075/217] Add multiBox API

---
 python/paddle/v2/fluid/layers/detection.py  | 159 +++++++++++++++++-
 python/paddle/v2/fluid/nets.py              |  33 ++++
 .../paddle/v2/fluid/tests/test_detection.py |  61 +++++++
 3 files changed, 251 insertions(+), 2 deletions(-)

diff --git a/python/paddle/v2/fluid/layers/detection.py b/python/paddle/v2/fluid/layers/detection.py
index 054443cb43..bbe2765e13 100644
--- a/python/paddle/v2/fluid/layers/detection.py
+++ b/python/paddle/v2/fluid/layers/detection.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,8 +16,19 @@ All layers related to the detection neural network.
 """
 
 from ..layer_helper import LayerHelper
+from ..param_attr import ParamAttr
+from ..framework import Variable
+from layer_function_generator import autodoc
+from tensor import concat
+from ops import reshape
+from ..nets import img_conv_with_bn
+from nn import transpose
+import math
 
-__all__ = ['detection_output', ]
+__all__ = [
+    'detection_output',
+    'multi_box_head',
+]
 
 
 def detection_output(scores,
@@ -114,3 +125,147 @@ def detection_output(scores,
         'nms_eta': nms_eta
     })
     return nmsed_outs
+
+
+def multi_box_head(inputs,
+                   num_classes,
+                   min_sizes=None,
+                   max_sizes=None,
+                   min_ratio=None,
+                   max_ratio=None,
+                   aspect_ratios=None,
+                   flip=False,
+                   share_location=True,
+                   kernel_size=1,
+                   pad=1,
+                   stride=1,
+                   use_batchnorm=False,
+                   base_size=None,
+                   name=None):
+    """
+    **Multi Box Head**
+
+    Take a list of input Variables and return the predicted box locations
+    (mbox_loc) and box confidences (mbox_conf).
+
+    Args:
+        inputs(list): The list of input Variables, the format
+            of all Variables is NCHW.
+        num_classes(int): The number of classes.
+        min_sizes(list, optional, default=None): The length of
+            min_sizes is used to compute the number of prior boxes.
+            If min_sizes is None, it will be computed according
+            to min_ratio and max_ratio.
+        max_sizes(list, optional, default=None): The length of max_sizes
+            is used to compute the number of prior boxes.
+        min_ratio(int): If min_sizes is None, min_ratio and max_ratio
+            will be used to compute min_sizes and max_sizes.
+        max_ratio(int): If min_sizes is None, min_ratio and max_ratio
+            will be used to compute min_sizes and max_sizes.
+        aspect_ratios(list): The number of aspect ratios is used to
+            compute the number of prior boxes.
+        base_size(int): the base_size is used to get min_size
+            and max_size according to min_ratio and max_ratio.
+        flip(bool, optional, default=False): Whether to flip
+            aspect ratios.
+        name(str, optional, None): Name of the prior box layer.
+
+    Returns:
+
+        mbox_loc(Variable): the predicted box locations computed from
+            the inputs.
+        mbox_conf(Variable): the predicted box confidences computed from
+            the inputs.
+
+    Examples:
+        .. code-block:: python
+
+            mbox_locs, mbox_confs = fluid.layers.multi_box_head(
+                inputs=[conv1, conv2],
+                num_classes=21,
+                min_ratio=20,
+                max_ratio=90,
+                aspect_ratios=[[2.], [2., 3.]],
+                base_size=300,
+                flip=True)
+    """
+
+    assert isinstance(inputs, list), 'inputs should be a list.'
+
+    if min_sizes is not None:
+        assert len(inputs) == len(min_sizes)
+
+    if max_sizes is not None:
+        assert len(inputs) == len(max_sizes)
+
+    if min_sizes is None:
+        # if min_sizes is None, min_sizes and max_sizes
+        # will be set according to max_ratio and min_ratio
+        assert max_ratio is not None and min_ratio is not None
+        min_sizes = []
+        max_sizes = []
+        num_layer = len(inputs)
+        step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
+        for ratio in xrange(min_ratio, max_ratio + 1, step):
+            min_sizes.append(base_size * ratio / 100.)
+            max_sizes.append(base_size * (ratio + step) / 100.)
+        min_sizes = [base_size * .10] + min_sizes
+        max_sizes = [base_size * .20] + max_sizes
+
+    if aspect_ratios is not None:
+        assert len(inputs) == len(aspect_ratios)
+
+    mbox_locs = []
+    mbox_confs = []
+    for i, input in enumerate(inputs):
+        min_size = min_sizes[i]
+        if type(min_size) is not list:
+            min_size = [min_size]
+
+        max_size = []
+        if max_sizes is not None:
+            max_size = max_sizes[i]
+            if type(max_size) is not list:
+                max_size = [max_size]
+            if max_size:
+                assert len(max_size) == len(
+                    min_size), "max_size and min_size should have same length."
+
+        aspect_ratio = []
+        if aspect_ratios is not None:
+            aspect_ratio = aspect_ratios[i]
+            if type(aspect_ratio) is not list:
+                aspect_ratio = [aspect_ratio]
+
+        num_priors_per_location = 0
+        if max_sizes is not None:
+            num_priors_per_location = len(min_size) + len(aspect_ratio) * len(
+                min_size) + len(max_size)
+        else:
+            num_priors_per_location = len(min_size) + len(aspect_ratio) * len(
+                min_size)
+        if flip:
+            num_priors_per_location += len(aspect_ratio) * len(min_size)
+
+        # mbox_loc
+        num_loc_output = num_priors_per_location * 4
+        if not share_location:
+            num_loc_output *= num_classes
+
+        mbox_loc = img_conv_with_bn(
+            input=input,
+            conv_num_filter=num_loc_output,
+            conv_padding=pad,
+            conv_stride=stride,
+            conv_filter_size=kernel_size,
+            conv_with_batchnorm=use_batchnorm)
+        mbox_loc = transpose(mbox_loc, perm=[0, 2, 3, 1])
+        mbox_locs.append(mbox_loc)
+
+        # mbox_conf
+        num_conf_output = num_priors_per_location * num_classes
+        conf_loc = img_conv_with_bn(
+            input=input,
+            conv_num_filter=num_conf_output,
+            conv_padding=pad,
+            conv_stride=stride,
+            conv_filter_size=kernel_size,
+            conv_with_batchnorm=use_batchnorm)
+        conf_loc = transpose(conf_loc, perm=[0, 2, 3, 1])
+        mbox_confs.append(conf_loc)
+
+    return mbox_locs, mbox_confs
diff --git a/python/paddle/v2/fluid/nets.py b/python/paddle/v2/fluid/nets.py
index be7878f869..b7deccfd1f 100644
--- a/python/paddle/v2/fluid/nets.py
+++ b/python/paddle/v2/fluid/nets.py
@@ -18,6 +18,7 @@ __all__ = [
     "sequence_conv_pool",
     "glu",
     "scaled_dot_product_attention",
+    "img_conv_with_bn",
 ]
 
 
@@ -107,6 +108,38 @@ def img_conv_group(input,
     return pool_out
 
 
+def img_conv_with_bn(input,
+                     conv_num_filter,
+                     conv_padding=1,
+                     conv_filter_size=3,
+                     conv_stride=1,
+                     conv_act=None,
+                     param_attr=None,
+                     conv_with_batchnorm=False,
+                     conv_batchnorm_drop_rate=0.0,
+                     use_cudnn=True):
+    """
+    Image convolution layer with optional batch normalization and dropout.
+    """
+    conv2d = layers.conv2d(
+        input=input,
+        num_filters=conv_num_filter,
+        filter_size=conv_filter_size,
+        padding=conv_padding,
+        stride=conv_stride,
+        param_attr=param_attr,
+        act=conv_act,
+        use_cudnn=use_cudnn)
+
+    if conv_with_batchnorm:
+        conv2d = layers.batch_norm(input=conv2d)
+        drop_rate = conv_batchnorm_drop_rate
+        if abs(drop_rate) > 1e-5:
+            conv2d = layers.dropout(x=conv2d, dropout_prob=drop_rate)
+
+    return conv2d
+
+
 def sequence_conv_pool(input,
                        num_filters,
                        filter_size,
diff --git a/python/paddle/v2/fluid/tests/test_detection.py b/python/paddle/v2/fluid/tests/test_detection.py
index 75498ad770..d2207f1bfa 100644
--- a/python/paddle/v2/fluid/tests/test_detection.py
+++ b/python/paddle/v2/fluid/tests/test_detection.py
@@ -13,7 +13,13 @@
 # limitations under the License.
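The channel arithmetic in multi_box_head above can be checked with a few lines of plain Python. The values below are hypothetical examples, not taken from the patch:

```python
# Prior count per spatial position, mirroring the logic in multi_box_head.
# Hypothetical config: one min size, one max size, one extra aspect ratio,
# flipping enabled, 21 classes.
min_size, max_size, aspect_ratio, flip = [30.0], [60.0], [2.0], True

num_priors = len(min_size)                        # one ratio-1 box per min size
num_priors += len(aspect_ratio) * len(min_size)   # extra aspect ratios
num_priors += len(max_size)                       # one sqrt(min*max) box
if flip:
    num_priors += len(aspect_ratio) * len(min_size)  # reciprocal ratios

assert num_priors == 4
num_loc_output = num_priors * 4    # 16 channels of box offsets (shared boxes)
num_conf_output = num_priors * 21  # 84 channels of class confidences
print(num_loc_output, num_conf_output)
```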
from __future__ import print_function +import paddle.v2.fluid as fluid +import paddle.v2.fluid.core as core +import paddle.v2.fluid.layers as layers +import paddle.v2.fluid.layers.detection as detection +from paddle.v2.fluid.framework import Program, program_guard import unittest +import numpy as np import paddle.v2.fluid.layers as layers from paddle.v2.fluid.framework import Program, program_guard @@ -49,5 +55,60 @@ class TestBook(unittest.TestCase): print(str(program)) +class TestMultiBoxHead(unittest.TestCase): + def test_prior_box(self): + data_shape = [3, 224, 224] + mbox_locs, mbox_confs = self.multi_box_output(data_shape) + # print mbox_locs.shape + # print mbox_confs.shape + # assert len(box.shape) == 2 + # assert box.shape == var.shape + # assert box.shape[1] == 4 + + def multi_box_output(self, data_shape): + images = fluid.layers.data( + name='pixel', shape=data_shape, dtype='float32') + conv1 = fluid.layers.conv2d( + input=images, + num_filters=3, + filter_size=3, + stride=2, + use_cudnn=False) + conv2 = fluid.layers.conv2d( + input=conv1, + num_filters=3, + filter_size=3, + stride=2, + use_cudnn=False) + conv3 = fluid.layers.conv2d( + input=conv2, + num_filters=3, + filter_size=3, + stride=2, + use_cudnn=False) + conv4 = fluid.layers.conv2d( + input=conv3, + num_filters=3, + filter_size=3, + stride=2, + use_cudnn=False) + conv5 = fluid.layers.conv2d( + input=conv4, + num_filters=3, + filter_size=3, + stride=2, + use_cudnn=False) + + mbox_locs, mbox_confs = detection.multi_box_head( + inputs=[conv1, conv2, conv3, conv4, conv5, conv5], + num_classes=21, + min_ratio=20, + max_ratio=90, + aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]], + base_size=300, + flip=True) + return mbox_locs, mbox_confs + + if __name__ == '__main__': unittest.main() -- GitLab From 51912a7a77ace39de00e7b8198c37d9c85491614 Mon Sep 17 00:00:00 2001 From: Yancey Date: Mon, 12 Feb 2018 12:48:59 +0800 Subject: [PATCH 076/217] Append cmd in manylinux dockerfile.x86 (#8397) * append cmd in manylinux dockerfile.x86 * add new line --- tools/manylinux1/Dockerfile.x64 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/manylinux1/Dockerfile.x64 b/tools/manylinux1/Dockerfile.x64 index 0f1b833130..93cab692e3 100644 --- a/tools/manylinux1/Dockerfile.x64 +++ b/tools/manylinux1/Dockerfile.x64 @@ -52,3 +52,5 @@ RUN wget -O /opt/swig-2.0.12.tar.gz https://sourceforge.net/projects/swig/files/ RUN mkdir -p /src && cd /src && git clone https://github.com/NVIDIA/nccl.git nccl && cd nccl &&\ make -j `nproc` install && cd .. && rm -rf nccl + +CMD ["bash", "/paddle/paddle/scripts/docker/build.sh"] -- GitLab From 8a0dd2409e7cebe146b8a93103c0de71577bc533 Mon Sep 17 00:00:00 2001 From: qingqing01 Date: Mon, 12 Feb 2018 12:54:24 +0800 Subject: [PATCH 077/217] Expose softmax_with_cross_entropy and smooth_l1 into Python API. (#8375) * Add softmax_with_cross_entropy and smooth_l1 in Python API. * Fix doc format. 
--- paddle/fluid/operators/smooth_l1_loss_op.cc | 14 +-- python/paddle/v2/fluid/layers/nn.py | 121 ++++++++++++++++++++ python/paddle/v2/fluid/tests/test_layers.py | 18 +++ 3 files changed, 145 insertions(+), 8 deletions(-) diff --git a/paddle/fluid/operators/smooth_l1_loss_op.cc b/paddle/fluid/operators/smooth_l1_loss_op.cc index be4c7a56a8..e6eede23ee 100644 --- a/paddle/fluid/operators/smooth_l1_loss_op.cc +++ b/paddle/fluid/operators/smooth_l1_loss_op.cc @@ -44,7 +44,6 @@ class SmoothL1LossOp : public framework::OperatorWithKernel { } }; -template class SmoothL1LossOpMaker : public framework::OpProtoAndCheckerMaker { public: SmoothL1LossOpMaker(OpProto* proto, OpAttrChecker* op_checker) @@ -73,10 +72,10 @@ class SmoothL1LossOpMaker : public framework::OpProtoAndCheckerMaker { AddOutput("Out", "(Tensor, default Tensor) A tensor with rank be 2. " "The output smooth l1 loss with shape [batch_size, 1]."); - AddAttr("sigma", - "Hyper parameter of smooth l1 loss op." - "A float scalar with default value 3.0.") - .SetDefault(3.0); + AddAttr("sigma", + "Hyper parameter of smooth l1 loss op." + "A float scalar with default value 3.0.") + .SetDefault(1.0); AddComment(R"DOC( Smooth L1 Loss Operator. @@ -133,9 +132,8 @@ class SmoothL1LossGradOp : public framework::OperatorWithKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP(smooth_l1_loss, ops::SmoothL1LossOp, - ops::SmoothL1LossOpMaker, smooth_l1_loss_grad, - ops::SmoothL1LossGradOp); +REGISTER_OP(smooth_l1_loss, ops::SmoothL1LossOp, ops::SmoothL1LossOpMaker, + smooth_l1_loss_grad, ops::SmoothL1LossGradOp); REGISTER_OP_CPU_KERNEL( smooth_l1_loss, ops::SmoothL1LossKernel); diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index 5ebd329fc0..051b536818 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -66,6 +66,8 @@ __all__ = [ 'row_conv', 'multiplex', 'layer_norm', + 'softmax_with_cross_entropy', + 'smooth_l1', ] @@ -3091,3 +3093,122 @@ def multiplex(inputs, index): 'Ids': index}, outputs={'Out': [out]}) return out + + +def softmax_with_cross_entropy(logits, label, soft_label=False): + """ + **Softmax With Cross Entropy Operator.** + + Cross entropy loss with softmax is used as the output layer extensively. This + operator computes the softmax normalized values for each row of the input + tensor, after which cross-entropy loss is computed. This provides a more + numerically stable gradient. + + Because this operator performs a softmax on logits internally, it expects + unscaled logits. This operator should not be used with the output of + softmax operator since that would produce incorrect results. + + When the attribute soft_label is set false, this operators expects mutually + exclusive hard labels, each sample in a batch is in exactly one class with a + probability of 1.0. Each sample in the batch will have a single label. + + The equation is as follows: + + 1) Hard label (one-hot label, so every sample has exactly one class) + + .. math:: + + loss_j = -\\text{logit}_{label_j} + + \\log\\left(\\sum_{i=0}^{K}\\exp(\\text{logit}_i)\\right), j = 1,..., K + + 2) Soft label (each sample can have a distribution over all classes) + + .. math:: + + loss_j = -\\sum_{i=0}^{K}\\text{label}_i + \\left(\\text{logit}_i - \\log\\left(\\sum_{i=0}^{K} + \\exp(\\text{logit}_i)\\right)\\right), j = 1,...,K + + Args: + logits (Variable): The unscaled log probabilities, which is a 2-D tensor + with shape [N x K]. 
N is the batch_size, and K is the class number.
+        label (Variable): The ground truth which is a 2-D tensor. If soft_label
+            is set to false, Label is a Tensor with shape [N x 1]. If
+            soft_label is set to true, Label is a Tensor with shape [N x K].
+        soft_label (bool): A flag to indicate whether to interpret the given
+            labels as soft labels. By default, `soft_label` is set to False.
+    Returns:
+        Variable: The cross entropy loss is a 2-D tensor with shape [N x 1].
+
+    Examples:
+        .. code-block:: python
+
+            data = fluid.layers.data(name='data', shape=[128], dtype='float32')
+            label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+            fc = fluid.layers.fc(input=data, size=100)
+            out = fluid.layers.softmax_with_cross_entropy(logits=fc, label=label)
+    """
+    helper = LayerHelper('softmax_with_cross_entropy', **locals())
+    softmax = helper.create_tmp_variable(dtype=logits.dtype)
+    loss = helper.create_tmp_variable(dtype=logits.dtype)
+    helper.append_op(
+        type='softmax_with_cross_entropy',
+        inputs={'Logits': logits,
+                'Label': label},
+        outputs={'Softmax': softmax,
+                 'Loss': loss},
+        attrs={'soft_label': soft_label})
+    return loss
+
+
+def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None):
+    """
+    **Smooth L1 Loss Operator.**
+
+    This operator computes the smooth l1 loss for X and Y.
+    The operator takes the first dimension of X and Y as batch size.
+    For each instance, it computes the smooth l1 loss element by element first
+    and then sums all the losses. So the shape of Out is [batch_size, 1].
+
+    Args:
+        x (Variable): A tensor with rank at least 2. The input value of smooth
+            l1 loss op with shape [batch_size, dim1, ..., dimN].
+        y (Variable): A tensor with rank at least 2. The target value of smooth
+            l1 loss op with the same shape as x.
+        inside_weight (Variable|None): A tensor with rank at least 2. This
+            input is optional and should have the same shape as x. If provided,
+            the result of (x - y) will be multiplied by this tensor element by
+            element.
+        outside_weight (Variable|None): A tensor with rank at least 2. This
+            input is optional and should have the same shape as x. If provided,
+            the output smooth l1 loss will be multiplied by this tensor element
+            by element.
+        sigma (float|None): Hyper parameter of smooth l1 loss op. A float scalar
+            with default value 1.0.
+    Returns:
+        Variable: A tensor of rank 2. The output smooth l1 loss with
+            shape [batch_size, 1].
+
+    Examples:
+        .. code-block:: python
+
+            data = fluid.layers.data(name='data', shape=[128], dtype='float32')
+            label = fluid.layers.data(name='label', shape=[100], dtype='float32')
+            fc = fluid.layers.fc(input=data, size=100)
+            out = fluid.layers.smooth_l1(x=fc, y=label)
+    """
+    helper = LayerHelper('smooth_l1_loss', **locals())
+    diff = helper.create_tmp_variable(dtype=x.dtype)
+    loss = helper.create_tmp_variable(dtype=x.dtype)
+    helper.append_op(
+        type='smooth_l1_loss',
+        inputs={
+            'X': x,
+            'Y': y,
+            'InsideWeight': inside_weight,
+            'OutsideWeight': outside_weight
+        },
+        outputs={'Diff': diff,
+                 'Out': loss},
+        attrs={'sigma': sigma})
+    return loss
diff --git a/python/paddle/v2/fluid/tests/test_layers.py b/python/paddle/v2/fluid/tests/test_layers.py
index fa46f86973..50ef820424 100644
--- a/python/paddle/v2/fluid/tests/test_layers.py
+++ b/python/paddle/v2/fluid/tests/test_layers.py
@@ -309,6 +309,24 @@ class TestBook(unittest.TestCase):
         self.assertIsNotNone(out)
         print(str(program))
 
+    def test_softmax_with_cross_entropy(self):
+        program = Program()
+        with program_guard(program):
+            x = layers.data(name='x', shape=[16], dtype='float32')
+            y = layers.data(name='label', shape=[1], dtype='int64')
+            loss = layers.softmax_with_cross_entropy(x, y)
+            self.assertIsNotNone(loss)
+            print(str(program))
+
+    def test_smooth_l1(self):
+        program = Program()
+        with program_guard(program):
+            x = layers.data(name='x', shape=[4], dtype='float32')
+            y = layers.data(name='label', shape=[4], dtype='float32')
+            loss = layers.smooth_l1(x, y)
+            self.assertIsNotNone(loss)
+            print(str(program))
+
 
 if __name__ == '__main__':
     unittest.main()
-- 
GitLab


From da02a5812c4a8947a9e20d8d590e67165d7703c5 Mon Sep 17 00:00:00 2001
From: Tao Luo
Date: Mon, 12 Feb 2018 13:10:45 +0800
Subject: [PATCH 078/217] refine inference_lib_dist after code move, and add
 it to docker/build.sh (#8379)

* refine inference_lib_dist after code move, and add it to docker/build.sh

* remove is_directory in inference_lib.cmake
---
 cmake/inference_lib.cmake      | 18 ++++++++----------
 paddle/scripts/docker/build.sh | 12 ++++++++++++
 2 files changed, 20 insertions(+), 10 deletions(-)

diff --git a/cmake/inference_lib.cmake b/cmake/inference_lib.cmake
index 7d53554358..df18663772 100644
--- a/cmake/inference_lib.cmake
+++ b/cmake/inference_lib.cmake
@@ -16,12 +16,10 @@ function(copy TARGET)
     foreach(index RANGE ${len})
         list(GET copy_lib_SRCS ${index} src)
         list(GET copy_lib_DSTS ${index} dst)
-        add_custom_command(TARGET ${TARGET} PRE_BUILD COMMAND mkdir -p "${dst}")
-        if(IS_DIRECTORY ${src})
-            add_custom_command(TARGET ${TARGET} PRE_BUILD COMMAND cp -r "${src}" "${dst}")
-        else()
-            add_custom_command(TARGET ${TARGET} PRE_BUILD COMMAND cp "${src}" "${dst}")
-        endif()
+        add_custom_command(TARGET ${TARGET} PRE_BUILD
+                           COMMAND mkdir -p "${dst}"
+                           COMMAND cp -r "${src}" "${dst}"
+                           COMMENT "copying ${src} -> ${dst}")
     endforeach()
 endfunction()
 
@@ -53,11 +51,11 @@ IF(NOT PROTOBUF_FOUND)
 ENDIF(NOT PROTOBUF_FOUND)
 
 # paddle fluid module
-set(src_dir "${PADDLE_SOURCE_DIR}/paddle")
-set(dst_dir "${CMAKE_INSTALL_PREFIX}/paddle")
+set(src_dir "${PADDLE_SOURCE_DIR}/paddle/fluid")
+set(dst_dir "${CMAKE_INSTALL_PREFIX}/paddle/fluid")
 set(module "framework")
 copy(framework_lib DEPS framework_py_proto
-  SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/details/*.h ${PADDLE_BINARY_DIR}/paddle/framework/framework.pb.h
+  SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/details/*.h ${PADDLE_BINARY_DIR}/paddle/fluid/framework/framework.pb.h
  DSTS ${dst_dir}/${module}
${dst_dir}/${module}/details ${dst_dir}/${module} ) @@ -69,7 +67,7 @@ copy(memory_lib set(module "inference") copy(inference_lib DEPENDS paddle_fluid_shared - SRCS ${src_dir}/${module}/*.h ${PADDLE_BINARY_DIR}/paddle/inference/libpaddle_fluid.so + SRCS ${src_dir}/${module}/*.h ${PADDLE_BINARY_DIR}/paddle/fluid/inference/libpaddle_fluid.so DSTS ${dst_dir}/${module} ${dst_dir}/${module} ) diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index 1486d5ed25..442a7ea883 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -204,6 +204,17 @@ function gen_capi_package() { fi } +function gen_fluid_inference_lib() { + if [ ${WITH_C_API:-OFF} == "OFF" ] ; then + cat < Date: Sun, 11 Feb 2018 09:51:02 +0000 Subject: [PATCH 079/217] Refine the inference unittests. --- paddle/fluid/framework/lod_tensor.cc | 8 +- .../tests/book/test_inference_word2vec.cc | 10 +- .../tests/book/test_image_classification.py | 4 +- .../tests/book/test_label_semantic_roles.py | 37 ++--- .../v2/fluid/tests/book/test_word2vec.py | 127 +++++++++--------- 5 files changed, 101 insertions(+), 85 deletions(-) diff --git a/paddle/fluid/framework/lod_tensor.cc b/paddle/fluid/framework/lod_tensor.cc index 05c67e453d..70a2a65266 100644 --- a/paddle/fluid/framework/lod_tensor.cc +++ b/paddle/fluid/framework/lod_tensor.cc @@ -31,8 +31,14 @@ std::ostream &operator<<(std::ostream &os, const LoD &lod) { os << "{"; for (auto &v : lod) { os << "{"; + bool is_first = true; for (auto &i : v) { - os << i << ","; + if (is_first) { + os << i; + is_first = false; + } else { + os << ", " << i; + } } os << "}"; } diff --git a/paddle/fluid/inference/tests/book/test_inference_word2vec.cc b/paddle/fluid/inference/tests/book/test_inference_word2vec.cc index 93376b6824..a62b0a37c6 100644 --- a/paddle/fluid/inference/tests/book/test_inference_word2vec.cc +++ b/paddle/fluid/inference/tests/book/test_inference_word2vec.cc @@ -31,12 +31,12 @@ TEST(inference, word2vec) { paddle::framework::LoDTensor first_word, second_word, third_word, fourth_word; paddle::framework::LoD lod{{0, 1}}; - int64_t dict_size = 2072; // Hard-coding the size of dictionary + int64_t dict_size = 2073; // The size of dictionary - SetupLoDTensor(first_word, lod, static_cast(0), dict_size); - SetupLoDTensor(second_word, lod, static_cast(0), dict_size); - SetupLoDTensor(third_word, lod, static_cast(0), dict_size); - SetupLoDTensor(fourth_word, lod, static_cast(0), dict_size); + SetupLoDTensor(first_word, lod, static_cast(0), dict_size - 1); + SetupLoDTensor(second_word, lod, static_cast(0), dict_size - 1); + SetupLoDTensor(third_word, lod, static_cast(0), dict_size - 1); + SetupLoDTensor(fourth_word, lod, static_cast(0), dict_size - 1); std::vector cpu_feeds; cpu_feeds.push_back(&first_word); diff --git a/python/paddle/v2/fluid/tests/book/test_image_classification.py b/python/paddle/v2/fluid/tests/book/test_image_classification.py index ffbe5bdbd6..4b764ee3b3 100644 --- a/python/paddle/v2/fluid/tests/book/test_image_classification.py +++ b/python/paddle/v2/fluid/tests/book/test_image_classification.py @@ -182,7 +182,9 @@ def infer(use_cuda, save_dirname=None): fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) # The input's dimension of conv should be 4-D or 5-D. - tensor_img = numpy.random.rand(1, 3, 32, 32).astype("float32") + # Use normilized image pixels as input data, which should be in the range [0, 1.0]. 
+ batch_size = 1 + tensor_img = numpy.random.rand(batch_size, 3, 32, 32).astype("float32") # Construct feed as a dictionary of {feed_target_name: feed_target_data} # and results will contain a list of data corresponding to fetch_targets. diff --git a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py index f33e81186b..f5fb3ed36d 100644 --- a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py +++ b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py @@ -26,7 +26,7 @@ import unittest word_dict, verb_dict, label_dict = conll05.get_dict() word_dict_len = len(word_dict) label_dict_len = len(label_dict) -pred_len = len(verb_dict) +pred_dict_len = len(verb_dict) mark_dict_len = 2 word_dim = 32 @@ -53,7 +53,7 @@ def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, # 8 features predicate_embedding = fluid.layers.embedding( input=predicate, - size=[pred_len, word_dim], + size=[pred_dict_len, word_dim], dtype='float32', is_sparse=IS_SPARSE, param_attr='vemb') @@ -234,6 +234,7 @@ def train(use_cuda, save_dirname=None): # Set the threshold low to speed up the CI test if float(pass_precision) > 0.05: if save_dirname is not None: + # TODO(liuyiqun): Change the target to crf_decode fluid.io.save_inference_model(save_dirname, [ 'word_data', 'verb_data', 'ctx_n2_data', 'ctx_n1_data', 'ctx_0_data', 'ctx_p1_data', @@ -259,14 +260,14 @@ def infer(use_cuda, save_dirname=None): fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) lod = [0, 4, 10] - ts_word = create_random_lodtensor(lod, place, low=0, high=1) - ts_pred = create_random_lodtensor(lod, place, low=0, high=1) - ts_ctx_n2 = create_random_lodtensor(lod, place, low=0, high=1) - ts_ctx_n1 = create_random_lodtensor(lod, place, low=0, high=1) - ts_ctx_0 = create_random_lodtensor(lod, place, low=0, high=1) - ts_ctx_p1 = create_random_lodtensor(lod, place, low=0, high=1) - ts_ctx_p2 = create_random_lodtensor(lod, place, low=0, high=1) - ts_mark = create_random_lodtensor(lod, place, low=0, high=1) + word = create_random_lodtensor(lod, place, low=0, high=word_dict_len - 1) + pred = create_random_lodtensor(lod, place, low=0, high=pred_dict_len - 1) + ctx_n2 = create_random_lodtensor(lod, place, low=0, high=word_dict_len - 1) + ctx_n1 = create_random_lodtensor(lod, place, low=0, high=word_dict_len - 1) + ctx_0 = create_random_lodtensor(lod, place, low=0, high=word_dict_len - 1) + ctx_p1 = create_random_lodtensor(lod, place, low=0, high=word_dict_len - 1) + ctx_p2 = create_random_lodtensor(lod, place, low=0, high=word_dict_len - 1) + mark = create_random_lodtensor(lod, place, low=0, high=mark_dict_len - 1) # Construct feed as a dictionary of {feed_target_name: feed_target_data} # and results will contain a list of data corresponding to fetch_targets. 
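
The random inputs fed above come from the `create_random_lodtensor` helper defined in these test files. A minimal sketch of it, for reference: NumPy's `random_integers` samples the closed range `[low, high]`, which is why every call site passes `high = dict_len - 1`, and `lod = [0, 4, 10]` encodes two sequences of lengths 4 and 6.

    import numpy as np
    import paddle.v2.fluid as fluid

    def create_random_lodtensor(lod, place, low, high):
        # Samples lod[-1] integer ids from the closed range [low, high].
        data = np.random.random_integers(low, high, [lod[-1], 1]).astype("int64")
        res = fluid.LoDTensor()
        res.set(data, place)
        res.set_lod([lod])  # one LoD level: offsets [0, 4, 10]
        return res
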
@@ -281,14 +282,14 @@ def infer(use_cuda, save_dirname=None): results = exe.run(inference_program, feed={ - feed_target_names[0]: ts_word, - feed_target_names[1]: ts_pred, - feed_target_names[2]: ts_ctx_n2, - feed_target_names[3]: ts_ctx_n1, - feed_target_names[4]: ts_ctx_0, - feed_target_names[5]: ts_ctx_p1, - feed_target_names[6]: ts_ctx_p2, - feed_target_names[7]: ts_mark + feed_target_names[0]: word, + feed_target_names[1]: pred, + feed_target_names[2]: ctx_n2, + feed_target_names[3]: ctx_n1, + feed_target_names[4]: ctx_0, + feed_target_names[5]: ctx_p1, + feed_target_names[6]: ctx_p2, + feed_target_names[7]: mark }, fetch_list=fetch_targets, return_numpy=False) diff --git a/python/paddle/v2/fluid/tests/book/test_word2vec.py b/python/paddle/v2/fluid/tests/book/test_word2vec.py index 69bfbcee69..d30f623085 100644 --- a/python/paddle/v2/fluid/tests/book/test_word2vec.py +++ b/python/paddle/v2/fluid/tests/book/test_word2vec.py @@ -1,5 +1,6 @@ # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. -# # Licensed under the Apache License, Version 2.0 (the "License"); +# +# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # @@ -21,6 +22,7 @@ import sys def create_random_lodtensor(lod, place, low, high): + # The range of data elements is [low, high] data = np.random.random_integers(low, high, [lod[-1], 1]).astype("int64") res = fluid.LoDTensor() res.set(data, place) @@ -28,54 +30,7 @@ def create_random_lodtensor(lod, place, low, high): return res -def infer(use_cuda, save_dirname=None): - if save_dirname is None: - return - - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) - - # Use fluid.io.load_inference_model to obtain the inference program desc, - # the feed_target_names (the names of variables that will be feeded - # data using feed operators), and the fetch_targets (variables that - # we want to obtain data from using fetch operators). - [inference_program, feed_target_names, - fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) - - word_dict = paddle.dataset.imikolov.build_dict() - dict_size = len(word_dict) - 1 - - # Setup input, by creating 4 words, and setting up lod required for - # lookup_table_op - lod = [0, 1] - first_word = create_random_lodtensor(lod, place, low=0, high=dict_size) - second_word = create_random_lodtensor(lod, place, low=0, high=dict_size) - third_word = create_random_lodtensor(lod, place, low=0, high=dict_size) - fourth_word = create_random_lodtensor(lod, place, low=0, high=dict_size) - - assert feed_target_names[0] == 'firstw' - assert feed_target_names[1] == 'secondw' - assert feed_target_names[2] == 'thirdw' - assert feed_target_names[3] == 'forthw' - - # Construct feed as a dictionary of {feed_target_name: feed_target_data} - # and results will contain a list of data corresponding to fetch_targets. 
- results = exe.run(inference_program, - feed={ - feed_target_names[0]: first_word, - feed_target_names[1]: second_word, - feed_target_names[2]: third_word, - feed_target_names[3]: fourth_word - }, - fetch_list=fetch_targets, - return_numpy=False) - print(results[0].lod()) - np_data = np.array(results[0]) - print("Inference Shape: ", np_data.shape) - print("Inference results: ", np_data) - - -def train(use_cuda, is_sparse, parallel, save_dirname): +def train(use_cuda, is_sparse, is_parallel, save_dirname): PASS_NUM = 100 EMBED_SIZE = 32 HIDDEN_SIZE = 256 @@ -130,7 +85,7 @@ def train(use_cuda, is_sparse, parallel, save_dirname): forth_word = fluid.layers.data(name='forthw', shape=[1], dtype='int64') next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64') - if not parallel: + if not is_parallel: avg_cost, predict_word = __network__( [first_word, second_word, third_word, forth_word, next_word]) else: @@ -176,11 +131,61 @@ def train(use_cuda, is_sparse, parallel, save_dirname): raise AssertionError("Cost is too large {0:2.2}".format(avg_cost_np[0])) -def main(use_cuda, is_sparse, parallel): +def infer(use_cuda, save_dirname=None): + if save_dirname is None: + return + + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + + # Use fluid.io.load_inference_model to obtain the inference program desc, + # the feed_target_names (the names of variables that will be feeded + # data using feed operators), and the fetch_targets (variables that + # we want to obtain data from using fetch operators). + [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + + word_dict = paddle.dataset.imikolov.build_dict() + dict_size = len(word_dict) + + # Setup inputs, by creating 4 words, the lod of which should be [0, 1] + lod = [0, 1] + first_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1) + second_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1) + third_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1) + fourth_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1) + + assert feed_target_names[0] == 'firstw' + assert feed_target_names[1] == 'secondw' + assert feed_target_names[2] == 'thirdw' + assert feed_target_names[3] == 'forthw' + + # Construct feed as a dictionary of {feed_target_name: feed_target_data} + # and results will contain a list of data corresponding to fetch_targets. 
+ results = exe.run(inference_program, + feed={ + feed_target_names[0]: first_word, + feed_target_names[1]: second_word, + feed_target_names[2]: third_word, + feed_target_names[3]: fourth_word + }, + fetch_list=fetch_targets, + return_numpy=False) + print(results[0].lod()) + np_data = np.array(results[0]) + print("Inference Shape: ", np_data.shape) + + +def main(use_cuda, is_sparse, is_parallel): if use_cuda and not fluid.core.is_compiled_with_cuda(): return - save_dirname = "word2vec.inference.model" - train(use_cuda, is_sparse, parallel, save_dirname) + + if not is_parallel: + save_dirname = "word2vec.inference.model" + else: + save_dirname = None + + train(use_cuda, is_sparse, is_parallel, save_dirname) infer(use_cuda, save_dirname) @@ -193,10 +198,10 @@ class W2VTest(unittest.TestCase): pass -def inject_test_method(use_cuda, is_sparse, parallel): +def inject_test_method(use_cuda, is_sparse, is_parallel): fn_name = "test_{0}_{1}_{2}".format("cuda" if use_cuda else "cpu", "sparse" if is_sparse else "dense", "parallel" - if parallel else "normal") + if is_parallel else "normal") def __impl__(*args, **kwargs): prog = fluid.Program() @@ -204,10 +209,12 @@ def inject_test_method(use_cuda, is_sparse, parallel): scope = fluid.core.Scope() with fluid.scope_guard(scope): with fluid.program_guard(prog, startup_prog): - main(use_cuda=use_cuda, is_sparse=is_sparse, parallel=parallel) + main( + use_cuda=use_cuda, + is_sparse=is_sparse, + is_parallel=is_parallel) - # run only 2 cases: use_cuda is either True or False - if is_sparse == False and parallel == False: + if use_cuda and is_sparse: fn = __impl__ else: # skip the other test when on CI server @@ -219,8 +226,8 @@ def inject_test_method(use_cuda, is_sparse, parallel): for use_cuda in (False, True): for is_sparse in (False, True): - for parallel in (False, True): - inject_test_method(use_cuda, is_sparse, parallel) + for is_parallel in (False, True): + inject_test_method(use_cuda, is_sparse, is_parallel) if __name__ == '__main__': unittest.main() -- GitLab From dca9941e4bbd6e1976d8a4a4dd1bba3ae0e36200 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Mon, 12 Feb 2018 13:58:41 +0800 Subject: [PATCH 080/217] pass size when copy --- paddle/fluid/operators/concat_op.h | 4 ++-- paddle/fluid/operators/split_op.h | 2 +- paddle/fluid/operators/strided_memcpy.h | 9 ++++----- python/paddle/v2/fluid/distribute_transpiler.py | 5 +++++ 4 files changed, 12 insertions(+), 8 deletions(-) diff --git a/paddle/fluid/operators/concat_op.h b/paddle/fluid/operators/concat_op.h index 878e530585..c8a4292932 100644 --- a/paddle/fluid/operators/concat_op.h +++ b/paddle/fluid/operators/concat_op.h @@ -38,7 +38,7 @@ class ConcatKernel : public framework::OpKernel { auto in_stride = framework::stride_numel(in->dims()); StridedNumelCopyWithAxis(ctx.device_context(), axis, out->data() + output_offset, out_stride, - in->data(), in_stride); + in->data(), in_stride, in_stride[axis]); output_offset += in_stride[axis]; } } @@ -59,7 +59,7 @@ class ConcatGradKernel : public framework::OpKernel { auto out_stride = framework::stride_numel(out->dims()); StridedNumelCopyWithAxis(ctx.device_context(), axis, out->data(), out_stride, in->data() + input_offset, - in_stride); + in_stride, out_stride[axis]); input_offset += out_stride[axis]; } } diff --git a/paddle/fluid/operators/split_op.h b/paddle/fluid/operators/split_op.h index 06bcf82620..54420e1bf6 100644 --- a/paddle/fluid/operators/split_op.h +++ b/paddle/fluid/operators/split_op.h @@ -38,7 +38,7 @@ class SplitOpKernel : public 
framework::OpKernel { auto out_stride = framework::stride_numel(out->dims()); StridedNumelCopyWithAxis(ctx.device_context(), axis, out->data(), out_stride, in->data() + input_offset, - in_stride); + in_stride, out_stride[axis]); input_offset += out_stride[axis]; } } diff --git a/paddle/fluid/operators/strided_memcpy.h b/paddle/fluid/operators/strided_memcpy.h index 4036d1091d..4c7b90693a 100644 --- a/paddle/fluid/operators/strided_memcpy.h +++ b/paddle/fluid/operators/strided_memcpy.h @@ -54,11 +54,11 @@ inline void StridedNumelCopyWithAxis(const platform::DeviceContext& ctx, int64_t axis, T* dst, const framework::DDim& dst_stride_numel, const T* src, - const framework::DDim& src_stride_numel) { + const framework::DDim& src_stride_numel, + int64_t size) { int64_t before = dst_stride_numel[0] / dst_stride_numel[axis]; int64_t src_after = src_stride_numel[axis]; int64_t dst_after = dst_stride_numel[axis]; - int64_t copy_size = std::min(src_after, dst_after); auto place = ctx.GetPlace(); PADDLE_ENFORCE_EQ(src_stride_numel.size(), dst_stride_numel.size(), @@ -83,15 +83,14 @@ inline void StridedNumelCopyWithAxis(const platform::DeviceContext& ctx, if (platform::is_cpu_place(place)) { auto& cpu_place = boost::get(place); memory::Copy(cpu_place, dst + i * dst_after, cpu_place, - src + i * src_after, sizeof(T) * copy_size); + src + i * src_after, sizeof(T) * size); } else { #ifdef PADDLE_WITH_CUDA auto& gpu_place = boost::get(place); auto& cuda_ctx = reinterpret_cast(ctx); memory::Copy(gpu_place, dst + i * dst_after, gpu_place, - src + i * src_after, sizeof(T) * copy_size, - cuda_ctx.stream()); + src + i * src_after, sizeof(T) * size, cuda_ctx.stream()); #else PADDLE_THROW("Paddle is not compiled with GPU"); #endif diff --git a/python/paddle/v2/fluid/distribute_transpiler.py b/python/paddle/v2/fluid/distribute_transpiler.py index f84481adf7..689920af0c 100644 --- a/python/paddle/v2/fluid/distribute_transpiler.py +++ b/python/paddle/v2/fluid/distribute_transpiler.py @@ -121,6 +121,7 @@ def split_dense_variable(var_list, block_size += dim1 - remains # update split_count after aligning split_count = int(math.ceil(var_numel / float(block_size))) + print("###split var ", var.name, var.shape, block_size, split_count) for block_id in xrange(split_count): curr_block_size = min(block_size, var_numel - ( (block_id) * block_size)) @@ -255,6 +256,7 @@ class DistributeTranspiler: splited_shape = [rows] if len(orig_shape) >= 2: splited_shape.extend(orig_shape[1:]) + print("###splited: ", size, rows, splited_shape) var = program.global_block().create_var( name="%s.block%d" % (varname, i), psersistable=False, @@ -262,6 +264,7 @@ class DistributeTranspiler: type=orig_var.type, shape=splited_shape) # flattend splited var var_mapping[varname].append(var) + print("###created split var ", var) return var_mapping def _clone_var(self, block, var): @@ -528,6 +531,8 @@ class DistributeTranspiler: """ # step5 pserver_program = Program() + print("param mapping on pserver: #### ", + self.param_grad_ep_mapping[endpoint]["params"]) for v in self.param_grad_ep_mapping[endpoint]["params"]: self._clone_var(pserver_program.global_block(), v) for v in self.param_grad_ep_mapping[endpoint]["grads"]: -- GitLab From f95e05a388d9d9fb541affd9480dc2ae8636d04f Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Mon, 12 Feb 2018 06:07:48 +0000 Subject: [PATCH 081/217] Refine the inference unittests. 
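
Context for the strided-copy change in PATCH 080 above: when concat/split operate along an axis, the source and destination widths along that axis legitimately differ, so the old `min(src_after, dst_after)` guess could copy the wrong number of elements; the caller now passes the exact count. A NumPy sketch of the copy pattern, illustrative only and not the C++ API:

    import numpy as np

    def strided_copy_with_axis(dst, dst_after, src, src_after, before, size):
        # For each of the `before` outer blocks, copy exactly `size`
        # elements at the two (different) per-block strides.
        for i in range(before):
            dst[i * dst_after:i * dst_after + size] = \
                src[i * src_after:i * src_after + size]

    # Concat of a flat [2, 2] block into the left half of a flat [2, 4] output:
    src = np.arange(4.0)
    dst = np.zeros(8)
    strided_copy_with_axis(dst, dst_after=4, src=src, src_after=2, before=2, size=2)
    # dst is now [0, 1, 0, 0, 2, 3, 0, 0]
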
--- .../fluid/inference/tests/book/CMakeLists.txt | 2 +- .../test_inference_label_semantic_roles.cc | 46 +++++++++++++++---- .../test_inference_understand_sentiment.cc | 7 ++- .../tests/book/test_label_semantic_roles.py | 1 - .../tests/book/test_understand_sentiment.py | 30 +++++++----- 5 files changed, 61 insertions(+), 25 deletions(-) diff --git a/paddle/fluid/inference/tests/book/CMakeLists.txt b/paddle/fluid/inference/tests/book/CMakeLists.txt index cddd5a786c..8db3e76e76 100644 --- a/paddle/fluid/inference/tests/book/CMakeLists.txt +++ b/paddle/fluid/inference/tests/book/CMakeLists.txt @@ -30,5 +30,5 @@ inference_test(label_semantic_roles) inference_test(recognize_digits ARGS mlp) inference_test(recommender_system) #inference_test(rnn_encoder_decoder) -inference_test(understand_sentiment) +inference_test(understand_sentiment ARGS conv lstm) inference_test(word2vec) diff --git a/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc b/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc index 55acd95f50..7b75aea73f 100644 --- a/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc +++ b/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc @@ -32,16 +32,42 @@ TEST(inference, label_semantic_roles) { paddle::framework::LoDTensor word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark; paddle::framework::LoD lod{{0, 4, 10}}; - - SetupLoDTensor(word, lod, static_cast(0), static_cast(1)); - SetupLoDTensor( - predicate, lod, static_cast(0), static_cast(1)); - SetupLoDTensor(ctx_n2, lod, static_cast(0), static_cast(1)); - SetupLoDTensor(ctx_n1, lod, static_cast(0), static_cast(1)); - SetupLoDTensor(ctx_0, lod, static_cast(0), static_cast(1)); - SetupLoDTensor(ctx_p1, lod, static_cast(0), static_cast(1)); - SetupLoDTensor(ctx_p2, lod, static_cast(0), static_cast(1)); - SetupLoDTensor(mark, lod, static_cast(0), static_cast(1)); + int64_t word_dict_len = 44068; + int64_t predicate_dict_len = 3162; + int64_t mark_dict_len = 2; + + SetupLoDTensor(word, + lod, + static_cast(0), + static_cast(word_dict_len - 1)); + SetupLoDTensor(predicate, + lod, + static_cast(0), + static_cast(predicate_dict_len - 1)); + SetupLoDTensor(ctx_n2, + lod, + static_cast(0), + static_cast(word_dict_len - 1)); + SetupLoDTensor(ctx_n1, + lod, + static_cast(0), + static_cast(word_dict_len - 1)); + SetupLoDTensor(ctx_0, + lod, + static_cast(0), + static_cast(word_dict_len - 1)); + SetupLoDTensor(ctx_p1, + lod, + static_cast(0), + static_cast(word_dict_len - 1)); + SetupLoDTensor(ctx_p2, + lod, + static_cast(0), + static_cast(word_dict_len - 1)); + SetupLoDTensor(mark, + lod, + static_cast(0), + static_cast(mark_dict_len - 1)); std::vector cpu_feeds; cpu_feeds.push_back(&word); diff --git a/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc b/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc index 3b29d52880..0167bc0a51 100644 --- a/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc +++ b/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc @@ -31,7 +31,12 @@ TEST(inference, understand_sentiment) { paddle::framework::LoDTensor words; paddle::framework::LoD lod{{0, 4, 10}}; - SetupLoDTensor(words, lod, static_cast(0), static_cast(10)); + int64_t word_dict_len = 5147; + + SetupLoDTensor(words, + lod, + static_cast(0), + static_cast(word_dict_len - 1)); std::vector cpu_feeds; cpu_feeds.push_back(&words); diff --git 
a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py index f5fb3ed36d..9248898fdf 100644 --- a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py +++ b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py @@ -296,7 +296,6 @@ def infer(use_cuda, save_dirname=None): print(results[0].lod()) np_data = np.array(results[0]) print("Inference Shape: ", np_data.shape) - print("Inference results: ", np_data) def main(use_cuda): diff --git a/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py index 6e0206d41d..1776128813 100644 --- a/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py +++ b/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py @@ -93,7 +93,7 @@ def create_random_lodtensor(lod, place, low, high): return res -def train(word_dict, net_method, use_cuda, save_dirname=None): +def train(word_dict, nn_type, use_cuda, save_dirname=None): BATCH_SIZE = 128 PASS_NUM = 5 dict_dim = len(word_dict) @@ -102,6 +102,11 @@ def train(word_dict, net_method, use_cuda, save_dirname=None): data = fluid.layers.data( name="words", shape=[1], dtype="int64", lod_level=1) label = fluid.layers.data(name="label", shape=[1], dtype="int64") + + if nn_type == "conv": + net_method = convolution_net + else: + net_method = stacked_lstm_net cost, acc_out, prediction = net_method( data, label, input_dim=dict_dim, class_dim=class_dim) @@ -132,7 +137,7 @@ def train(word_dict, net_method, use_cuda, save_dirname=None): net_method.__name__)) -def infer(use_cuda, save_dirname=None): +def infer(word_dict, use_cuda, save_dirname=None): if save_dirname is None: return @@ -146,10 +151,11 @@ def infer(use_cuda, save_dirname=None): [inference_program, feed_target_names, fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + word_dict_len = len(word_dict) + lod = [0, 4, 10] - word_dict = paddle.dataset.imdb.word_dict() tensor_words = create_random_lodtensor( - lod, place, low=0, high=len(word_dict) - 1) + lod, place, low=0, high=word_dict_len - 1) # Construct feed as a dictionary of {feed_target_name: feed_target_data} # and results will contain a list of data corresponding to fetch_targets. 
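
Every `infer` function in these tests relies on the same contract: `load_inference_model` returns the deserialized program plus the names to feed and the variables to fetch, matching what `save_inference_model` recorded at the end of training. A condensed sketch of the round trip; the directory name follows this test file, while `prediction` stands in for whatever fetch variable the network built:

    import paddle.v2.fluid as fluid

    exe = fluid.Executor(fluid.CPUPlace())
    # At the end of train():
    fluid.io.save_inference_model("understand_sentiment_conv.inference.model",
                                  ["words"],     # feed_target_names
                                  [prediction],  # fetch_targets
                                  exe)
    # Later, in infer():
    [program, feed_names, fetch_targets] = fluid.io.load_inference_model(
        "understand_sentiment_conv.inference.model", exe)
    assert feed_names[0] == "words"
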
@@ -164,15 +170,15 @@ def infer(use_cuda, save_dirname=None): print("Inference results: ", np_data) -def main(word_dict, net_method, use_cuda): +def main(word_dict, nn_type, use_cuda): if use_cuda and not fluid.core.is_compiled_with_cuda(): return # Directory for saving the trained model - save_dirname = "understand_sentiment.inference.model" + save_dirname = "understand_sentiment_" + nn_type + ".inference.model" - train(word_dict, net_method, use_cuda, save_dirname) - infer(use_cuda, save_dirname) + train(word_dict, nn_type, use_cuda, save_dirname) + infer(word_dict, use_cuda, save_dirname) class TestUnderstandSentiment(unittest.TestCase): @@ -191,19 +197,19 @@ class TestUnderstandSentiment(unittest.TestCase): def test_conv_cpu(self): with self.new_program_scope(): - main(self.word_dict, net_method=convolution_net, use_cuda=False) + main(self.word_dict, nn_type="conv", use_cuda=False) def test_stacked_lstm_cpu(self): with self.new_program_scope(): - main(self.word_dict, net_method=stacked_lstm_net, use_cuda=False) + main(self.word_dict, nn_type="lstm", use_cuda=False) def test_conv_gpu(self): with self.new_program_scope(): - main(self.word_dict, net_method=convolution_net, use_cuda=True) + main(self.word_dict, nn_type="conv", use_cuda=True) def test_stacked_lstm_gpu(self): with self.new_program_scope(): - main(self.word_dict, net_method=stacked_lstm_net, use_cuda=True) + main(self.word_dict, nn_type="lstm", use_cuda=True) if __name__ == '__main__': -- GitLab From e9fa7a7b3a29f68c374e239054f261b41c03c985 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Mon, 12 Feb 2018 14:03:59 +0800 Subject: [PATCH 082/217] follow comments of qingqing and code refine --- python/paddle/v2/fluid/layers/detection.py | 74 ++++++++++++------ .../paddle/v2/fluid/tests/test_detection.py | 75 ++++--------------- 2 files changed, 66 insertions(+), 83 deletions(-) diff --git a/python/paddle/v2/fluid/layers/detection.py b/python/paddle/v2/fluid/layers/detection.py index b045e1c56c..aab9f032bd 100644 --- a/python/paddle/v2/fluid/layers/detection.py +++ b/python/paddle/v2/fluid/layers/detection.py @@ -18,10 +18,9 @@ All layers just related to the detection neural network. from ..layer_helper import LayerHelper from ..param_attr import ParamAttr from ..framework import Variable -from ..nets import img_conv_with_bn -from tensor import concat -from ops import reshape -from nn import transpose +import tensor +import ops +import nn import math __all__ = [ @@ -184,10 +183,10 @@ def prior_box(inputs, name(str, optional, None): Name of the prior box layer. Returns: - boxes(Variable): the output prior boxes of PriorBoxOp. + boxes(Variable): the output prior boxes of PriorBox. The layout is [num_priors, 4]. num_priors is the total box count of each position of inputs. - Variances(Variable): the expanded variances of PriorBoxOp. + Variances(Variable): the expanded variances of PriorBox. The layout is [num_priors, 4]. num_priors is the total box count of each position of inputs @@ -250,7 +249,7 @@ def prior_box(inputs, new_shape = [ -1, reduce(lambda x, y: x * y, input.shape[axis:len(input.shape)]) ] - out = reshape(x=input, shape=new_shape) + out = ops.reshape(x=input, shape=new_shape) return out assert isinstance(inputs, list), 'inputs should be a list.' 
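
`_reshape_with_axis_` above collapses every dimension from `axis` onward into one, so the per-layer box tensors can be concatenated row-wise. The same computation in plain NumPy, as a sketch with made-up shapes:

    import numpy as np

    def reshape_with_axis(x, axis):
        # new_shape = [-1, prod(shape[axis:])], as in the layer code above.
        tail = int(np.prod(x.shape[axis:]))
        return x.reshape(-1, tail)

    boxes = np.zeros((1, 2, 3, 4))                 # [N, H, W, 4]-style layout
    print(reshape_with_axis(boxes, axis=3).shape)  # -> (6, 4)
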
@@ -326,8 +325,8 @@ def prior_box(inputs, reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3)) reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3)) - box = concat(reshaped_boxes) - var = concat(reshaped_vars) + box = tensor.concat(reshaped_boxes) + var = tensor.concat(reshaped_vars) return box, var @@ -345,12 +344,14 @@ def multi_box_head(inputs, pad=1, stride=1, use_batchnorm=False, - base_size=None, - name=None): + base_size=None): """ **Multi Box Head** - input many Variable, and return mbox_loc, mbox_conf + Generate prior boxes' location and confidence for SSD(Single + Shot MultiBox Detector)algorithm. The details of this algorithm, + please refer the section 2.1 of SSD paper (SSD: Single Shot + MultiBox Detector)`_ . Args: inputs(list): The list of input Variables, the format @@ -376,12 +377,12 @@ def multi_box_head(inputs, Returns: - mbox_loc(list): the output prior boxes of PriorBoxOp. The layout is - [num_priors, 4]. num_priors is the total box count of each - position of inputs. - mbox_conf(list): the expanded variances of PriorBoxOp. The layout - is [num_priors, 4]. num_priors is the total box count of each - position of inputs + mbox_loc(list): The predicted boxes' location of the inputs. + The layout of each element is [N, H, W, Priors]. Priors + is the number of predicted boxof each position of each input. + mbox_conf(list): The predicted boxes' confidence of the inputs. + The layout of each element is [N, H, W, Priors]. Priors + is the number of predicted box of each position of each input. Examples: .. code-block:: python @@ -396,6 +397,35 @@ def multi_box_head(inputs, flip=True) """ + def _conv_with_bn_(input, + conv_num_filter, + conv_padding=1, + conv_filter_size=3, + conv_stride=1, + conv_act=None, + param_attr=None, + conv_with_batchnorm=False, + conv_batchnorm_drop_rate=0.0, + use_cudnn=True): + + conv2d = nn.conv2d( + input=input, + num_filters=conv_num_filter, + filter_size=conv_filter_size, + padding=conv_padding, + stride=conv_stride, + param_attr=param_attr, + act=conv_act, + use_cudnn=use_cudnn) + + if conv_with_batchnorm: + conv2d = nn.batch_norm(input=conv2d) + drop_rate = conv_batchnorm_drop_rate + if abs(drop_rate) > 1e-5: + conv2d = nn.dropout(x=conv2d, dropout_prob=drop_rate) + + return conv2d + if not (isinstance(inputs, list)): raise ValueError('inputs should be a list.') @@ -469,26 +499,26 @@ def multi_box_head(inputs, if share_location: num_loc_output *= num_classes - mbox_loc = img_conv_with_bn( + mbox_loc = _conv_with_bn_( input=input, conv_num_filter=num_loc_output, conv_padding=pad, conv_stride=stride, conv_filter_size=kernel_size, conv_with_batchnorm=use_batchnorm) - mbox_loc = transpose(mbox_loc, perm=[0, 2, 3, 1]) + mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1]) mbox_locs.append(mbox_loc) # get conf_loc num_conf_output = num_priors_per_location * num_classes - conf_loc = img_conv_with_bn( + conf_loc = _conv_with_bn_( input=input, conv_num_filter=num_conf_output, conv_padding=pad, conv_stride=stride, conv_filter_size=kernel_size, conv_with_batchnorm=use_batchnorm) - conf_loc = transpose(conf_loc, perm=[0, 2, 3, 1]) + conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1]) mbox_confs.append(conf_loc) return mbox_locs, mbox_confs diff --git a/python/paddle/v2/fluid/tests/test_detection.py b/python/paddle/v2/fluid/tests/test_detection.py index d50efb3f74..2f1ecd6677 100644 --- a/python/paddle/v2/fluid/tests/test_detection.py +++ b/python/paddle/v2/fluid/tests/test_detection.py @@ -47,7 +47,7 @@ class 
TestBook(unittest.TestCase): out = layers.detection_output( scores=scores, loc=loc, prior_box=pb, prior_box_var=pbv) self.assertIsNotNone(out) - print(str(program)) + # print(str(program)) class TestPriorBox(unittest.TestCase): @@ -62,36 +62,11 @@ class TestPriorBox(unittest.TestCase): def prior_box_output(self, data_shape): images = fluid.layers.data( name='pixel', shape=data_shape, dtype='float32') - conv1 = fluid.layers.conv2d( - input=images, - num_filters=3, - filter_size=3, - stride=2, - use_cudnn=False) - conv2 = fluid.layers.conv2d( - input=conv1, - num_filters=3, - filter_size=3, - stride=2, - use_cudnn=False) - conv3 = fluid.layers.conv2d( - input=conv2, - num_filters=3, - filter_size=3, - stride=2, - use_cudnn=False) - conv4 = fluid.layers.conv2d( - input=conv3, - num_filters=3, - filter_size=3, - stride=2, - use_cudnn=False) - conv5 = fluid.layers.conv2d( - input=conv4, - num_filters=3, - filter_size=3, - stride=2, - use_cudnn=False) + conv1 = fluid.layers.conv2d(images, 3, 3, 2) + conv2 = fluid.layers.conv2d(conv1, 3, 3, 2) + conv3 = fluid.layers.conv2d(conv2, 3, 3, 2) + conv4 = fluid.layers.conv2d(conv3, 3, 3, 2) + conv5 = fluid.layers.conv2d(conv4, 3, 3, 2) box, var = detection.prior_box( inputs=[conv1, conv2, conv3, conv4, conv5, conv5], @@ -112,39 +87,17 @@ class TestMultiBoxHead(unittest.TestCase): data_shape = [3, 224, 224] mbox_locs, mbox_confs = self.multi_box_output(data_shape) + for loc, conf in zip(mbox_locs, mbox_confs): + assert loc.shape[1:3] == conf.shape[1:3] + def multi_box_output(self, data_shape): images = fluid.layers.data( name='pixel', shape=data_shape, dtype='float32') - conv1 = fluid.layers.conv2d( - input=images, - num_filters=3, - filter_size=3, - stride=2, - use_cudnn=False) - conv2 = fluid.layers.conv2d( - input=conv1, - num_filters=3, - filter_size=3, - stride=2, - use_cudnn=False) - conv3 = fluid.layers.conv2d( - input=conv2, - num_filters=3, - filter_size=3, - stride=2, - use_cudnn=False) - conv4 = fluid.layers.conv2d( - input=conv3, - num_filters=3, - filter_size=3, - stride=2, - use_cudnn=False) - conv5 = fluid.layers.conv2d( - input=conv4, - num_filters=3, - filter_size=3, - stride=2, - use_cudnn=False) + conv1 = fluid.layers.conv2d(images, 3, 3, 2) + conv2 = fluid.layers.conv2d(conv1, 3, 3, 2) + conv3 = fluid.layers.conv2d(conv2, 3, 3, 2) + conv4 = fluid.layers.conv2d(conv3, 3, 3, 2) + conv5 = fluid.layers.conv2d(conv4, 3, 3, 2) mbox_locs, mbox_confs = detection.multi_box_head( inputs=[conv1, conv2, conv3, conv4, conv5, conv5], -- GitLab From 865dfbe5c7ab60c00d860b1aa2f60c8dd677841c Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Mon, 12 Feb 2018 08:12:58 +0000 Subject: [PATCH 083/217] Use a new scope for inference in python unittest to avoid changing the value of variables for training. 
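
This is the pattern the diffs below apply to every `infer` function: load and run the inference program inside a fresh `Scope`, so the parameters `load_inference_model` pulls from disk cannot overwrite the training parameters still living in the default scope. A condensed sketch, where `save_dirname` and `feed_data` are placeholders:

    import paddle.v2.fluid as fluid

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Everything created or loaded here lives in inference_scope only.
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
        results = exe.run(inference_program,
                          feed={feed_target_names[0]: feed_data},
                          fetch_list=fetch_targets)
    # Leaving the block restores the previous (training) scope untouched.
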
--- .../tests/book/notest_rnn_encoder_decoer.py | 54 +++++----- .../v2/fluid/tests/book/test_fit_a_line.py | 37 +++---- .../tests/book/test_image_classification.py | 38 +++---- .../tests/book/test_label_semantic_roles.py | 98 ++++++++++--------- .../fluid/tests/book/test_recognize_digits.py | 40 ++++---- .../tests/book/test_recommender_system.py | 92 ++++++++--------- .../tests/book/test_understand_sentiment.py | 50 +++++----- .../v2/fluid/tests/book/test_word2vec.py | 78 ++++++++------- 8 files changed, 258 insertions(+), 229 deletions(-) diff --git a/python/paddle/v2/fluid/tests/book/notest_rnn_encoder_decoer.py b/python/paddle/v2/fluid/tests/book/notest_rnn_encoder_decoer.py index 7fe43c680c..6d6ad50476 100644 --- a/python/paddle/v2/fluid/tests/book/notest_rnn_encoder_decoer.py +++ b/python/paddle/v2/fluid/tests/book/notest_rnn_encoder_decoer.py @@ -228,32 +228,34 @@ def infer(use_cuda, save_dirname=None): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) - # Use fluid.io.load_inference_model to obtain the inference program desc, - # the feed_target_names (the names of variables that will be feeded - # data using feed operators), and the fetch_targets (variables that - # we want to obtain data from using fetch operators). - [inference_program, feed_target_names, - fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) - - lod = [0, 4, 10] - word_data = create_random_lodtensor(lod, place, low=0, high=1) - trg_word = create_random_lodtensor(lod, place, low=0, high=1) - - # Construct feed as a dictionary of {feed_target_name: feed_target_data} - # and results will contain a list of data corresponding to fetch_targets. - assert feed_target_names[0] == 'source_sequence' - assert feed_target_names[1] == 'target_sequence' - results = exe.run(inference_program, - feed={ - feed_target_names[0]: word_data, - feed_target_names[1]: trg_word, - }, - fetch_list=fetch_targets, - return_numpy=False) - print(results[0].lod()) - np_data = np.array(results[0]) - print("Inference shape: ", np_data.shape) - print("Inference results: ", np_data) + inference_scope = fluid.core.Scope() + with fluid.scope_guard(inference_scope): + # Use fluid.io.load_inference_model to obtain the inference program desc, + # the feed_target_names (the names of variables that will be feeded + # data using feed operators), and the fetch_targets (variables that + # we want to obtain data from using fetch operators). + [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + + lod = [0, 4, 10] + word_data = create_random_lodtensor(lod, place, low=0, high=1) + trg_word = create_random_lodtensor(lod, place, low=0, high=1) + + # Construct feed as a dictionary of {feed_target_name: feed_target_data} + # and results will contain a list of data corresponding to fetch_targets. 
+ assert feed_target_names[0] == 'source_sequence' + assert feed_target_names[1] == 'target_sequence' + results = exe.run(inference_program, + feed={ + feed_target_names[0]: word_data, + feed_target_names[1]: trg_word, + }, + fetch_list=fetch_targets, + return_numpy=False) + print(results[0].lod()) + np_data = np.array(results[0]) + print("Inference shape: ", np_data.shape) + print("Inference results: ", np_data) def main(use_cuda): diff --git a/python/paddle/v2/fluid/tests/book/test_fit_a_line.py b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py index b3332b4810..2c8a192558 100644 --- a/python/paddle/v2/fluid/tests/book/test_fit_a_line.py +++ b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py @@ -72,23 +72,26 @@ def infer(use_cuda, save_dirname=None): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) - # Use fluid.io.load_inference_model to obtain the inference program desc, - # the feed_target_names (the names of variables that will be feeded - # data using feed operators), and the fetch_targets (variables that - # we want to obtain data from using fetch operators). - [inference_program, feed_target_names, - fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) - - # The input's dimension should be 2-D and the second dim is 13 - # The input data should be >= 0 - batch_size = 10 - tensor_x = numpy.random.uniform(0, 10, [batch_size, 13]).astype("float32") - assert feed_target_names[0] == 'x' - results = exe.run(inference_program, - feed={feed_target_names[0]: tensor_x}, - fetch_list=fetch_targets) - print("infer shape: ", results[0].shape) - print("infer results: ", results[0]) + inference_scope = fluid.core.Scope() + with fluid.scope_guard(inference_scope): + # Use fluid.io.load_inference_model to obtain the inference program desc, + # the feed_target_names (the names of variables that will be feeded + # data using feed operators), and the fetch_targets (variables that + # we want to obtain data from using fetch operators). + [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + + # The input's dimension should be 2-D and the second dim is 13 + # The input data should be >= 0 + batch_size = 10 + tensor_x = numpy.random.uniform(0, 10, + [batch_size, 13]).astype("float32") + assert feed_target_names[0] == 'x' + results = exe.run(inference_program, + feed={feed_target_names[0]: tensor_x}, + fetch_list=fetch_targets) + print("infer shape: ", results[0].shape) + print("infer results: ", results[0]) def main(use_cuda): diff --git a/python/paddle/v2/fluid/tests/book/test_image_classification.py b/python/paddle/v2/fluid/tests/book/test_image_classification.py index 4b764ee3b3..aa3b023919 100644 --- a/python/paddle/v2/fluid/tests/book/test_image_classification.py +++ b/python/paddle/v2/fluid/tests/book/test_image_classification.py @@ -174,24 +174,26 @@ def infer(use_cuda, save_dirname=None): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) - # Use fluid.io.load_inference_model to obtain the inference program desc, - # the feed_target_names (the names of variables that will be feeded - # data using feed operators), and the fetch_targets (variables that - # we want to obtain data from using fetch operators). - [inference_program, feed_target_names, - fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) - - # The input's dimension of conv should be 4-D or 5-D. 
- # Use normilized image pixels as input data, which should be in the range [0, 1.0]. - batch_size = 1 - tensor_img = numpy.random.rand(batch_size, 3, 32, 32).astype("float32") - - # Construct feed as a dictionary of {feed_target_name: feed_target_data} - # and results will contain a list of data corresponding to fetch_targets. - results = exe.run(inference_program, - feed={feed_target_names[0]: tensor_img}, - fetch_list=fetch_targets) - print("infer results: ", results[0]) + inference_scope = fluid.core.Scope() + with fluid.scope_guard(inference_scope): + # Use fluid.io.load_inference_model to obtain the inference program desc, + # the feed_target_names (the names of variables that will be feeded + # data using feed operators), and the fetch_targets (variables that + # we want to obtain data from using fetch operators). + [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + + # The input's dimension of conv should be 4-D or 5-D. + # Use normilized image pixels as input data, which should be in the range [0, 1.0]. + batch_size = 1 + tensor_img = numpy.random.rand(batch_size, 3, 32, 32).astype("float32") + + # Construct feed as a dictionary of {feed_target_name: feed_target_data} + # and results will contain a list of data corresponding to fetch_targets. + results = exe.run(inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets) + print("infer results: ", results[0]) def main(net_type, use_cuda): diff --git a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py index 9248898fdf..d03fd2f422 100644 --- a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py +++ b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py @@ -252,50 +252,60 @@ def infer(use_cuda, save_dirname=None): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) - # Use fluid.io.load_inference_model to obtain the inference program desc, - # the feed_target_names (the names of variables that will be feeded - # data using feed operators), and the fetch_targets (variables that - # we want to obtain data from using fetch operators). - [inference_program, feed_target_names, - fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) - - lod = [0, 4, 10] - word = create_random_lodtensor(lod, place, low=0, high=word_dict_len - 1) - pred = create_random_lodtensor(lod, place, low=0, high=pred_dict_len - 1) - ctx_n2 = create_random_lodtensor(lod, place, low=0, high=word_dict_len - 1) - ctx_n1 = create_random_lodtensor(lod, place, low=0, high=word_dict_len - 1) - ctx_0 = create_random_lodtensor(lod, place, low=0, high=word_dict_len - 1) - ctx_p1 = create_random_lodtensor(lod, place, low=0, high=word_dict_len - 1) - ctx_p2 = create_random_lodtensor(lod, place, low=0, high=word_dict_len - 1) - mark = create_random_lodtensor(lod, place, low=0, high=mark_dict_len - 1) - - # Construct feed as a dictionary of {feed_target_name: feed_target_data} - # and results will contain a list of data corresponding to fetch_targets. 
- assert feed_target_names[0] == 'word_data' - assert feed_target_names[1] == 'verb_data' - assert feed_target_names[2] == 'ctx_n2_data' - assert feed_target_names[3] == 'ctx_n1_data' - assert feed_target_names[4] == 'ctx_0_data' - assert feed_target_names[5] == 'ctx_p1_data' - assert feed_target_names[6] == 'ctx_p2_data' - assert feed_target_names[7] == 'mark_data' - - results = exe.run(inference_program, - feed={ - feed_target_names[0]: word, - feed_target_names[1]: pred, - feed_target_names[2]: ctx_n2, - feed_target_names[3]: ctx_n1, - feed_target_names[4]: ctx_0, - feed_target_names[5]: ctx_p1, - feed_target_names[6]: ctx_p2, - feed_target_names[7]: mark - }, - fetch_list=fetch_targets, - return_numpy=False) - print(results[0].lod()) - np_data = np.array(results[0]) - print("Inference Shape: ", np_data.shape) + inference_scope = fluid.core.Scope() + with fluid.scope_guard(inference_scope): + # Use fluid.io.load_inference_model to obtain the inference program desc, + # the feed_target_names (the names of variables that will be feeded + # data using feed operators), and the fetch_targets (variables that + # we want to obtain data from using fetch operators). + [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + + lod = [0, 4, 10] + word = create_random_lodtensor( + lod, place, low=0, high=word_dict_len - 1) + pred = create_random_lodtensor( + lod, place, low=0, high=pred_dict_len - 1) + ctx_n2 = create_random_lodtensor( + lod, place, low=0, high=word_dict_len - 1) + ctx_n1 = create_random_lodtensor( + lod, place, low=0, high=word_dict_len - 1) + ctx_0 = create_random_lodtensor( + lod, place, low=0, high=word_dict_len - 1) + ctx_p1 = create_random_lodtensor( + lod, place, low=0, high=word_dict_len - 1) + ctx_p2 = create_random_lodtensor( + lod, place, low=0, high=word_dict_len - 1) + mark = create_random_lodtensor( + lod, place, low=0, high=mark_dict_len - 1) + + # Construct feed as a dictionary of {feed_target_name: feed_target_data} + # and results will contain a list of data corresponding to fetch_targets. 
+ assert feed_target_names[0] == 'word_data' + assert feed_target_names[1] == 'verb_data' + assert feed_target_names[2] == 'ctx_n2_data' + assert feed_target_names[3] == 'ctx_n1_data' + assert feed_target_names[4] == 'ctx_0_data' + assert feed_target_names[5] == 'ctx_p1_data' + assert feed_target_names[6] == 'ctx_p2_data' + assert feed_target_names[7] == 'mark_data' + + results = exe.run(inference_program, + feed={ + feed_target_names[0]: word, + feed_target_names[1]: pred, + feed_target_names[2]: ctx_n2, + feed_target_names[3]: ctx_n1, + feed_target_names[4]: ctx_0, + feed_target_names[5]: ctx_p1, + feed_target_names[6]: ctx_p2, + feed_target_names[7]: mark + }, + fetch_list=fetch_targets, + return_numpy=False) + print(results[0].lod()) + np_data = np.array(results[0]) + print("Inference Shape: ", np_data.shape) def main(use_cuda): diff --git a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py index 244c1749cd..8586ad4dfe 100644 --- a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py +++ b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py @@ -165,25 +165,27 @@ def infer(use_cuda, save_dirname=None, param_filename=None): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) - # Use fluid.io.load_inference_model to obtain the inference program desc, - # the feed_target_names (the names of variables that will be feeded - # data using feed operators), and the fetch_targets (variables that - # we want to obtain data from using fetch operators). - [inference_program, feed_target_names, fetch_targets - ] = fluid.io.load_inference_model(save_dirname, exe, param_filename) - - # The input's dimension of conv should be 4-D or 5-D. - # Use normilized image pixels as input data, which should be in the range [-1.0, 1.0]. - batch_size = 1 - tensor_img = numpy.random.uniform(-1.0, 1.0, - [batch_size, 1, 28, 28]).astype("float32") - - # Construct feed as a dictionary of {feed_target_name: feed_target_data} - # and results will contain a list of data corresponding to fetch_targets. - results = exe.run(inference_program, - feed={feed_target_names[0]: tensor_img}, - fetch_list=fetch_targets) - print("infer results: ", results[0]) + inference_scope = fluid.core.Scope() + with fluid.scope_guard(inference_scope): + # Use fluid.io.load_inference_model to obtain the inference program desc, + # the feed_target_names (the names of variables that will be feeded + # data using feed operators), and the fetch_targets (variables that + # we want to obtain data from using fetch operators). + [inference_program, feed_target_names, fetch_targets + ] = fluid.io.load_inference_model(save_dirname, exe, param_filename) + + # The input's dimension of conv should be 4-D or 5-D. + # Use normilized image pixels as input data, which should be in the range [-1.0, 1.0]. + batch_size = 1 + tensor_img = numpy.random.uniform( + -1.0, 1.0, [batch_size, 1, 28, 28]).astype("float32") + + # Construct feed as a dictionary of {feed_target_name: feed_target_data} + # and results will contain a list of data corresponding to fetch_targets. 
+ results = exe.run(inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets) + print("infer results: ", results[0]) def main(use_cuda, parallel, nn_type, combine): diff --git a/python/paddle/v2/fluid/tests/book/test_recommender_system.py b/python/paddle/v2/fluid/tests/book/test_recommender_system.py index 612d51e08e..72c2fea753 100644 --- a/python/paddle/v2/fluid/tests/book/test_recommender_system.py +++ b/python/paddle/v2/fluid/tests/book/test_recommender_system.py @@ -251,13 +251,6 @@ def infer(use_cuda, save_dirname=None): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) - # Use fluid.io.load_inference_model to obtain the inference program desc, - # the feed_target_names (the names of variables that will be feeded - # data using feed operators), and the fetch_targets (variables that - # we want to obtain data from using fetch operators). - [inference_program, feed_target_names, - fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) - def create_lod_tensor(data, lod=None): tensor = fluid.LoDTensor() if lod is None: @@ -275,44 +268,53 @@ def infer(use_cuda, save_dirname=None): tensor.set(flattened_data, place) return tensor - # Use the first data from paddle.dataset.movielens.test() as input - assert feed_target_names[0] == "user_id" - user_id = create_lod_tensor([[1]]) - - assert feed_target_names[1] == "gender_id" - gender_id = create_lod_tensor([[1]]) - - assert feed_target_names[2] == "age_id" - age_id = create_lod_tensor([[0]]) - - assert feed_target_names[3] == "job_id" - job_id = create_lod_tensor([[10]]) - - assert feed_target_names[4] == "movie_id" - movie_id = create_lod_tensor([[783]]) - - assert feed_target_names[5] == "category_id" - category_id = create_lod_tensor([[10], [8], [9]], [[0, 3]]) - - assert feed_target_names[6] == "movie_title" - movie_title = create_lod_tensor([[1069], [4140], [2923], [710], [988]], - [[0, 5]]) - - # Construct feed as a dictionary of {feed_target_name: feed_target_data} - # and results will contain a list of data corresponding to fetch_targets. - results = exe.run(inference_program, - feed={ - feed_target_names[0]: user_id, - feed_target_names[1]: gender_id, - feed_target_names[2]: age_id, - feed_target_names[3]: job_id, - feed_target_names[4]: movie_id, - feed_target_names[5]: category_id, - feed_target_names[6]: movie_title - }, - fetch_list=fetch_targets, - return_numpy=False) - print("inferred score: ", np.array(results[0])) + inference_scope = fluid.core.Scope() + with fluid.scope_guard(inference_scope): + # Use fluid.io.load_inference_model to obtain the inference program desc, + # the feed_target_names (the names of variables that will be feeded + # data using feed operators), and the fetch_targets (variables that + # we want to obtain data from using fetch operators). 
+ [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + + # Use the first data from paddle.dataset.movielens.test() as input + assert feed_target_names[0] == "user_id" + user_id = create_lod_tensor([[1]]) + + assert feed_target_names[1] == "gender_id" + gender_id = create_lod_tensor([[1]]) + + assert feed_target_names[2] == "age_id" + age_id = create_lod_tensor([[0]]) + + assert feed_target_names[3] == "job_id" + job_id = create_lod_tensor([[10]]) + + assert feed_target_names[4] == "movie_id" + movie_id = create_lod_tensor([[783]]) + + assert feed_target_names[5] == "category_id" + category_id = create_lod_tensor([[10], [8], [9]], [[0, 3]]) + + assert feed_target_names[6] == "movie_title" + movie_title = create_lod_tensor([[1069], [4140], [2923], [710], [988]], + [[0, 5]]) + + # Construct feed as a dictionary of {feed_target_name: feed_target_data} + # and results will contain a list of data corresponding to fetch_targets. + results = exe.run(inference_program, + feed={ + feed_target_names[0]: user_id, + feed_target_names[1]: gender_id, + feed_target_names[2]: age_id, + feed_target_names[3]: job_id, + feed_target_names[4]: movie_id, + feed_target_names[5]: category_id, + feed_target_names[6]: movie_title + }, + fetch_list=fetch_targets, + return_numpy=False) + print("inferred score: ", np.array(results[0])) def main(use_cuda): diff --git a/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py index 1776128813..a879e110f7 100644 --- a/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py +++ b/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py @@ -144,30 +144,32 @@ def infer(word_dict, use_cuda, save_dirname=None): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) - # Use fluid.io.load_inference_model to obtain the inference program desc, - # the feed_target_names (the names of variables that will be feeded - # data using feed operators), and the fetch_targets (variables that - # we want to obtain data from using fetch operators). - [inference_program, feed_target_names, - fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) - - word_dict_len = len(word_dict) - - lod = [0, 4, 10] - tensor_words = create_random_lodtensor( - lod, place, low=0, high=word_dict_len - 1) - - # Construct feed as a dictionary of {feed_target_name: feed_target_data} - # and results will contain a list of data corresponding to fetch_targets. - assert feed_target_names[0] == "words" - results = exe.run(inference_program, - feed={feed_target_names[0]: tensor_words}, - fetch_list=fetch_targets, - return_numpy=False) - print(results[0].lod()) - np_data = np.array(results[0]) - print("Inference Shape: ", np_data.shape) - print("Inference results: ", np_data) + inference_scope = fluid.core.Scope() + with fluid.scope_guard(inference_scope): + # Use fluid.io.load_inference_model to obtain the inference program desc, + # the feed_target_names (the names of variables that will be feeded + # data using feed operators), and the fetch_targets (variables that + # we want to obtain data from using fetch operators). 
+ [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + + word_dict_len = len(word_dict) + + lod = [0, 4, 10] + tensor_words = create_random_lodtensor( + lod, place, low=0, high=word_dict_len - 1) + + # Construct feed as a dictionary of {feed_target_name: feed_target_data} + # and results will contain a list of data corresponding to fetch_targets. + assert feed_target_names[0] == "words" + results = exe.run(inference_program, + feed={feed_target_names[0]: tensor_words}, + fetch_list=fetch_targets, + return_numpy=False) + print(results[0].lod()) + np_data = np.array(results[0]) + print("Inference Shape: ", np_data.shape) + print("Inference results: ", np_data) def main(word_dict, nn_type, use_cuda): diff --git a/python/paddle/v2/fluid/tests/book/test_word2vec.py b/python/paddle/v2/fluid/tests/book/test_word2vec.py index d30f623085..a9af95a7d9 100644 --- a/python/paddle/v2/fluid/tests/book/test_word2vec.py +++ b/python/paddle/v2/fluid/tests/book/test_word2vec.py @@ -138,42 +138,48 @@ def infer(use_cuda, save_dirname=None): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) - # Use fluid.io.load_inference_model to obtain the inference program desc, - # the feed_target_names (the names of variables that will be feeded - # data using feed operators), and the fetch_targets (variables that - # we want to obtain data from using fetch operators). - [inference_program, feed_target_names, - fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) - - word_dict = paddle.dataset.imikolov.build_dict() - dict_size = len(word_dict) - - # Setup inputs, by creating 4 words, the lod of which should be [0, 1] - lod = [0, 1] - first_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1) - second_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1) - third_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1) - fourth_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1) - - assert feed_target_names[0] == 'firstw' - assert feed_target_names[1] == 'secondw' - assert feed_target_names[2] == 'thirdw' - assert feed_target_names[3] == 'forthw' - - # Construct feed as a dictionary of {feed_target_name: feed_target_data} - # and results will contain a list of data corresponding to fetch_targets. - results = exe.run(inference_program, - feed={ - feed_target_names[0]: first_word, - feed_target_names[1]: second_word, - feed_target_names[2]: third_word, - feed_target_names[3]: fourth_word - }, - fetch_list=fetch_targets, - return_numpy=False) - print(results[0].lod()) - np_data = np.array(results[0]) - print("Inference Shape: ", np_data.shape) + inference_scope = fluid.core.Scope() + with fluid.scope_guard(inference_scope): + # Use fluid.io.load_inference_model to obtain the inference program desc, + # the feed_target_names (the names of variables that will be feeded + # data using feed operators), and the fetch_targets (variables that + # we want to obtain data from using fetch operators). 
+ [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + + word_dict = paddle.dataset.imikolov.build_dict() + dict_size = len(word_dict) + + # Setup inputs, by creating 4 words, the lod of which should be [0, 1] + lod = [0, 1] + first_word = create_random_lodtensor( + lod, place, low=0, high=dict_size - 1) + second_word = create_random_lodtensor( + lod, place, low=0, high=dict_size - 1) + third_word = create_random_lodtensor( + lod, place, low=0, high=dict_size - 1) + fourth_word = create_random_lodtensor( + lod, place, low=0, high=dict_size - 1) + + assert feed_target_names[0] == 'firstw' + assert feed_target_names[1] == 'secondw' + assert feed_target_names[2] == 'thirdw' + assert feed_target_names[3] == 'forthw' + + # Construct feed as a dictionary of {feed_target_name: feed_target_data} + # and results will contain a list of data corresponding to fetch_targets. + results = exe.run(inference_program, + feed={ + feed_target_names[0]: first_word, + feed_target_names[1]: second_word, + feed_target_names[2]: third_word, + feed_target_names[3]: fourth_word + }, + fetch_list=fetch_targets, + return_numpy=False) + print(results[0].lod()) + np_data = np.array(results[0]) + print("Inference Shape: ", np_data.shape) def main(use_cuda, is_sparse, is_parallel): -- GitLab From 8c302d4845b579a7d90d8123f5f31d7b2b4eb193 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Mon, 12 Feb 2018 16:19:37 +0800 Subject: [PATCH 084/217] remove kwargs in layer apis --- python/paddle/v2/fluid/layers/nn.py | 54 ++++++++++++++--------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index 5ebd329fc0..8d1cd85fce 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -829,12 +829,12 @@ def crf_decoding(input, param_attr, label=None): return viterbi_path -def cos_sim(X, Y, **kwargs): +def cos_sim(X, Y): """ This function performs the cosine similarity between two tensors X and Y and returns that as the output. """ - helper = LayerHelper('cos_sim', **kwargs) + helper = LayerHelper('cos_sim', **locals()) out = helper.create_tmp_variable(dtype=X.dtype) xnorm = helper.create_tmp_variable(dtype=X.dtype) ynorm = helper.create_tmp_variable(dtype=X.dtype) @@ -848,7 +848,7 @@ def cos_sim(X, Y, **kwargs): return out -def dropout(x, dropout_prob, is_test=False, seed=None, **kwargs): +def dropout(x, dropout_prob, is_test=False, seed=None): """ Computes dropout. @@ -877,7 +877,7 @@ def dropout(x, dropout_prob, is_test=False, seed=None, **kwargs): droped = fluid.layers.dropout(input=x, dropout_rate=0.5) """ - helper = LayerHelper('dropout', **kwargs) + helper = LayerHelper('dropout', **locals()) out = helper.create_tmp_variable(dtype=x.dtype) mask = helper.create_tmp_variable(dtype=x.dtype, stop_gradient=True) helper.append_op( @@ -894,7 +894,7 @@ def dropout(x, dropout_prob, is_test=False, seed=None, **kwargs): return out -def cross_entropy(input, label, **kwargs): +def cross_entropy(input, label, soft_label=False): """ **Cross Entropy Layer** @@ -903,15 +903,15 @@ def cross_entropy(input, label, **kwargs): computation. 1) One-hot cross-entropy: - `soft_label = False`, `Label[i, 0]` indicates the class index for sample i: + `soft_label = False`, `Label[i, 0]` indicates the class index for sample i: .. 
math:: Y[i] = -\log(X[i, Label[i]]) 2) Soft-label cross-entropy: - `soft_label = True`, `Label[i, j]` indicates the soft label of class j - for sample i: + `soft_label = True`, `Label[i, j]` indicates the soft label of class j + for sample i: .. math:: @@ -921,8 +921,8 @@ def cross_entropy(input, label, **kwargs): equals one. 3) One-hot cross-entropy with vecterized `label`: - As a special case of 2), when each row of 'label' has only one - non-zero element which is equal to 1, soft-label cross-entropy degenerates + As a special case of 2), when each row of 'label' has only one + non-zero element which is equal to 1, soft-label cross-entropy degenerates to a one-hot cross-entropy with one-hot label representation. Args: @@ -936,7 +936,7 @@ def cross_entropy(input, label, **kwargs): tensor with shape [N x 1]. When `soft_label` is set to `True`, `label` is a tensor with shape [N x D]. - soft_label (bool, via `**kwargs`): a flag indicating whether to + soft_label (bool): a flag indicating whether to interpretate the given labels as soft labels, default `False`. @@ -956,18 +956,18 @@ def cross_entropy(input, label, **kwargs): predict = fluid.layers.fc(input=net, size=classdim, act='softmax') cost = fluid.layers.cross_entropy(input=predict, label=label) """ - helper = LayerHelper('cross_entropy', **kwargs) + helper = LayerHelper('cross_entropy', **locals()) out = helper.create_tmp_variable(dtype=input.dtype) helper.append_op( type='cross_entropy', inputs={'X': [input], 'Label': [label]}, outputs={'Y': [out]}, - attrs=kwargs) + attrs={"soft_label": soft_label}) return out -def square_error_cost(input, label, **kwargs): +def square_error_cost(input, label): """ **Square error cost layer** @@ -1002,7 +1002,7 @@ def square_error_cost(input, label, **kwargs): cost = layers.square_error_cost(input=y_predict, label=y) """ - helper = LayerHelper('square_error_cost', **kwargs) + helper = LayerHelper('square_error_cost', **locals()) minus_out = helper.create_tmp_variable(dtype=input.dtype) helper.append_op( type='elementwise_sub', @@ -1017,12 +1017,12 @@ def square_error_cost(input, label, **kwargs): return square_out -def accuracy(input, label, k=1, correct=None, total=None, **kwargs): +def accuracy(input, label, k=1, correct=None, total=None): """ This function computes the accuracy using the input and label. The output is the top_k inputs and their indices. """ - helper = LayerHelper("accuracy", **kwargs) + helper = LayerHelper("accuracy", **locals()) topk_out = helper.create_tmp_variable(dtype=input.dtype) topk_indices = helper.create_tmp_variable(dtype="int64") helper.append_op( @@ -1055,13 +1055,12 @@ def chunk_eval(input, label, chunk_scheme, num_chunk_types, - excluded_chunk_types=None, - **kwargs): + excluded_chunk_types=None): """ This function computes and outputs the precision, recall and F1-score of chunk detection. """ - helper = LayerHelper("chunk_eval", **kwargs) + helper = LayerHelper("chunk_eval", **locals()) # prepare output precision = helper.create_tmp_variable(dtype="float32") @@ -1293,7 +1292,7 @@ def conv2d(input, return helper.append_activation(pre_act) -def sequence_pool(input, pool_type, **kwargs): +def sequence_pool(input, pool_type): """ This function add the operator for sequence pooling. 
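The practical effect of dropping `**kwargs` is that op attributes become named, validated parameters. A hedged usage sketch (the network variables `net`, `classdim` and `label` are assumed):

.. code-block:: python

    predict = fluid.layers.fc(input=net, size=classdim, act='softmax')
    # soft_label is now an explicit argument forwarded as the op attribute;
    # a misspelled keyword raises TypeError instead of being silently
    # collected by **kwargs.
    cost = fluid.layers.cross_entropy(
        input=predict, label=label, soft_label=False)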
It pools features of all time-steps of each instance, and is applied @@ -1343,7 +1342,7 @@ def sequence_pool(input, pool_type, **kwargs): sqrt_x = fluid.layers.sequence_pool(input=x, pool_type='sqrt') max_x = fluid.layers.sequence_pool(input=x, pool_type='max') """ - helper = LayerHelper('sequence_pool', input=input, **kwargs) + helper = LayerHelper('sequence_pool', **locals()) dtype = helper.input_dtype() pool_out = helper.create_tmp_variable(dtype) max_index = helper.create_tmp_variable(dtype) @@ -1363,7 +1362,7 @@ def sequence_pool(input, pool_type, **kwargs): return pool_out -def sequence_first_step(input, **kwargs): +def sequence_first_step(input): """ This funciton get the first step of sequence. @@ -1396,7 +1395,7 @@ def sequence_first_step(input, **kwargs): return sequence_pool(input=input, pool_type="first") -def sequence_last_step(input, **kwargs): +def sequence_last_step(input): """ This funciton get the last step of sequence. @@ -2336,7 +2335,8 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None): normed = fluid.layers.l2_normalize(x=data, axis=1) """ - if len(x.shape) == 1: axis = 0 + if len(x.shape) == 1: + axis = 0 helper = LayerHelper("l2_normalize", **locals()) @@ -2654,7 +2654,7 @@ def ctc_greedy_decoder(input, blank, name=None): return ctc_out -def warpctc(input, label, blank=0, norm_by_times=False, **kwargs): +def warpctc(input, label, blank=0, norm_by_times=False): """ An operator integrating the open source Warp-CTC library (https://github.com/baidu-research/warp-ctc) @@ -2695,7 +2695,7 @@ def warpctc(input, label, blank=0, norm_by_times=False, **kwargs): cost = layers.warpctc(input=y_predict, label=y) """ - helper = LayerHelper('warpctc', **kwargs) + helper = LayerHelper('warpctc', **locals()) loss_out = helper.create_tmp_variable(dtype=input.dtype) grad_out = helper.create_tmp_variable(dtype=input.dtype) helper.append_op( -- GitLab From 057efd1709db21744d672e3b1db74da561bd77ae Mon Sep 17 00:00:00 2001 From: qingqing01 Date: Mon, 12 Feb 2018 16:21:44 +0800 Subject: [PATCH 085/217] Implement multibox loss wrapper for SSD in Python API. (#8385) * Implement multibox loss wrapper in Python API. * Add some wrappers for SSD detection. * Fix conflicts. * Add unit testing for SSD loss wrapper. * Update doc in Python API. * Refine unit testing. * Add more unit testing and update some interface arguments. --- .../fluid/operators/mine_hard_examples_op.cc | 2 + python/paddle/v2/fluid/layers/__init__.py | 3 - python/paddle/v2/fluid/layers/detection.py | 374 +++++++++++++++++- .../paddle/v2/fluid/tests/test_detection.py | 81 +++- 4 files changed, 424 insertions(+), 36 deletions(-) diff --git a/paddle/fluid/operators/mine_hard_examples_op.cc b/paddle/fluid/operators/mine_hard_examples_op.cc index 73a6c0b679..540cf86741 100644 --- a/paddle/fluid/operators/mine_hard_examples_op.cc +++ b/paddle/fluid/operators/mine_hard_examples_op.cc @@ -237,6 +237,8 @@ class MineHardExamplesOp : public framework::OperatorWithKernel { } ctx->SetOutputDim("UpdatedMatchIndices", idx_dims); + // The first dimension of NegIndices will be set correcttly in Compute. 
+ ctx->SetOutputDim("NegIndices", {-1, 1}); } protected: diff --git a/python/paddle/v2/fluid/layers/__init__.py b/python/paddle/v2/fluid/layers/__init__.py index cfbbf710b6..f4fb2ca279 100644 --- a/python/paddle/v2/fluid/layers/__init__.py +++ b/python/paddle/v2/fluid/layers/__init__.py @@ -16,8 +16,6 @@ import ops from ops import * import nn from nn import * -import detection -from detection import * import io from io import * import tensor @@ -33,7 +31,6 @@ from detection import * __all__ = [] __all__ += math_op_patch.__all__ -__all__ += detection.__all__ __all__ += nn.__all__ __all__ += io.__all__ __all__ += tensor.__all__ diff --git a/python/paddle/v2/fluid/layers/detection.py b/python/paddle/v2/fluid/layers/detection.py index 0f3256d765..659ebd5f76 100644 --- a/python/paddle/v2/fluid/layers/detection.py +++ b/python/paddle/v2/fluid/layers/detection.py @@ -1,10 +1,10 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -15,17 +15,31 @@ All layers just related to the detection neural network. """ +from layer_function_generator import generate_layer_fn from ..layer_helper import LayerHelper -from ..framework import Variable -from tensor import concat -from ops import reshape +import nn +import ops +import tensor import math __all__ = [ - 'detection_output', 'prior_box', + 'bipartite_match', + 'target_assign', + 'detection_output', + 'ssd_loss', ] +__auto__ = [ + 'iou_similarity', + 'box_coder', +] + +__all__ += __auto__ + +for _OP in set(__auto__): + globals()[_OP] = generate_layer_fn(_OP) + def detection_output(scores, loc, @@ -95,18 +109,13 @@ def detection_output(scores, """ helper = LayerHelper("detection_output", **locals()) - decoded_box = helper.create_tmp_variable(dtype=loc.dtype) - helper.append_op( - type="box_coder", - inputs={ - 'PriorBox': prior_box, - 'PriorBoxVar': prior_box_var, - 'TargetBox': loc - }, - outputs={'OutputBox': decoded_box}, - attrs={'code_type': 'decode_center_size'}) - nmsed_outs = helper.create_tmp_variable(dtype=decoded_box.dtype) + decoded_box = box_coder( + prior_box=prior_box, + prior_box_var=prior_box_var, + target_box=loc, + code_type='decode_center_size') + nmsed_outs = helper.create_tmp_variable(dtype=decoded_box.dtype) helper.append_op( type="multiclass_nms", inputs={'Scores': scores, @@ -246,7 +255,7 @@ def prior_box(inputs, new_shape = [ -1, reduce(lambda x, y: x * y, input.shape[axis:len(input.shape)]) ] - out = reshape(x=input, shape=new_shape) + out = ops.reshape(x=input, shape=new_shape) return out assert isinstance(inputs, list), 'inputs should be a list.' 
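Ops listed in `__auto__` get their Python wrappers generated from the op protos by `generate_layer_fn`, so they take the op's inputs and attributes directly as keyword arguments. A hedged sketch mirroring the calls used later in `ssd_loss` (input variables assumed):

.. code-block:: python

    # Both wrappers are generated, not hand-written layer functions.
    iou = layers.iou_similarity(x=gt_box, y=prior_box)
    encoded = layers.box_coder(
        prior_box=prior_box,
        prior_box_var=prior_box_var,
        target_box=gt_box,
        code_type='encode_center_size')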
@@ -322,7 +331,332 @@ def prior_box(inputs,
         reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
         reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))

-    box = concat(reshaped_boxes)
-    var = concat(reshaped_vars)
+    box = tensor.concat(reshaped_boxes)
+    var = tensor.concat(reshaped_vars)

     return box, var
+
+
+def bipartite_match(dist_matrix, name=None):
+    """
+    **Bipartite matching operator**
+
+    This operator implements a greedy bipartite matching algorithm, which is
+    used to obtain the matching with the maximum distance based on the input
+    distance matrix. For an input 2D matrix, the bipartite matching algorithm
+    can find the matched column for each row, and can also find the matched
+    row for each column. This operator only calculates matched indices from
+    column to row. For each instance, the number of matched indices is the
+    number of columns of the input distance matrix.
+
+    There are two outputs, the matched indices and the matched distances.
+    Put simply, the algorithm matches the best (maximum-distance) row entity
+    to each column entity, and the matched indices are not duplicated in each
+    row of ColToRowMatchIndices. If a column entity is not matched to any row
+    entity, -1 is set in ColToRowMatchIndices.
+
+    Please note that the input DistMat can be a LoDTensor (with LoD) or a
+    Tensor. If it is a LoDTensor with LoD, the height of ColToRowMatchIndices
+    is the batch size. If it is a Tensor, the height of ColToRowMatchIndices
+    is 1.
+
+    Args:
+        dist_matrix(Variable): This input is a 2-D LoDTensor with shape
+            [K, M]. It is the pair-wise distance matrix between the entities
+            represented by each row and each column. For example, assume one
+            entity is A with shape [K] and another entity is B with shape [M].
+            Then dist_matrix[i][j] is the distance between A[i] and B[j]. The
+            larger the distance, the better the match. Note that this tensor
+            can contain LoD information to represent a batch of inputs. One
+            instance of this batch can contain different numbers of entities.
+    Returns:
+        match_indices(Variable): A 2-D Tensor with shape [N, M] in int type.
+            N is the batch size. If match_indices[i][j] is -1, it
+            means B[j] does not match any entity in the i-th instance.
+            Otherwise, it means B[j] is matched to row
+            match_indices[i][j] in the i-th instance. The row number of
+            the i-th instance is saved in match_indices[i][j].
+        match_distance(Variable): A 2-D Tensor with shape [N, M] in float type.
+            N is the batch size. If match_indices[i][j] is -1,
+            match_distance[i][j] is also -1.0. Otherwise, assume
+            match_distance[i][j] = d, and the row offsets of each instance
+            are called LoD. Then match_distance[i][j] = dist_matrix[d+LoD[i]][j].
+    """
+    helper = LayerHelper('bipartite_match', **locals())
+    match_indices = helper.create_tmp_variable(dtype='int32')
+    match_distance = helper.create_tmp_variable(dtype=dist_matrix.dtype)
+    helper.append_op(
+        type='bipartite_match',
+        inputs={'DistMat': dist_matrix},
+        outputs={
+            'ColToRowMatchIndices': match_indices,
+            'ColToRowMatchDist': match_distance
+        })
+    return match_indices, match_distance
+
+
+def target_assign(input,
+                  matched_indices,
+                  negative_indices=None,
+                  mismatch_value=None,
+                  name=None):
+    """
+    **Target assigner operator**
+
+    Given the target bounding boxes or labels, this operator assigns
+    classification and regression targets to each prediction, as well as
+    weights for each prediction. The weights are used to specify which
+    predictions do not contribute to the training loss.
+
+    For each instance, the outputs `out` and `out_weight` are assigned based
+    on `match_indices` and `negative_indices`.
+    Assuming that the row offset for each instance in `input` is called lod,
+    this operator assigns classification/regression targets by performing the
+    following steps:
+
+    1. Assigning all outputs based on `match_indices`:
+
+    If id = match_indices[i][j] >= 0,
+
+        out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K]
+        out_weight[i][j] = 1.
+
+    Otherwise,
+
+        out[i][j][0 : K] = {mismatch_value, mismatch_value, ...}
+        out_weight[i][j] = 0.
+
+    2. Assigning out_weight based on `neg_indices` if `neg_indices` is provided:
+
+    Assuming that the row offset for each instance in `neg_indices` is called
+    neg_lod, for the i-th instance and each `id` of neg_indices in this
+    instance:
+
+        out[i][id][0 : K] = {mismatch_value, mismatch_value, ...}
+        out_weight[i][id] = 1.0
+
+    Args:
+        input (Variable): This input is a 3D LoDTensor with shape [M, P, K].
+        matched_indices (Variable): The input matched indices are a 2D Tensor
+            with shape [N, P]. If MatchIndices[i][j] is -1, the j-th column
+            entity is not matched to any row entity in the i-th instance.
+        negative_indices (Variable): The input negative example indices are
+            an optional input with shape [Neg, 1] and int32 type, where Neg is
+            the total number of negative example indices.
+        mismatch_value (float32): The value filled at mismatched locations.
+
+    Returns:
+        out (Variable): The output is a 3D Tensor with shape [N, P, K],
+            where N and P are the same as in `matched_indices` and K is the
+            same as in the input `X`.
+        out_weight (Variable): The weight for the output, with shape [N, P, 1].
+    """
+    helper = LayerHelper('target_assign', **locals())
+    out = helper.create_tmp_variable(dtype=input.dtype)
+    out_weight = helper.create_tmp_variable(dtype='float32')
+    helper.append_op(
+        type='target_assign',
+        inputs={
+            'X': input,
+            'MatchIndices': matched_indices,
+            'NegIndices': negative_indices
+        },
+        outputs={'Out': out,
+                 'OutWeight': out_weight},
+        attrs={'mismatch_value': mismatch_value})
+    return out, out_weight
+
+
+def ssd_loss(location,
+             confidence,
+             gt_box,
+             gt_label,
+             prior_box,
+             prior_box_var=None,
+             background_label=0,
+             overlap_threshold=0.5,
+             neg_pos_ratio=3.0,
+             neg_overlap=0.5,
+             loc_loss_weight=1.0,
+             conf_loss_weight=1.0,
+             match_type='per_prediction',
+             mining_type='max_negative',
+             sample_size=None):
+    """
+    **Multi-box loss layer for the SSD object detection algorithm**
+
+    This layer computes the detection loss for SSD given the location offset
+    predictions, confidence predictions, prior boxes and ground-truth bounding
+    boxes and labels, and the type of hard example mining. The returned loss
+    is a weighted sum of the localization loss (or regression loss) and the
+    confidence loss (or classification loss), obtained by performing the
+    following steps:
+
+    1. Find matched bounding boxes by the bipartite matching algorithm.
+      1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
+      1.2 Compute matched bounding boxes by the bipartite matching algorithm.
+    2. Compute confidence for mining hard examples.
+      2.1. Get the target label based on the matched indices.
+      2.2. Compute the confidence loss.
+    3. Apply hard example mining to get the negative example indices and update
+       the matched indices.
+    4. Assign classification and regression targets.
+      4.1. Encode the bboxes according to the prior boxes.
+      4.2. Assign regression targets.
+      4.3. Assign classification targets.
+    5. Compute the overall objective loss.
+      5.1 Compute the confidence loss.
+      5.2 Compute the localization loss.
+      5.3 Compute the overall weighted loss.
+
+    Args:
+        location (Variable): The location predictions are a 3D Tensor with
+            shape [N, Np, 4], where N is the batch size and Np is the total
+            number of predictions for each instance. 4 is the number of
+            coordinate values, with the layout [xmin, ymin, xmax, ymax].
+        confidence (Variable): The confidence predictions are a 3D Tensor
+            with shape [N, Np, C], where N and Np are the same as in
+            `location` and C is the class number.
+        gt_box (Variable): The ground-truth bounding boxes (bboxes) are a 2D
+            LoDTensor with shape [Ng, 4], where Ng is the total number of
+            ground-truth bboxes of the mini-batch input.
+        gt_label (Variable): The ground-truth labels are a 2D LoDTensor
+            with shape [Ng, 1].
+        prior_box (Variable): The prior boxes are a 2D Tensor with shape [Np, 4].
+        prior_box_var (Variable): The variances of the prior boxes are a 2D
+            Tensor with shape [Np, 4].
+        background_label (int): The index of the background label, 0 by default.
+        overlap_threshold (float): If match_type is 'per_prediction', use
+            `overlap_threshold` to determine the extra matching bboxes when
+            finding matched boxes. 0.5 by default.
+        neg_pos_ratio (float): The ratio of negative boxes to positive
+            boxes, used only when mining_type is 'max_negative', 3.0 by default.
+        neg_overlap (float): The negative overlap upper bound for the unmatched
+            predictions, used only when mining_type is 'max_negative',
+            0.5 by default.
+        sample_size (int): The max sample size of negative boxes, used only
+            when mining_type is 'hard_example'.
+        loc_loss_weight (float): Weight for the localization loss, 1.0 by default.
+        conf_loss_weight (float): Weight for the confidence loss, 1.0 by default.
+        match_type (str): The type of matching method during training, should
+            be 'bipartite' or 'per_prediction'.
+        mining_type (str): The hard example mining type, should be 'hard_example'
+            or 'max_negative'; currently only 'max_negative' is supported.
+
+    Returns:
+        Variable: The weighted sum of the localization loss and confidence loss,
+            with shape [N * Np, 1], where N and Np are the same as in
+            `location`.
+
+    Raises:
+        ValueError: If mining_type is 'hard_example'; currently only the
+            mining type 'max_negative' is supported.
+
+    Examples:
+        .. code-block:: python
+
+            pb = layers.data(
+                name='prior_box',
+                shape=[10, 4],
+                append_batch_size=False,
+                dtype='float32')
+            pbv = layers.data(
+                name='prior_box_var',
+                shape=[10, 4],
+                append_batch_size=False,
+                dtype='float32')
+            loc = layers.data(name='target_box', shape=[10, 4], dtype='float32')
+            scores = layers.data(name='scores', shape=[10, 21], dtype='float32')
+            gt_box = layers.data(
+                name='gt_box', shape=[4], lod_level=1, dtype='float32')
+            gt_label = layers.data(
+                name='gt_label', shape=[1], lod_level=1, dtype='float32')
+            loss = layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
+    """
+
+    helper = LayerHelper('ssd_loss', **locals())
+    if mining_type != 'max_negative':
+        raise ValueError("Only support mining_type == max_negative now.")
+
+    num, num_prior, num_class = confidence.shape
+
+    def __reshape_to_2d(var):
+        return ops.reshape(x=var, shape=[-1, var.shape[-1]])
+
+    # 1. Find matched bounding boxes by the bipartite matching algorithm.
+    #   1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
+    iou = iou_similarity(x=gt_box, y=prior_box)
+    #   1.2 Compute matched bounding boxes by the bipartite matching algorithm.
+    matched_indices, matched_dist = bipartite_match(iou)
+
+    # 2. Compute confidence for mining hard examples
+    # 2.1. Get the target label based on the matched indices.
+    gt_label = ops.reshape(x=gt_label, shape=gt_label.shape + (1, ))
+    target_label, _ = target_assign(
+        gt_label, matched_indices, mismatch_value=background_label)
+    # 2.2. Compute confidence loss.
+    # Reshape confidence to 2D tensor.
+    confidence = __reshape_to_2d(confidence)
+    target_label = tensor.cast(x=target_label, dtype='int64')
+    target_label = __reshape_to_2d(target_label)
+    conf_loss = nn.softmax_with_cross_entropy(confidence, target_label)
+
+    # 3. Mining hard examples
+    conf_loss = ops.reshape(x=conf_loss, shape=(num, num_prior))
+    neg_indices = helper.create_tmp_variable(dtype='int32')
+    dtype = matched_indices.dtype
+    updated_matched_indices = helper.create_tmp_variable(dtype=dtype)
+    helper.append_op(
+        type='mine_hard_examples',
+        inputs={
+            'ClsLoss': conf_loss,
+            'LocLoss': None,
+            'MatchIndices': matched_indices,
+            'MatchDist': matched_dist,
+        },
+        outputs={
+            'NegIndices': neg_indices,
+            'UpdatedMatchIndices': updated_matched_indices
+        },
+        attrs={
+            'neg_pos_ratio': neg_pos_ratio,
+            'neg_dist_threshold': neg_overlap,
+            'mining_type': mining_type,
+            'sample_size': sample_size,
+        })
+
+    # 4. Assign classification and regression targets
+    # 4.1. Encode the bboxes according to the prior boxes.
+    encoded_bbox = box_coder(
+        prior_box=prior_box,
+        prior_box_var=prior_box_var,
+        target_box=gt_box,
+        code_type='encode_center_size')
+    # 4.2. Assign regression targets
+    target_bbox, target_loc_weight = target_assign(
+        encoded_bbox, updated_matched_indices, mismatch_value=background_label)
+    # 4.3. Assign classification targets
+    target_label, target_conf_weight = target_assign(
+        gt_label,
+        updated_matched_indices,
+        negative_indices=neg_indices,
+        mismatch_value=background_label)
+
+    # 5. Compute loss.
+    # 5.1 Compute confidence loss.
+    target_label = __reshape_to_2d(target_label)
+    target_label = tensor.cast(x=target_label, dtype='int64')
+    conf_loss = nn.softmax_with_cross_entropy(confidence, target_label)
+    target_conf_weight = __reshape_to_2d(target_conf_weight)
+    conf_loss = conf_loss * target_conf_weight
+
+    # 5.2 Compute regression loss.
+    location = __reshape_to_2d(location)
+    target_bbox = __reshape_to_2d(target_bbox)
+
+    loc_loss = nn.smooth_l1(location, target_bbox)
+    target_loc_weight = __reshape_to_2d(target_loc_weight)
+    loc_loss = loc_loss * target_loc_weight
+
+    # 5.3 Compute overall weighted loss.
+    loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
+    return loss
diff --git a/python/paddle/v2/fluid/tests/test_detection.py b/python/paddle/v2/fluid/tests/test_detection.py
index fecc2a6226..b731fc9b02 100644
--- a/python/paddle/v2/fluid/tests/test_detection.py
+++ b/python/paddle/v2/fluid/tests/test_detection.py
@@ -13,16 +13,12 @@
 #    limitations under the License.
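The unit tests below exercise these layers individually; outside of `ssd_loss` the same primitives compose directly. A hedged sketch of the matching-and-assignment pipeline (toy variables such as `encoded_gt` are assumed):

.. code-block:: python

    # Match each prior box (column) to a ground-truth box (row), then pull
    # per-prior regression targets out of the (LoD) ground-truth tensor.
    iou = layers.iou_similarity(x=gt_box, y=prior_box)
    matched_indices, matched_dist = layers.bipartite_match(iou)
    target_bbox, target_weight = layers.target_assign(
        encoded_gt, matched_indices, mismatch_value=0)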
from __future__ import print_function -import paddle.v2.fluid as fluid -import paddle.v2.fluid.core as core import paddle.v2.fluid.layers as layers -import paddle.v2.fluid.layers.detection as detection from paddle.v2.fluid.framework import Program, program_guard import unittest -import numpy as np -class TestBook(unittest.TestCase): +class TestDetection(unittest.TestCase): def test_detection_output(self): program = Program() with program_guard(program): @@ -49,6 +45,66 @@ class TestBook(unittest.TestCase): out = layers.detection_output( scores=scores, loc=loc, prior_box=pb, prior_box_var=pbv) self.assertIsNotNone(out) + self.assertEqual(out.shape[-1], 6) + print(str(program)) + + def test_detection_api(self): + program = Program() + with program_guard(program): + x = layers.data(name='x', shape=[4], dtype='float32') + y = layers.data(name='y', shape=[4], dtype='float32') + z = layers.data(name='z', shape=[4], dtype='float32', lod_level=1) + iou = layers.iou_similarity(x=x, y=y) + bcoder = layers.box_coder( + prior_box=x, + prior_box_var=y, + target_box=z, + code_type='encode_center_size') + self.assertIsNotNone(iou) + self.assertIsNotNone(bcoder) + + matched_indices, matched_dist = layers.bipartite_match(iou) + self.assertIsNotNone(matched_indices) + self.assertIsNotNone(matched_dist) + + gt = layers.data( + name='gt', shape=[1, 1], dtype='int32', lod_level=1) + trg, trg_weight = layers.target_assign( + gt, matched_indices, mismatch_value=0) + self.assertIsNotNone(trg) + self.assertIsNotNone(trg_weight) + + gt2 = layers.data( + name='gt2', shape=[10, 4], dtype='float32', lod_level=1) + trg, trg_weight = layers.target_assign( + gt2, matched_indices, mismatch_value=0) + self.assertIsNotNone(trg) + self.assertIsNotNone(trg_weight) + + print(str(program)) + + def test_ssd_loss(self): + program = Program() + with program_guard(program): + pb = layers.data( + name='prior_box', + shape=[10, 4], + append_batch_size=False, + dtype='float32') + pbv = layers.data( + name='prior_box_var', + shape=[10, 4], + append_batch_size=False, + dtype='float32') + loc = layers.data(name='target_box', shape=[10, 4], dtype='float32') + scores = layers.data(name='scores', shape=[10, 21], dtype='float32') + gt_box = layers.data( + name='gt_box', shape=[4], lod_level=1, dtype='float32') + gt_label = layers.data( + name='gt_label', shape=[1], lod_level=1, dtype='int32') + loss = layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv) + self.assertIsNotNone(loss) + self.assertEqual(loss.shape[-1], 1) print(str(program)) @@ -62,40 +118,39 @@ class TestPriorBox(unittest.TestCase): assert box.shape[1] == 4 def prior_box_output(self, data_shape): - images = fluid.layers.data( - name='pixel', shape=data_shape, dtype='float32') - conv1 = fluid.layers.conv2d( + images = layers.data(name='pixel', shape=data_shape, dtype='float32') + conv1 = layers.conv2d( input=images, num_filters=3, filter_size=3, stride=2, use_cudnn=False) - conv2 = fluid.layers.conv2d( + conv2 = layers.conv2d( input=conv1, num_filters=3, filter_size=3, stride=2, use_cudnn=False) - conv3 = fluid.layers.conv2d( + conv3 = layers.conv2d( input=conv2, num_filters=3, filter_size=3, stride=2, use_cudnn=False) - conv4 = fluid.layers.conv2d( + conv4 = layers.conv2d( input=conv3, num_filters=3, filter_size=3, stride=2, use_cudnn=False) - conv5 = fluid.layers.conv2d( + conv5 = layers.conv2d( input=conv4, num_filters=3, filter_size=3, stride=2, use_cudnn=False) - box, var = detection.prior_box( + box, var = layers.prior_box( inputs=[conv1, conv2, conv3, conv4, conv5, 
conv5], image=images, min_ratio=20, -- GitLab From 84d9c6907f96a896b5b366d0bacbcd3f79d9020a Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Mon, 12 Feb 2018 15:52:08 +0800 Subject: [PATCH 086/217] follow comments of yaming and qingqing --- python/paddle/v2/fluid/layers/detection.py | 183 ++++++++++----------- python/paddle/v2/fluid/nets.py | 33 ---- 2 files changed, 86 insertions(+), 130 deletions(-) diff --git a/python/paddle/v2/fluid/layers/detection.py b/python/paddle/v2/fluid/layers/detection.py index aab9f032bd..6d0f12f475 100644 --- a/python/paddle/v2/fluid/layers/detection.py +++ b/python/paddle/v2/fluid/layers/detection.py @@ -151,36 +151,36 @@ def prior_box(inputs, `_ . Args: - inputs(list): The list of input Variables, the format + inputs(list|tuple): The list of input Variables, the format of all Variables is NCHW. image(Variable): The input image data of PriorBoxOp, the layout is NCHW. min_ratio(int): the min ratio of generated prior boxes. max_ratio(int): the max ratio of generated prior boxes. - aspect_ratios(list): the aspect ratios of generated prior + aspect_ratios(list|tuple): the aspect ratios of generated prior boxes. The length of input and aspect_ratios must be equal. base_size(int): the base_size is used to get min_size and max_size according to min_ratio and max_ratio. - step_w(list, optional, default=None): Prior boxes step + step_w(list|tuple|None): Prior boxes step across width. If step_w[i] == 0.0, the prior boxes step across width of the inputs[i] will be automatically calculated. - step_h(list, optional, default=None): Prior boxes step + step_h(list|tuple|None): Prior boxes step across height, If step_h[i] == 0.0, the prior boxes step across height of the inputs[i] will be automatically calculated. offset(float, optional, default=0.5): Prior boxes center offset. - variance(list, optional, default=[0.1, 0.1, 0.1, 0.1]): the variances + variance(list|tuple|[0.1, 0.1, 0.1, 0.1]): the variances to be encoded in prior boxes. - flip(bool, optional, default=False): Whether to flip + flip(bool|False): Whether to flip aspect ratios. clip(bool, optional, default=False): Whether to clip out-of-boundary boxes. - min_sizes(list, optional, default=None): If `len(inputs) <=2`, + min_sizes(list|tuple|None): If `len(inputs) <=2`, min_sizes must be set up, and the length of min_sizes should equal to the length of inputs. - max_sizes(list, optional, default=None): If `len(inputs) <=2`, + max_sizes(list|tuple|None): If `len(inputs) <=2`, max_sizes must be set up, and the length of min_sizes should equal to the length of inputs. - name(str, optional, None): Name of the prior box layer. + name(str|None): Name of the prior box layer. Returns: boxes(Variable): the output prior boxes of PriorBox. @@ -252,7 +252,16 @@ def prior_box(inputs, out = ops.reshape(x=input, shape=new_shape) return out - assert isinstance(inputs, list), 'inputs should be a list.' 
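The bare assert is being replaced by helpers that also accept tuples and raise `ValueError` with a descriptive message, as defined just below. A hedged sketch of the resulting calling convention (feature maps `conv1`..`conv3` and `images` assumed):

.. code-block:: python

    # Tuples of feature maps are now as valid as lists; any other type
    # raises ValueError('inputs should be a list or tuple.').
    box, var = layers.prior_box(
        inputs=(conv1, conv2, conv3),
        image=images,
        min_ratio=20,
        max_ratio=90,
        aspect_ratios=[[2.], [2., 3.], [2., 3.]],
        base_size=300)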
+ def _is_list_or_tuple_(data): + return (isinstance(data, list) or isinstance(data, tuple)) + + def _is_list_or_tuple_and_equal(data, length, err_info): + if not (_is_list_or_tuple_(data) and len(data) == length): + raise ValueError(err_info) + + if not _is_list_or_tuple_(inputs): + raise ValueError('inputs should be a list or tuple.') + num_layer = len(inputs) if num_layer <= 2: @@ -269,26 +278,25 @@ def prior_box(inputs, max_sizes = [base_size * .20] + max_sizes if aspect_ratios: - if not (isinstance(aspect_ratios, list) and - len(aspect_ratios) == num_layer): - raise ValueError( - 'aspect_ratios should be list and the length of inputs ' - 'and aspect_ratios should be the same.') + _is_list_or_tuple_and_equal( + aspect_ratios, num_layer, + 'aspect_ratios should be list and the length of inputs ' + 'and aspect_ratios should be the same.') if step_h: - if not (isinstance(step_h, list) and len(step_h) == num_layer): - raise ValueError( - 'step_h should be list and the length of inputs and ' - 'step_h should be the same.') + _is_list_or_tuple_and_equal( + step_h, num_layer, + 'step_h should be list and the length of inputs and ' + 'step_h should be the same.') if step_w: - if not (isinstance(step_w, list) and len(step_w) == num_layer): - raise ValueError( - 'step_w should be list and the length of inputs and ' - 'step_w should be the same.') + _is_list_or_tuple_and_equal( + step_w, num_layer, + 'step_w should be list and the length of inputs and ' + 'step_w should be the same.') if steps: - if not (isinstance(steps, list) and len(steps) == num_layer): - raise ValueError( - 'steps should be list and the length of inputs and ' - 'step_w should be the same.') + _is_list_or_tuple_and_equal( + steps, num_layer, + 'steps should be list and the length of inputs and ' + 'step_w should be the same.') step_w = steps step_h = steps @@ -298,13 +306,13 @@ def prior_box(inputs, min_size = min_sizes[i] max_size = max_sizes[i] aspect_ratio = [] - if not isinstance(min_size, list): + if not _is_list_or_tuple_(min_size): min_size = [min_size] - if not isinstance(max_size, list): + if not _is_list_or_tuple_(max_size): max_size = [max_size] if aspect_ratios: aspect_ratio = aspect_ratios[i] - if not isinstance(aspect_ratio, list): + if not _is_list_or_tuple_(aspect_ratio): aspect_ratio = [aspect_ratio] box, var = _prior_box_(input, image, min_size, max_size, aspect_ratio, @@ -354,26 +362,26 @@ def multi_box_head(inputs, MultiBox Detector)`_ . Args: - inputs(list): The list of input Variables, the format + inputs(list|tuple): The list of input Variables, the format of all Variables is NCHW. - num_classes(int): The number of calss. - min_sizes(list, optional, default=None): The length of - min_size is used to compute the the number of prior box. + num_classes(int): The number of classes. + min_sizes(list|tuple|None): The number of + min_sizes is used to compute the number of predicted box. If the min_size is None, it will be computed according to min_ratio and max_ratio. - max_sizes(list, optional, default=None): The length of max_size - is used to compute the the number of prior box. - min_ratio(int): If the min_sizes is None, min_ratio and min_ratio + max_sizes(list|tuple|None): The number of max_sizes + is used to compute the the number of predicted box. + min_ratio(int|None): If the min_sizes is None, min_ratio and max_ratio will be used to compute the min_sizes and max_sizes. 
- max_ratio(int): If the min_sizes is None, min_ratio and min_ratio + max_ratio(int|None): If the min_sizes is None, max_ratio and min_ratio will be used to compute the min_sizes and max_sizes. - aspect_ratios(list): The number of the aspect ratios is used to + aspect_ratios(list|tuple): The number of the aspect ratios is used to compute the number of prior box. base_size(int): the base_size is used to get min_size and max_size according to min_ratio and max_ratio. - flip(bool, optional, default=False): Whether to flip + flip(bool|False): Whether to flip aspect ratios. - name(str, optional, None): Name of the prior box layer. + name(str|None): Name of the prior box layer. Returns: @@ -397,52 +405,33 @@ def multi_box_head(inputs, flip=True) """ - def _conv_with_bn_(input, - conv_num_filter, - conv_padding=1, - conv_filter_size=3, - conv_stride=1, - conv_act=None, - param_attr=None, - conv_with_batchnorm=False, - conv_batchnorm_drop_rate=0.0, - use_cudnn=True): - - conv2d = nn.conv2d( - input=input, - num_filters=conv_num_filter, - filter_size=conv_filter_size, - padding=conv_padding, - stride=conv_stride, - param_attr=param_attr, - act=conv_act, - use_cudnn=use_cudnn) - - if conv_with_batchnorm: - conv2d = nn.batch_norm(input=conv2d) - drop_rate = conv_batchnorm_drop_rate - if abs(drop_rate) > 1e-5: - conv2d = nn.dropout(x=conv2d, dropout_prob=drop_rate) + def _is_equal_(len1, len2, err_info): + if not (len1 == len2): + raise ValueError(err_info) - return conv2d + def _is_list_or_tuple_(data): + return (isinstance(data, list) or isinstance(data, tuple)) - if not (isinstance(inputs, list)): - raise ValueError('inputs should be a list.') + if not _is_list_or_tuple_(inputs): + raise ValueError('inputs should be a list or tuple.') if min_sizes is not None: - if not (len(inputs) == len(min_sizes)): - raise ValueError('the length of min_sizes ' - 'and inputs should be the same.') + _is_equal_( + len(inputs), + len(min_sizes), 'the length of min_sizes ' + 'and inputs should be equal.') if max_sizes is not None: - if not (len(inputs) == len(max_sizes)): - raise ValueError('the length of max_sizes ' - 'and inputs should be the same.') + _is_equal_( + len(inputs), + len(max_sizes), 'the length of max_sizes ' + 'and inputs should be equal.') if aspect_ratios is not None: - if not (len(inputs) == len(aspect_ratios)): - raise ValueError('the length of aspect_ratios ' - 'and inputs should be the same.') + _is_equal_( + len(inputs), + len(aspect_ratios), 'the length of aspect_ratios ' + 'and inputs should be equal.') if min_sizes is None: # If min_sizes is None, min_sizes and max_sizes @@ -464,22 +453,23 @@ def multi_box_head(inputs, mbox_confs = [] for i, input in enumerate(inputs): min_size = min_sizes[i] - if type(min_size) is not list: + if not _is_list_or_tuple_(min_size): min_size = [min_size] max_size = [] if max_sizes is not None: max_size = max_sizes[i] - if type(max_size) is not list: + if not _is_list_or_tuple_(max_size): max_size = [max_size] - if not (len(max_size) == len(min_size)): - raise ValueError( - 'max_size and min_size should have same length.') + _is_equal_( + len(max_size), + len(min_size), + 'the length of max_size and min_size should be equal.') aspect_ratio = [] if aspect_ratios is not None: aspect_ratio = aspect_ratios[i] - if type(aspect_ratio) is not list: + if not _is_list_or_tuple_(aspect_ratio): aspect_ratio = [aspect_ratio] # get the number of prior box on each location @@ -499,25 +489,24 @@ def multi_box_head(inputs, if share_location: num_loc_output *= num_classes - 
mbox_loc = _conv_with_bn_( + mbox_loc = nn.conv2d( input=input, - conv_num_filter=num_loc_output, - conv_padding=pad, - conv_stride=stride, - conv_filter_size=kernel_size, - conv_with_batchnorm=use_batchnorm) + num_filters=num_loc_output, + filter_size=kernel_size, + padding=pad, + stride=stride) + mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1]) mbox_locs.append(mbox_loc) # get conf_loc num_conf_output = num_priors_per_location * num_classes - conf_loc = _conv_with_bn_( + conf_loc = nn.conv2d( input=input, - conv_num_filter=num_conf_output, - conv_padding=pad, - conv_stride=stride, - conv_filter_size=kernel_size, - conv_with_batchnorm=use_batchnorm) + num_filters=num_conf_output, + filter_size=kernel_size, + padding=pad, + stride=stride) conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1]) mbox_confs.append(conf_loc) diff --git a/python/paddle/v2/fluid/nets.py b/python/paddle/v2/fluid/nets.py index b7deccfd1f..be7878f869 100644 --- a/python/paddle/v2/fluid/nets.py +++ b/python/paddle/v2/fluid/nets.py @@ -18,7 +18,6 @@ __all__ = [ "sequence_conv_pool", "glu", "scaled_dot_product_attention", - "img_conv_with_bn", ] @@ -108,38 +107,6 @@ def img_conv_group(input, return pool_out -def img_conv_with_bn(input, - conv_num_filter, - conv_padding=1, - conv_filter_size=3, - conv_stride=1, - conv_act=None, - param_attr=None, - conv_with_batchnorm=False, - conv_batchnorm_drop_rate=0.0, - use_cudnn=True): - """ - Image Convolution Group, Used for vgg net. - """ - conv2d = layers.conv2d( - input=input, - num_filters=conv_num_filter, - filter_size=conv_filter_size, - padding=conv_padding, - stride=conv_stride, - param_attr=param_attr, - act=conv_act, - use_cudnn=use_cudnn) - - if conv_with_batchnorm: - conv2d = layers.batch_norm(input=conv2d) - drop_rate = conv_batchnorm_drop_rate - if abs(drop_rate) > 1e-5: - conv2d = layers.dropout(x=conv2d, dropout_prob=drop_rate) - - return conv2d - - def sequence_conv_pool(input, num_filters, filter_size, -- GitLab From b44917d09befcc6a300b2ef7c6ff86302d085f07 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Mon, 12 Feb 2018 08:45:06 +0000 Subject: [PATCH 087/217] Implement IsPersistable() in c++. 
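The new predicate replaces the reachability walk over all ops with a simple type check. For orientation, a hedged Python sketch of the same filtering idea (illustrative only; the real change below is in the C++ loader, and the enum attribute paths are assumptions):

.. code-block:: python

    def is_persistable_var(var_desc):
        # Feed/fetch holders are persistable but are I/O plumbing rather
        # than parameters, so they must not be saved or loaded.
        if var_desc.type() in (core.VarDesc.VarType.FEED_MINIBATCH,
                               core.VarDesc.VarType.FETCH_LIST):
            return False
        return var_desc.persistable()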
--- paddle/fluid/inference/io.cc | 27 +++++++-------------------- 1 file changed, 7 insertions(+), 20 deletions(-) diff --git a/paddle/fluid/inference/io.cc b/paddle/fluid/inference/io.cc index 58d7ab40bf..8132347d63 100644 --- a/paddle/fluid/inference/io.cc +++ b/paddle/fluid/inference/io.cc @@ -32,23 +32,11 @@ void ReadBinaryFile(const std::string& filename, std::string& contents) { inputfs.close(); } -bool IsParameter(const framework::VarDesc* var, - const framework::ProgramDesc& main_program) { - if (var->Persistable()) { - // There are many unreachable variables in the program - for (size_t i = 0; i < main_program.Size(); ++i) { - const framework::BlockDesc& block = main_program.Block(i); - for (auto* op : block.AllOps()) { - if (op->Type() == framework::kFeedOpType) { - continue; - } - for (auto input_argument_name : op->InputArgumentNames()) { - if (input_argument_name == var->Name()) { - return true; - } - } - } - } +bool IsPersistable(const framework::VarDesc* var) { + if (var->Persistable() && + var->GetType() != framework::proto::VarDesc::FEED_MINIBATCH && + var->GetType() != framework::proto::VarDesc::FETCH_LIST) { + return true; } return false; } @@ -65,8 +53,8 @@ void LoadPersistables(framework::Executor& executor, std::vector paramlist; for (auto* var : global_block.AllVars()) { - if (IsParameter(var, main_program)) { - VLOG(3) << "parameter's name: " << var->Name(); + if (IsPersistable(var)) { + VLOG(3) << "persistable variable's name: " << var->Name(); framework::VarDesc* new_var = load_block->Var(var->Name()); new_var->SetShape(var->GetShape()); @@ -101,7 +89,6 @@ void LoadPersistables(framework::Executor& executor, executor.Run(*load_program, &scope, 0, true, true); - VLOG(3) << "Ran loading successfully"; delete load_program; } -- GitLab From 0d4d9c4e1392d46a1c2c3588bd4d6eb4fdd0c980 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Mon, 12 Feb 2018 17:39:26 +0800 Subject: [PATCH 088/217] fix grpc short connection --- paddle/fluid/operators/listen_and_serv_op.cc | 4 ++-- paddle/fluid/operators/recv_op.cc | 4 ++-- paddle/fluid/operators/send_op.cc | 4 ++-- python/paddle/v2/fluid/distribute_transpiler.py | 11 +++-------- 4 files changed, 9 insertions(+), 14 deletions(-) diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index 426dd0dc0e..8e88a7dcf1 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -82,8 +82,8 @@ class ListenAndServOp : public framework::OperatorBase { return string::Sprintf("%s.trainer_%d", varname, grads_counter_[varname]++); } - void Run(const framework::Scope &scope, - const platform::Place &dev_place) const override { + void RunImpl(const framework::Scope &scope, + const platform::Place &dev_place) const override { platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto &dev_ctx = *pool.Get(dev_place); framework::Scope &recv_scope = scope.NewScope(); diff --git a/paddle/fluid/operators/recv_op.cc b/paddle/fluid/operators/recv_op.cc index c093f60cee..17b57b5d45 100644 --- a/paddle/fluid/operators/recv_op.cc +++ b/paddle/fluid/operators/recv_op.cc @@ -32,8 +32,8 @@ class RecvOp : public framework::OperatorBase { const framework::AttributeMap& attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope& scope, - const platform::Place& place) const override { + void RunImpl(const framework::Scope& scope, + const platform::Place& place) const override { auto outs = Outputs("Out"); 
std::vector epmap = Attr>("epmap"); diff --git a/paddle/fluid/operators/send_op.cc b/paddle/fluid/operators/send_op.cc index b241f738cb..39b6c0e8c5 100644 --- a/paddle/fluid/operators/send_op.cc +++ b/paddle/fluid/operators/send_op.cc @@ -48,8 +48,8 @@ class SendOp : public framework::OperatorBase { const framework::AttributeMap& attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope& scope, - const platform::Place& place) const override { + void RunImpl(const framework::Scope& scope, + const platform::Place& place) const override { auto ins = Inputs("X"); auto outs = Outputs("Out"); std::vector epmap = Attr>("epmap"); diff --git a/python/paddle/v2/fluid/distribute_transpiler.py b/python/paddle/v2/fluid/distribute_transpiler.py index 689920af0c..bf2e9e88f3 100644 --- a/python/paddle/v2/fluid/distribute_transpiler.py +++ b/python/paddle/v2/fluid/distribute_transpiler.py @@ -121,7 +121,6 @@ def split_dense_variable(var_list, block_size += dim1 - remains # update split_count after aligning split_count = int(math.ceil(var_numel / float(block_size))) - print("###split var ", var.name, var.shape, block_size, split_count) for block_id in xrange(split_count): curr_block_size = min(block_size, var_numel - ( (block_id) * block_size)) @@ -207,7 +206,7 @@ class DistributeTranspiler: rpc_client_var = program.global_block().create_var( name="RPC_CLIENT_VAR", - psersistable=True, + persistable=True, dtype='float32', # dtype and shape is not used in fact shape=[0]) @@ -256,15 +255,13 @@ class DistributeTranspiler: splited_shape = [rows] if len(orig_shape) >= 2: splited_shape.extend(orig_shape[1:]) - print("###splited: ", size, rows, splited_shape) var = program.global_block().create_var( name="%s.block%d" % (varname, i), - psersistable=False, + persistable=False, dtype=orig_var.dtype, type=orig_var.type, shape=splited_shape) # flattend splited var var_mapping[varname].append(var) - print("###created split var ", var) return var_mapping def _clone_var(self, block, var): @@ -322,7 +319,7 @@ class DistributeTranspiler: for i in xrange(trainers): var_each = block.create_var( name="%s.trainer_%d" % (var.name, i), - psersistable=var.persistable, + persistable=var.persistable, dtype=var.dtype, type=var.type, shape=var.shape) @@ -531,8 +528,6 @@ class DistributeTranspiler: """ # step5 pserver_program = Program() - print("param mapping on pserver: #### ", - self.param_grad_ep_mapping[endpoint]["params"]) for v in self.param_grad_ep_mapping[endpoint]["params"]: self._clone_var(pserver_program.global_block(), v) for v in self.param_grad_ep_mapping[endpoint]["grads"]: -- GitLab From 2d74b5f9ba8ddf6c4b9e2f069ce9d91886d61c07 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Mon, 12 Feb 2018 09:51:38 +0000 Subject: [PATCH 089/217] Refine the Python API load/save_inference_model. --- paddle/fluid/inference/tests/test_helper.h | 4 +- python/paddle/v2/fluid/io.py | 98 +++++++++++-------- .../fluid/tests/book/test_recognize_digits.py | 36 ++++--- 3 files changed, 83 insertions(+), 55 deletions(-) diff --git a/paddle/fluid/inference/tests/test_helper.h b/paddle/fluid/inference/tests/test_helper.h index a6c93aa073..458c766cb0 100644 --- a/paddle/fluid/inference/tests/test_helper.h +++ b/paddle/fluid/inference/tests/test_helper.h @@ -101,8 +101,8 @@ void TestInference(const std::string& dirname, if (IsCombined) { // All parameters are saved in a single file. // Hard-coding the file names of program and parameters in unittest. 
- // Users are free to specify different filename - // (provided: the filenames are changed in the python api as well: io.py) + // The file names should be consistent with that used in Python API + // `fluid.io.save_inference_model`. std::string prog_filename = "__model_combined__"; std::string param_filename = "__params_combined__"; inference_program = paddle::inference::Load(executor, diff --git a/python/paddle/v2/fluid/io.py b/python/paddle/v2/fluid/io.py index 0f43e46082..7146fd23b9 100644 --- a/python/paddle/v2/fluid/io.py +++ b/python/paddle/v2/fluid/io.py @@ -68,7 +68,7 @@ def save_vars(executor, main_program=None, vars=None, predicate=None, - save_file_name=None): + filename=None): """ Save variables to directory by executor. @@ -80,8 +80,8 @@ def save_vars(executor, as a bool. If it returns true, the corresponding input variable will be saved. :param vars: variables need to be saved. If vars is specified, program & predicate will be ignored - :param save_file_name: The name of a single file that all vars are saved to. - If it is None, save variables to separate files. + :param filename: The name of a single file that all vars are saved to. + If it is None, save variables to separate files. :return: None """ @@ -95,7 +95,7 @@ def save_vars(executor, executor, dirname=dirname, vars=filter(predicate, main_program.list_vars()), - save_file_name=save_file_name) + filename=filename) else: save_program = Program() save_block = save_program.global_block() @@ -103,7 +103,7 @@ def save_vars(executor, save_var_map = {} for each_var in vars: new_var = _clone_var_in_block_(save_block, each_var) - if save_file_name is None: + if filename is None: save_block.append_op( type='save', inputs={'X': [new_var]}, @@ -112,7 +112,7 @@ def save_vars(executor, else: save_var_map[new_var.name] = new_var - if save_file_name is not None: + if filename is not None: save_var_list = [] for name in sorted(save_var_map.keys()): save_var_list.append(save_var_map[name]) @@ -121,12 +121,12 @@ def save_vars(executor, type='save_combine', inputs={'X': save_var_list}, outputs={}, - attrs={'file_path': os.path.join(dirname, save_file_name)}) + attrs={'file_path': os.path.join(dirname, filename)}) executor.run(save_program) -def save_params(executor, dirname, main_program=None, save_file_name=None): +def save_params(executor, dirname, main_program=None, filename=None): """ Save all parameters to directory with executor. """ @@ -136,11 +136,10 @@ def save_params(executor, dirname, main_program=None, save_file_name=None): main_program=main_program, vars=None, predicate=is_parameter, - save_file_name=save_file_name) + filename=filename) -def save_persistables(executor, dirname, main_program=None, - save_file_name=None): +def save_persistables(executor, dirname, main_program=None, filename=None): """ Save all persistables to directory with executor. """ @@ -150,7 +149,7 @@ def save_persistables(executor, dirname, main_program=None, main_program=main_program, vars=None, predicate=is_persistable, - save_file_name=save_file_name) + filename=filename) def load_vars(executor, @@ -158,7 +157,7 @@ def load_vars(executor, main_program=None, vars=None, predicate=None, - load_file_name=None): + filename=None): """ Load variables from directory by executor. @@ -170,8 +169,8 @@ def load_vars(executor, as a bool. If it returns true, the corresponding input variable will be loaded. :param vars: variables need to be loaded. 
If vars is specified, program & predicate will be ignored - :param load_file_name: The name of the single file that all vars are loaded from. - If it is None, load variables from separate files. + :param filename: The name of the single file that all vars are loaded from. + If it is None, load variables from separate files. :return: None """ @@ -185,7 +184,7 @@ def load_vars(executor, executor, dirname=dirname, vars=filter(predicate, main_program.list_vars()), - load_file_name=load_file_name) + filename=filename) else: load_prog = Program() load_block = load_prog.global_block() @@ -194,7 +193,7 @@ def load_vars(executor, for each_var in vars: assert isinstance(each_var, Variable) new_var = _clone_var_in_block_(load_block, each_var) - if load_file_name is None: + if filename is None: load_block.append_op( type='load', inputs={}, @@ -203,7 +202,7 @@ def load_vars(executor, else: load_var_map[new_var.name] = new_var - if load_file_name is not None: + if filename is not None: load_var_list = [] for name in sorted(load_var_map.keys()): load_var_list.append(load_var_map[name]) @@ -212,12 +211,12 @@ def load_vars(executor, type='load_combine', inputs={}, outputs={"Out": load_var_list}, - attrs={'file_path': os.path.join(dirname, load_file_name)}) + attrs={'file_path': os.path.join(dirname, filename)}) executor.run(load_prog) -def load_params(executor, dirname, main_program=None, load_file_name=None): +def load_params(executor, dirname, main_program=None, filename=None): """ load all parameters from directory by executor. """ @@ -226,11 +225,10 @@ def load_params(executor, dirname, main_program=None, load_file_name=None): dirname=dirname, main_program=main_program, predicate=is_parameter, - load_file_name=load_file_name) + filename=filename) -def load_persistables(executor, dirname, main_program=None, - load_file_name=None): +def load_persistables(executor, dirname, main_program=None, filename=None): """ load all persistables from directory by executor. """ @@ -239,7 +237,7 @@ def load_persistables(executor, dirname, main_program=None, dirname=dirname, main_program=main_program, predicate=is_persistable, - load_file_name=load_file_name) + filename=filename) def get_inference_program(target_vars, main_program=None): @@ -299,7 +297,8 @@ def save_inference_model(dirname, target_vars, executor, main_program=None, - save_file_name=None): + model_filename=None, + params_filename=None): """ Build a model especially for inference, and save it to directory by the executor. @@ -310,8 +309,11 @@ def save_inference_model(dirname, :param executor: executor that save inference model :param main_program: original program, which will be pruned to build the inference model. Default default_main_program(). - :param save_file_name: The name of a single file that all parameters are saved to. - If it is None, save parameters to separate files. + :param model_filename: The name of file to save inference program. + If not specified, default filename `__model__` will be used. + :param params_filename: The name of file to save parameters. + It is used for the case that all parameters are saved in a single binary file. + If not specified, parameters are considered saved in separate files. 
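With the renamed arguments, saving and loading a model with combined parameters becomes symmetric. A hedged sketch (directory and file names assumed, following the unittest convention):

.. code-block:: python

    # Save program and all parameters into two combined files; the same
    # names must be passed back to load_inference_model.
    fluid.io.save_inference_model(
        "recognize_digits.inference.model", ["img"], [prediction], exe,
        model_filename="__model_combined__",
        params_filename="__params_combined__")

    [program, feed_names, fetch_targets] = fluid.io.load_inference_model(
        "recognize_digits.inference.model", exe,
        model_filename="__model_combined__",
        params_filename="__params_combined__")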
:return: None """ @@ -342,15 +344,19 @@ def save_inference_model(dirname, prepend_feed_ops(inference_program, feeded_var_names) append_fetch_ops(inference_program, fetch_var_names) - if save_file_name == None: - model_file_name = dirname + "/__model__" + if model_filename is not None: + model_filename = os.path.basename(model_filename) else: - model_file_name = dirname + "/__model_combined__" + model_filename = "__model__" + model_filename = os.path.join(dirname, model_filename) - with open(model_file_name, "wb") as f: + if params_filename is not None: + params_filename = os.path.basename(params_filename) + + with open(model_filename, "wb") as f: f.write(inference_program.desc.serialize_to_string()) - save_persistables(executor, dirname, inference_program, save_file_name) + save_persistables(executor, dirname, inference_program, params_filename) def get_feed_targets_names(program): @@ -371,15 +377,21 @@ def get_fetch_targets_names(program): return fetch_targets_names -def load_inference_model(dirname, executor, load_file_name=None): +def load_inference_model(dirname, + executor, + model_filename=None, + params_filename=None): """ Load inference model from a directory :param dirname: directory path :param executor: executor that load inference model - :param load_file_name: The name of the single file that all parameters are loaded from. - If it is None, load parameters from separate files. - + :param model_filename: The name of file to load inference program. + If not specified, default filename `__model__` will be used. + :param params_filename: The name of file to load parameters. + It is used for the case that all parameters are saved in a single binary file. + If not specified, parameters are considered saved in separate files. + :return: [program, feed_target_names, fetch_targets] program: program especially for inference. 
feed_target_names: Names of variables that need to feed data @@ -388,16 +400,20 @@ def load_inference_model(dirname, executor, load_file_name=None): if not os.path.isdir(dirname): raise ValueError("There is no directory named '%s'", dirname) - if load_file_name == None: - model_file_name = dirname + "/__model__" + if model_filename is not None: + model_filename = os.path.basename(model_filename) else: - model_file_name = dirname + "/__model_combined__" + model_filename = "__model__" + model_filename = os.path.join(dirname, model_filename) + + if params_filename is not None: + params_filename = os.path.basename(params_filename) - with open(model_file_name, "rb") as f: + with open(model_filename, "rb") as f: program_desc_str = f.read() program = Program.parse_from_string(program_desc_str) - load_persistables(executor, dirname, program, load_file_name) + load_persistables(executor, dirname, program, params_filename) feed_target_names = get_feed_targets_names(program) fetch_target_names = get_fetch_targets_names(program) diff --git a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py index 8586ad4dfe..481bf6731d 100644 --- a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py +++ b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py @@ -78,7 +78,12 @@ def conv_net(img, label): return loss_net(conv_pool_2, label) -def train(nn_type, use_cuda, parallel, save_dirname, save_param_filename): +def train(nn_type, + use_cuda, + parallel, + save_dirname=None, + model_filename=None, + params_filename=None): if use_cuda and not fluid.core.is_compiled_with_cuda(): return img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32') @@ -146,7 +151,8 @@ def train(nn_type, use_cuda, parallel, save_dirname, save_param_filename): fluid.io.save_inference_model( save_dirname, ["img"], [prediction], exe, - save_file_name=save_param_filename) + model_filename=model_filename, + params_filename=params_filename) return else: print( @@ -158,7 +164,10 @@ def train(nn_type, use_cuda, parallel, save_dirname, save_param_filename): raise AssertionError("Loss of recognize digits is too large") -def infer(use_cuda, save_dirname=None, param_filename=None): +def infer(use_cuda, + save_dirname=None, + model_filename=None, + params_filename=None): if save_dirname is None: return @@ -171,8 +180,9 @@ def infer(use_cuda, save_dirname=None, param_filename=None): # the feed_target_names (the names of variables that will be feeded # data using feed operators), and the fetch_targets (variables that # we want to obtain data from using fetch operators). - [inference_program, feed_target_names, fetch_targets - ] = fluid.io.load_inference_model(save_dirname, exe, param_filename) + [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model( + save_dirname, exe, model_filename, params_filename) # The input's dimension of conv should be 4-D or 5-D. # Use normilized image pixels as input data, which should be in the range [-1.0, 1.0]. 
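A hedged sketch of constructing that input (batch of one, NCHW layout matching the conv net; names taken from the surrounding test):

.. code-block:: python

    import numpy as np

    batch_size = 1
    # Pseudo image data drawn uniformly from [-1.0, 1.0].
    tensor_img = np.random.uniform(
        -1.0, 1.0, [batch_size, 1, 28, 28]).astype("float32")
    results = exe.run(inference_program,
                      feed={feed_target_names[0]: tensor_img},
                      fetch_list=fetch_targets)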
@@ -189,25 +199,27 @@ def infer(use_cuda, save_dirname=None, param_filename=None):


 def main(use_cuda, parallel, nn_type, combine):
+    save_dirname = None
+    model_filename = None
+    params_filename = None
     if not use_cuda and not parallel:
         save_dirname = "recognize_digits_" + nn_type + ".inference.model"
-        save_filename = None
         if combine == True:
-            save_filename = "__params_combined__"
-    else:
-        save_dirname = None
-        save_filename = None
+            model_filename = "__model_combined__"
+            params_filename = "__params_combined__"

     train(
         nn_type=nn_type,
         use_cuda=use_cuda,
         parallel=parallel,
         save_dirname=save_dirname,
-        save_param_filename=save_filename)
+        model_filename=model_filename,
+        params_filename=params_filename)
     infer(
         use_cuda=use_cuda,
         save_dirname=save_dirname,
-        param_filename=save_filename)
+        model_filename=model_filename,
+        params_filename=params_filename)


 class TestRecognizeDigits(unittest.TestCase):
-- 
GitLab


From 07923ba006220bf39ebd9fcf19c6b930012e5139 Mon Sep 17 00:00:00 2001
From: dzhwinter
Date: Mon, 12 Feb 2018 17:56:15 +0800
Subject: [PATCH 090/217] Memory/dropout4 (#8407)

* "merge random generator kernel and mul"
* "fix dropout"
---
 paddle/fluid/operators/dropout_op.cu | 42 ++++++++++++++--------------
 1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/paddle/fluid/operators/dropout_op.cu b/paddle/fluid/operators/dropout_op.cu
index 4ae9f4ce54..a4a96d48f9 100644
--- a/paddle/fluid/operators/dropout_op.cu
+++ b/paddle/fluid/operators/dropout_op.cu
@@ -23,24 +23,23 @@ namespace paddle {
 namespace operators {

 template <typename T, typename AttrType>
-struct MaskGenerator {
-  AttrType dropout_prob;
-  int seed;
+__global__ void RandomGenerator(const size_t n, const int seed,
+                                const AttrType dropout_prob, const T* src,
+                                T* mask_data, T* dst) {
+  thrust::minstd_rand rng;
+  rng.seed(seed);
+  thrust::uniform_real_distribution<AttrType> dist(0, 1);

-  __host__ __device__ MaskGenerator(AttrType dropout_prob, int seed)
-      : dropout_prob(dropout_prob), seed(seed) {}
-
-  inline __host__ __device__ T operator()(const unsigned int n) const {
-    thrust::minstd_rand rng;
-    rng.seed(seed);
-    thrust::uniform_real_distribution<AttrType> dist(0, 1);
-    rng.discard(n);
+  int idx = blockDim.x * blockIdx.x + threadIdx.x;
+  for (; idx < n; idx += blockDim.x * gridDim.x) {
     if (dist(rng) < dropout_prob) {
-      return static_cast<T>(0);
+      mask_data[idx] = static_cast<T>(0);
+    } else {
+      mask_data[idx] = static_cast<T>(1);
     }
-    return static_cast<T>(1);
+    dst[idx] = mask_data[idx] * src[idx];
   }
-};
+}

 // It seems that Eigen::Tensor::setRandom in GPU will SEGFAULT.
 // Use std::random and thrust::random(thrust is a std library in CUDA) to
@@ -61,18 +60,19 @@ class GPUDropoutKernel : public framework::OpKernel<T> {
     if (!context.Attr<bool>("is_test")) {
       auto* mask = context.Output<Tensor>("Mask");
       auto* mask_data = mask->mutable_data<T>(context.GetPlace());
-      int size = framework::product(mask->dims());
+      size_t size = framework::product(mask->dims());
+      auto* x_data = x->data<T>();
+      auto* y_data = y->mutable_data<T>(context.GetPlace());

       std::random_device rnd;
       int seed = context.Attr<bool>("fix_seed") ? context.Attr<int>("seed") : rnd();
-      thrust::counting_iterator<unsigned int> index_sequence_begin(0);
-      thrust::transform(index_sequence_begin, index_sequence_begin + size,
-                        thrust::device_ptr<T>(mask_data),
-                        MaskGenerator<T, AttrType>(dropout_prob, seed));
-      auto M = EigenMatrix<T>::Reshape(*mask, 1);
-      Y.device(place) = X * M;
+      int threads = 512;
+      int grid = (x->numel() + threads - 1) / threads;
+      RandomGenerator<T, AttrType><<<grid, threads, 0,
+                                     context.cuda_device_context().stream()>>>(
+          size, seed, dropout_prob, x_data, mask_data, y_data);
     } else {
       Y.device(place) = X * (1.0f - dropout_prob);
     }
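The old thrust::transform pass produced only the mask; the new grid-stride
kernel fuses mask generation and the element-wise multiply into a single
launch. In NumPy terms, the two branches of the kernel compute the following
(a sketch of the semantics only, with a hypothetical helper name, not the
actual implementation):

.. code-block:: python

    import numpy as np

    def dropout_forward(x, dropout_prob, is_test, rng):
        if not is_test:
            # training: zero out each element with probability dropout_prob
            # and keep the mask for the backward pass
            mask = (rng.uniform(0.0, 1.0, x.shape) >=
                    dropout_prob).astype(x.dtype)
            return x * mask, mask
        # inference: no masking, just scale by the keep probability
        return x * (1.0 - dropout_prob), None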
-- 
GitLab


From 6e79d01b6517667123fe8897613a5821be91b94b Mon Sep 17 00:00:00 2001
From: chengduoZH
Date: Mon, 12 Feb 2018 17:24:35 +0800
Subject: [PATCH 091/217] merge prior_box and multi_box

---
 python/paddle/v2/fluid/layers/detection.py    | 486 +++++++-----------
 .../paddle/v2/fluid/tests/test_detection.py   |  44 +-
 2 files changed, 189 insertions(+), 341 deletions(-)

diff --git a/python/paddle/v2/fluid/layers/detection.py b/python/paddle/v2/fluid/layers/detection.py
index 6af5c8388b..5ae4da1ea3 100644
--- a/python/paddle/v2/fluid/layers/detection.py
+++ b/python/paddle/v2/fluid/layers/detection.py
@@ -23,7 +23,6 @@ import nn
 import math

 __all__ = [
-    'prior_box',
     'multi_box_head',
     'bipartite_match',
     'target_assign',
@@ -133,219 +132,6 @@ def detection_output(scores,
     return nmsed_outs


-def prior_box(inputs,
-              image,
-              min_ratio,
-              max_ratio,
-              aspect_ratios,
-              base_size,
-              steps=None,
-              step_w=None,
-              step_h=None,
-              offset=0.5,
-              variance=[0.1, 0.1, 0.1, 0.1],
-              flip=False,
-              clip=False,
-              min_sizes=None,
-              max_sizes=None,
-              name=None):
-    """
-    **Prior_boxes**
-
-    Generate prior boxes for SSD(Single Shot MultiBox Detector)
-    algorithm. The details of this algorithm, please refer the
-    section 2.2 of SSD paper (SSD: Single Shot MultiBox Detector)
-    `<https://arxiv.org/abs/1512.02325>`_ .
-
-    Args:
-        inputs(list|tuple): The list of input Variables, the format
-            of all Variables is NCHW.
-        image(Variable): The input image data of PriorBoxOp,
-            the layout is NCHW.
-        min_ratio(int): the min ratio of generated prior boxes.
-        max_ratio(int): the max ratio of generated prior boxes.
-        aspect_ratios(list|tuple): the aspect ratios of generated prior
-            boxes. The length of input and aspect_ratios must be equal.
-        base_size(int): the base_size is used to get min_size
-            and max_size according to min_ratio and max_ratio.
-        step_w(list|tuple|None): Prior boxes step
-            across width. If step_w[i] == 0.0, the prior boxes step
-            across width of the inputs[i] will be automatically calculated.
-        step_h(list|tuple|None): Prior boxes step
-            across height, If step_h[i] == 0.0, the prior boxes
-            step across height of the inputs[i] will be automatically calculated.
-        offset(float, optional, default=0.5): Prior boxes center offset.
-        variance(list|tuple|[0.1, 0.1, 0.1, 0.1]): the variances
-            to be encoded in prior boxes.
-        flip(bool|False): Whether to flip
-            aspect ratios.
-        clip(bool, optional, default=False): Whether to clip
-            out-of-boundary boxes.
-        min_sizes(list|tuple|None): If `len(inputs) <=2`,
-            min_sizes must be set up, and the length of min_sizes
-            should equal to the length of inputs.
-        max_sizes(list|tuple|None): If `len(inputs) <=2`,
-            max_sizes must be set up, and the length of min_sizes
-            should equal to the length of inputs.
-        name(str|None): Name of the prior box layer.
-
-    Returns:
-        boxes(Variable): the output prior boxes of PriorBox.
-            The layout is [num_priors, 4].
num_priors is the total - box count of each position of inputs - - Examples: - .. code-block:: python - - prior_box( - inputs = [conv1, conv2, conv3, conv4, conv5, conv6], - image = data, - min_ratio = 20, # 0.20 - max_ratio = 90, # 0.90 - offset = 0.5, - base_size = 300, - variance = [0.1,0.1,0.1,0.1], - aspect_ratios = [[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]], - flip=True, - clip=True) - """ - - def _prior_box_(input, - image, - min_sizes, - max_sizes, - aspect_ratios, - variance, - flip=False, - clip=False, - step_w=0.0, - step_h=0.0, - offset=0.5, - name=None): - helper = LayerHelper("prior_box", **locals()) - dtype = helper.input_dtype() - - box = helper.create_tmp_variable(dtype) - var = helper.create_tmp_variable(dtype) - helper.append_op( - type="prior_box", - inputs={"Input": input, - "Image": image}, - outputs={"Boxes": box, - "Variances": var}, - attrs={ - 'min_sizes': min_sizes, - 'max_sizes': max_sizes, - 'aspect_ratios': aspect_ratios, - 'variances': variance, - 'flip': flip, - 'clip': clip, - 'step_w': step_w, - 'step_h': step_h, - 'offset': offset - }) - return box, var - - def _reshape_with_axis_(input, axis=1): - if not (axis > 0 and axis < len(input.shape)): - raise ValueError("The axis should be smaller than " - "the arity of input and bigger than 0.") - new_shape = [ - -1, reduce(lambda x, y: x * y, input.shape[axis:len(input.shape)]) - ] - out = ops.reshape(x=input, shape=new_shape) - return out - - def _is_list_or_tuple_(data): - return (isinstance(data, list) or isinstance(data, tuple)) - - def _is_list_or_tuple_and_equal(data, length, err_info): - if not (_is_list_or_tuple_(data) and len(data) == length): - raise ValueError(err_info) - - if not _is_list_or_tuple_(inputs): - raise ValueError('inputs should be a list or tuple.') - - num_layer = len(inputs) - - if num_layer <= 2: - assert min_sizes is not None and max_sizes is not None - assert len(min_sizes) == num_layer and len(max_sizes) == num_layer - else: - min_sizes = [] - max_sizes = [] - step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2))) - for ratio in xrange(min_ratio, max_ratio + 1, step): - min_sizes.append(base_size * ratio / 100.) - max_sizes.append(base_size * (ratio + step) / 100.) 
-        min_sizes = [base_size * .10] + min_sizes
-        max_sizes = [base_size * .20] + max_sizes
-
-    if aspect_ratios:
-        _is_list_or_tuple_and_equal(
-            aspect_ratios, num_layer,
-            'aspect_ratios should be list or tuple, and the length of inputs '
-            'and aspect_ratios should be the same.')
-    if step_h:
-        _is_list_or_tuple_and_equal(
-            step_h, num_layer,
-            'step_h should be list or tuple, and the length of inputs and '
-            'step_h should be the same.')
-    if step_w:
-        _is_list_or_tuple_and_equal(
-            step_w, num_layer,
-            'step_w should be list or tuple, and the length of inputs and '
-            'step_w should be the same.')
-    if steps:
-        _is_list_or_tuple_and_equal(
-            steps, num_layer,
-            'steps should be list or tuple, and the length of inputs and '
-            'step_w should be the same.')
-        step_w = steps
-        step_h = steps
-
-    box_results = []
-    var_results = []
-    for i, input in enumerate(inputs):
-        min_size = min_sizes[i]
-        max_size = max_sizes[i]
-        aspect_ratio = []
-        if not _is_list_or_tuple_(min_size):
-            min_size = [min_size]
-        if not _is_list_or_tuple_(max_size):
-            max_size = [max_size]
-        if aspect_ratios:
-            aspect_ratio = aspect_ratios[i]
-            if not _is_list_or_tuple_(aspect_ratio):
-                aspect_ratio = [aspect_ratio]
-
-        box, var = _prior_box_(input, image, min_size, max_size, aspect_ratio,
-                               variance, flip, clip, step_w[i]
-                               if step_w else 0.0, step_h[i]
-                               if step_w else 0.0, offset)
-
-        box_results.append(box)
-        var_results.append(var)
-
-    if len(box_results) == 1:
-        box = box_results[0]
-        var = var_results[0]
-    else:
-        reshaped_boxes = []
-        reshaped_vars = []
-        for i in range(len(box_results)):
-            reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
-            reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))
-
-        box = tensor.concat(reshaped_boxes)
-        var = tensor.concat(reshaped_vars)
-
-    return box, var
-
-
 def bipartite_match(dist_matrix, name=None):
     """
     **Bipartite matching operator**
@@ -672,106 +458,162 @@ def ssd_loss(location,


 def multi_box_head(inputs,
+                   image,
+                   base_size,
                    num_classes,
+                   aspect_ratios,
+                   min_ratio,
+                   max_ratio,
                    min_sizes=None,
                    max_sizes=None,
-                   min_ratio=None,
-                   max_ratio=None,
-                   aspect_ratios=None,
+                   steps=None,
+                   step_w=None,
+                   step_h=None,
+                   offset=0.5,
+                   variance=[0.1, 0.1, 0.1, 0.1],
                    flip=False,
-                   share_location=True,
+                   clip=False,
                    kernel_size=1,
-                   pad=1,
+                   pad=0,
                    stride=1,
-                   use_batchnorm=False,
-                   base_size=None):
+                   name=None):
     """
-    **Multi Box Head**
+    **Prior_boxes**

-    Generate prior boxes' location and confidence for SSD(Single
-    Shot MultiBox Detector)algorithm. The details of this algorithm,
-    please refer the section 2.1 of SSD paper (SSD: Single Shot
-    MultiBox Detector)`<https://arxiv.org/abs/1512.02325>`_ .
+    Generate prior boxes for SSD (Single Shot MultiBox Detector)
+    algorithm. For details of this algorithm, please refer to Section
+    2.2 of the SSD paper (SSD: Single Shot MultiBox Detector)
+    `<https://arxiv.org/abs/1512.02325>`_ .

     Args:
         inputs(list|tuple): The list of input Variables, the format
             of all Variables is NCHW.
-        num_classes(int): The number of classes.
-        min_sizes(list|tuple|None): The number of
-            min_sizes is used to compute the number of predicted box.
-            If the min_size is None, it will be computed according
-            to min_ratio and max_ratio.
-        max_sizes(list|tuple|None): The number of max_sizes
-            is used to compute the the number of predicted box.
-        min_ratio(int|None): If the min_sizes is None, min_ratio and max_ratio
-            will be used to compute the min_sizes and max_sizes.
-        max_ratio(int|None): If the min_sizes is None, max_ratio and min_ratio
-            will be used to compute the min_sizes and max_sizes.
-        aspect_ratios(list|tuple): The number of the aspect ratios is used to
-            compute the number of prior box.
+        image(Variable): The input image data of PriorBoxOp,
+            the layout is NCHW.
         base_size(int): the base_size is used to get min_size
             and max_size according to min_ratio and max_ratio.
-        flip(bool|False): Whether to flip
-            aspect ratios.
-        name(str|None): Name of the prior box layer.
+        num_classes(int): The number of classes.
+        aspect_ratios(list|tuple): the aspect ratios of generated prior
+            boxes. The length of inputs and aspect_ratios must be equal.
+        min_ratio(int): the min ratio of generated prior boxes.
+        max_ratio(int): the max ratio of generated prior boxes.
+        min_sizes(list|tuple|None): If `len(inputs) <= 2`,
+            min_sizes must be set up, and the length of min_sizes
+            should equal the length of inputs. Default: None.
+        max_sizes(list|tuple|None): If `len(inputs) <= 2`,
+            max_sizes must be set up, and the length of max_sizes
+            should equal the length of inputs. Default: None.
+        steps(list|tuple): If step_w and step_h are the same,
+            step_w and step_h can be replaced by steps.
+        step_w(list|tuple): Prior boxes step
+            across width. If step_w[i] == 0.0, the prior boxes step
+            across width of the inputs[i] will be automatically
+            calculated. Default: None.
+        step_h(list|tuple): Prior boxes step across height. If
+            step_h[i] == 0.0, the prior boxes step across height of
+            the inputs[i] will be automatically calculated. Default: None.
+        offset(float): Prior boxes center offset. Default: 0.5
+        variance(list|tuple): the variances to be encoded in prior boxes.
+            Default: [0.1, 0.1, 0.1, 0.1].
+        flip(bool): Whether to flip aspect ratios. Default: False.
+        clip(bool): Whether to clip out-of-boundary boxes. Default: False.
+        kernel_size(int): The kernel size of conv2d. Default: 1.
+        pad(int|list|tuple): The padding of conv2d. Default: 0.
+        stride(int|list|tuple): The stride of conv2d. Default: 1.
+        name(str): Name of the prior box layer. Default: None.

     Returns:
-
         mbox_loc(list): The predicted boxes' location of the inputs.
             The layout of each element is [N, H, W, Priors]. Priors
             is the number of predicted boxes at each position of each input.
         mbox_conf(list): The predicted boxes' confidence of the inputs.
             The layout of each element is [N, H, W, Priors]. Priors
             is the number of predicted boxes at each position of each input.
+        boxes(Variable): the output prior boxes of PriorBox.
+            The layout is [num_priors, 4]. num_priors is the total
+            box count of each position of inputs.
+        Variances(Variable): the expanded variances of PriorBox.
+            The layout is [num_priors, 4]. num_priors is the total
+            box count of each position of inputs.

     Examples:
         ..
code-block:: python - - mbox_locs, mbox_confs = detection.multi_box_head( - inputs=[conv1, conv2, conv3, conv4, conv5, conv5], - num_classes=21, - min_ratio=20, - max_ratio=90, - aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]], - base_size=300, - flip=True) + mbox_locs, mbox_confs, box, var = layers.multi_box_head( + inputs=[conv1, conv2, conv3, conv4, conv5, conv5], + image=images, + num_classes=21, + min_ratio=20, + max_ratio=90, + aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]], + base_size=300, + offset=0.5, + flip=True, + clip=True) """ - def _is_equal_(len1, len2, err_info): - if not (len1 == len2): - raise ValueError(err_info) + def _prior_box_(input, + image, + min_sizes, + max_sizes, + aspect_ratios, + variance, + flip=False, + clip=False, + step_w=0.0, + step_h=0.0, + offset=0.5, + name=None): + helper = LayerHelper("prior_box", **locals()) + dtype = helper.input_dtype() + + box = helper.create_tmp_variable(dtype) + var = helper.create_tmp_variable(dtype) + helper.append_op( + type="prior_box", + inputs={"Input": input, + "Image": image}, + outputs={"Boxes": box, + "Variances": var}, + attrs={ + 'min_sizes': min_sizes, + 'max_sizes': max_sizes, + 'aspect_ratios': aspect_ratios, + 'variances': variance, + 'flip': flip, + 'clip': clip, + 'step_w': step_w, + 'step_h': step_h, + 'offset': offset + }) + return box, var + + def _reshape_with_axis_(input, axis=1): + if not (axis > 0 and axis < len(input.shape)): + raise ValueError("The axis should be smaller than " + "the arity of input and bigger than 0.") + new_shape = [ + -1, reduce(lambda x, y: x * y, input.shape[axis:len(input.shape)]) + ] + out = ops.reshape(x=input, shape=new_shape) + return out def _is_list_or_tuple_(data): return (isinstance(data, list) or isinstance(data, tuple)) + def _is_list_or_tuple_and_equal(data, length, err_info): + if not (_is_list_or_tuple_(data) and len(data) == length): + raise ValueError(err_info) + if not _is_list_or_tuple_(inputs): raise ValueError('inputs should be a list or tuple.') - if min_sizes is not None: - _is_equal_( - len(inputs), - len(min_sizes), 'the length of min_sizes ' - 'and inputs should be equal.') - - if max_sizes is not None: - _is_equal_( - len(inputs), - len(max_sizes), 'the length of max_sizes ' - 'and inputs should be equal.') - - if aspect_ratios is not None: - _is_equal_( - len(inputs), - len(aspect_ratios), 'the length of aspect_ratios ' - 'and inputs should be equal.') - - if min_sizes is None: - # If min_sizes is None, min_sizes and max_sizes - # will be set according to max_ratio and min_ratio. - num_layer = len(inputs) - assert max_ratio is not None and min_ratio is not None,\ - 'max_ratio and min_ratio must be not None.' - assert num_layer >= 3, 'The length of the input data is at least three.' 
+    num_layer = len(inputs)
+
+    if num_layer <= 2:
+        assert min_sizes is not None and max_sizes is not None
+        assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
+    else:
         min_sizes = []
         max_sizes = []
         step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
@@ -781,21 +623,43 @@ def multi_box_head(inputs,
         min_sizes = [base_size * .10] + min_sizes
         max_sizes = [base_size * .20] + max_sizes

+    if aspect_ratios:
+        _is_list_or_tuple_and_equal(
+            aspect_ratios, num_layer,
+            'aspect_ratios should be list or tuple, and the length of inputs '
+            'and aspect_ratios should be the same.')
+    if step_h:
+        _is_list_or_tuple_and_equal(
+            step_h, num_layer,
+            'step_h should be list or tuple, and the length of inputs and '
+            'step_h should be the same.')
+    if step_w:
+        _is_list_or_tuple_and_equal(
+            step_w, num_layer,
+            'step_w should be list or tuple, and the length of inputs and '
+            'step_w should be the same.')
+    if steps:
+        _is_list_or_tuple_and_equal(
+            steps, num_layer,
+            'steps should be list or tuple, and the length of inputs and '
+            'step_w should be the same.')
+        step_w = steps
+        step_h = steps
+
     mbox_locs = []
     mbox_confs = []
+    box_results = []
+    var_results = []
     for i, input in enumerate(inputs):
         min_size = min_sizes[i]
+        max_size = max_sizes[i]
+
         if not _is_list_or_tuple_(min_size):
             min_size = [min_size]
-
-        max_size = []
-        if max_sizes is not None:
-            max_size = max_sizes[i]
-            if not _is_list_or_tuple_(max_size):
-                max_size = [max_size]
-                _is_equal_(
-                    len(max_size),
-                    len(min_size),
+        if not _is_list_or_tuple_(max_size):
+            max_size = [max_size]
+        if not (len(max_size) == len(min_size)):
+            raise ValueError(
                 'the length of max_size and min_size should be equal.')

         aspect_ratio = []
@@ -804,23 +668,18 @@ def multi_box_head(inputs,
             if not _is_list_or_tuple_(aspect_ratio):
                 aspect_ratio = [aspect_ratio]

-        # get the number of prior box on each location
-        num_priors_per_location = 0
-        if max_sizes is not None:
-            num_priors_per_location = len(min_size) + \
-                len(aspect_ratio) * len(min_size) +\
-                len(max_size)
-        else:
-            num_priors_per_location = len(min_size) +\
-                len(aspect_ratio) * len(min_size)
-        if flip:
-            num_priors_per_location += len(aspect_ratio) * len(min_size)
-
-        # get mbox_loc
-        num_loc_output = num_priors_per_location * 4
-        if share_location:
-            num_loc_output *= num_classes
+        box, var = _prior_box_(input, image, min_size, max_size, aspect_ratio,
+                               variance, flip, clip, step_w[i]
+                               if step_w else 0.0, step_h[i]
+                               if step_h else 0.0, offset)
+
+        box_results.append(box)
+        var_results.append(var)
+
+        num_boxes = box.shape[2]
+        # get box_loc
+        num_loc_output = num_boxes * num_classes * 4
         mbox_loc = nn.conv2d(
             input=input,
             num_filters=num_loc_output,
@@ -832,7 +691,7 @@ def multi_box_head(inputs,
         mbox_locs.append(mbox_loc)

         # get conf_loc
-        num_conf_output = num_priors_per_location * num_classes
+        num_conf_output = num_boxes * num_classes
         conf_loc = nn.conv2d(
             input=input,
             num_filters=num_conf_output,
@@ -842,4 +701,17 @@ def multi_box_head(inputs,
         conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
         mbox_confs.append(conf_loc)

-    return mbox_locs, mbox_confs
+    if len(box_results) == 1:
+        box = box_results[0]
+        var = var_results[0]
+    else:
+        reshaped_boxes = []
+        reshaped_vars = []
+        for i in range(len(box_results)):
+            reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
+            reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))
+
+        box = tensor.concat(reshaped_boxes)
+        var = tensor.concat(reshaped_vars)
+
+    return mbox_locs, mbox_confs, box, var
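The ratio-to-size rule above spreads the ratios (percentages of base_size)
evenly over the feature maps, with the first map pinned to 10% and 20% of
base_size. A standalone sketch of the same arithmetic (not part of the patch):

.. code-block:: python

    import math

    def derive_sizes(num_layer, min_ratio, max_ratio, base_size):
        min_sizes, max_sizes = [], []
        step = int(math.floor((max_ratio - min_ratio) / (num_layer - 2)))
        for ratio in xrange(min_ratio, max_ratio + 1, step):
            min_sizes.append(base_size * ratio / 100.)
            max_sizes.append(base_size * (ratio + step) / 100.)
        # the first feature map uses fixed 10% and 20% of base_size
        min_sizes = [base_size * .10] + min_sizes
        max_sizes = [base_size * .20] + max_sizes
        return min_sizes, max_sizes

    # derive_sizes(6, 20, 90, 300) gives
    # min_sizes = [30.0, 60.0, 111.0, 162.0, 213.0, 264.0]
    # max_sizes = [60.0, 111.0, 162.0, 213.0, 264.0, 315.0]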
diff --git a/python/paddle/v2/fluid/tests/test_detection.py b/python/paddle/v2/fluid/tests/test_detection.py
index dd28a05313..8c0ca0d8fc 100644
--- a/python/paddle/v2/fluid/tests/test_detection.py
+++ b/python/paddle/v2/fluid/tests/test_detection.py
@@ -109,16 +109,19 @@ class TestDetection(unittest.TestCase):
         print(str(program))


-class TestPriorBox(unittest.TestCase):
-    def test_prior_box(self):
+class TestMultiBoxHead(unittest.TestCase):
+    def test_multi_box_head(self):
         data_shape = [3, 224, 224]
-        box, var = self.prior_box_output(data_shape)
+        mbox_locs, mbox_confs, box, var = self.multi_box_head_output(data_shape)

         assert len(box.shape) == 2
         assert box.shape == var.shape
         assert box.shape[1] == 4

-    def prior_box_output(self, data_shape):
+        for loc, conf in zip(mbox_locs, mbox_confs):
+            assert loc.shape[1:3] == conf.shape[1:3]
+
+    def multi_box_head_output(self, data_shape):
         images = fluid.layers.data(
             name='pixel', shape=data_shape, dtype='float32')
         conv1 = fluid.layers.conv2d(images, 3, 3, 2)
@@ -127,46 +130,19 @@ class TestPriorBox(unittest.TestCase):
         conv4 = fluid.layers.conv2d(conv3, 3, 3, 2)
         conv5 = fluid.layers.conv2d(conv4, 3, 3, 2)

-        box, var = layers.prior_box(
+        mbox_locs, mbox_confs, box, var = layers.multi_box_head(
             inputs=[conv1, conv2, conv3, conv4, conv5, conv5],
             image=images,
+            num_classes=21,
             min_ratio=20,
             max_ratio=90,
-            # steps=[8, 16, 32, 64, 100, 300],
             aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
             base_size=300,
             offset=0.5,
             flip=True,
             clip=True)
-        return box, var
-
-
-class TestMultiBoxHead(unittest.TestCase):
-    def test_prior_box(self):
-        data_shape = [3, 224, 224]
-        mbox_locs, mbox_confs = self.multi_box_output(data_shape)
-
-        for loc, conf in zip(mbox_locs, mbox_confs):
-            assert loc.shape[1:3] == conf.shape[1:3]
-
-    def multi_box_output(self, data_shape):
-        images = fluid.layers.data(
-            name='pixel', shape=data_shape, dtype='float32')
-        conv1 = fluid.layers.conv2d(images, 3, 3, 2)
-        conv2 = fluid.layers.conv2d(conv1, 3, 3, 2)
-        conv3 = fluid.layers.conv2d(conv2, 3, 3, 2)
-        conv4 = fluid.layers.conv2d(conv3, 3, 3, 2)
-        conv5 = fluid.layers.conv2d(conv4, 3, 3, 2)
-        mbox_locs, mbox_confs = detection.multi_box_head(
-            inputs=[conv1, conv2, conv3, conv4, conv5, conv5],
-            num_classes=21,
-            min_ratio=20,
-            max_ratio=90,
-            aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
-            base_size=300,
-            flip=True)
-        return mbox_locs, mbox_confs
+        return mbox_locs, mbox_confs, box, var


 if __name__ == '__main__':
-- 
GitLab


From 24509f4af942bb250564756ad636691c7921e1df Mon Sep 17 00:00:00 2001
From: qingqing01
Date: Mon, 12 Feb 2018 18:48:00 +0800
Subject: [PATCH 092/217] Fix the grammar in copyright.
(#8403) --- CMakeLists.txt | 2 +- LICENSE | 2 +- benchmark/cluster/vgg16/vgg16_fluid.py | 2 +- benchmark/cluster/vgg16/vgg16_v2.py | 2 +- benchmark/paddle/image/alexnet.py | 2 +- benchmark/paddle/image/provider.py | 2 +- benchmark/paddle/rnn/imdb.py | 2 +- benchmark/paddle/rnn/provider.py | 2 +- benchmark/tensorflow/image/alexnet.py | 2 +- benchmark/tensorflow/image/alexnet_multi_gpu.py | 2 +- benchmark/tensorflow/image/googlenet.py | 2 +- benchmark/tensorflow/image/googlenet_multi_gpu.py | 2 +- benchmark/tensorflow/image/smallnet_mnist_cifar.py | 2 +- benchmark/tensorflow/rnn/reader.py | 2 +- cmake/configure.cmake | 2 +- cmake/cross_compiling/android.cmake | 2 +- cmake/cross_compiling/host.cmake | 2 +- cmake/cross_compiling/ios.cmake | 2 +- cmake/cross_compiling/raspberry_pi.cmake | 2 +- cmake/external/boost.cmake | 2 +- cmake/external/cares.cmake | 2 +- cmake/external/gflags.cmake | 2 +- cmake/external/glog.cmake | 2 +- cmake/external/grpc.cmake | 2 +- cmake/external/gtest.cmake | 2 +- cmake/external/mkldnn.cmake | 2 +- cmake/external/mklml.cmake | 2 +- cmake/external/nccl.cmake | 2 +- cmake/external/openblas.cmake | 2 +- cmake/external/protobuf.cmake | 2 +- cmake/external/pybind11.cmake | 2 +- cmake/external/python.cmake | 2 +- cmake/external/swig.cmake | 2 +- cmake/external/warpctc.cmake | 2 +- cmake/external/zlib.cmake | 2 +- cmake/generic.cmake | 2 +- cmake/make_resource.py | 2 +- cmake/system.cmake | 2 +- doc/faq/local/src/reduce_min_pool_size.py | 2 +- doc/faq/local/src/word2vec_config.py | 2 +- doc/faq/local/src/word2vec_dataprovider.py | 2 +- doc/getstarted/concepts/src/infer.py | 2 +- doc/getstarted/concepts/src/train.py | 2 +- doc/howto/cluster/src/word2vec/api_train_v2.py | 2 +- .../cluster/src/word2vec/api_train_v2_cluster.py | 2 +- doc/howto/cluster/src/word2vec/prepare.py | 2 +- go/CMakeLists.txt | 2 +- go/cmd/master/CMakeLists.txt | 2 +- go/cmd/master/master.go | 2 +- go/cmd/pserver/CMakeLists.txt | 2 +- go/cmd/pserver/pserver.go | 2 +- go/connection/conn.go | 2 +- go/master/CMakeLists.txt | 2 +- go/master/c/CMakeLists.txt | 2 +- go/master/c/client.go | 2 +- go/master/client.go | 2 +- go/master/client_internal_test.go | 2 +- go/master/client_test.go | 2 +- go/master/etcd_client.go | 2 +- go/master/inmem_store.go | 2 +- go/master/service.go | 2 +- go/master/service_internal_test.go | 2 +- go/pserver/CMakeLists.txt | 2 +- go/pserver/client/CMakeLists.txt | 2 +- go/pserver/client/c/CMakeLists.txt | 2 +- go/pserver/client/c/cclient.go | 2 +- go/pserver/client/c/test/CMakeLists.txt | 2 +- go/pserver/client/c/test/test_cclient.c | 2 +- go/pserver/client/c/test/test_mnist.py | 2 +- go/pserver/client/c/test/test_train.py | 2 +- go/pserver/client/client.go | 2 +- go/pserver/client/client_test.go | 2 +- go/pserver/client/etcd_client.go | 2 +- go/pserver/etcd_client.go | 2 +- go/pserver/optimizer.go | 2 +- go/pserver/optimizer_test.go | 2 +- go/pserver/service.go | 2 +- go/pserver/service_test.go | 2 +- go/utils/networkhelper/CMakeLists.txt | 2 +- go/utils/networkhelper/helper.go | 2 +- go/utils/networkhelper/helper_test.go | 2 +- paddle/api/Arguments.cpp | 2 +- paddle/api/ConfigParser.cpp | 2 +- paddle/api/Evaluator.cpp | 2 +- paddle/api/GradientMachine.cpp | 2 +- paddle/api/Internal.h | 2 +- paddle/api/Matrix.cpp | 2 +- paddle/api/PaddleAPI.h | 2 +- paddle/api/PaddleAPIPrivate.h | 2 +- paddle/api/Parameter.cpp | 2 +- paddle/api/ParameterOptimizer.cpp | 2 +- paddle/api/ParameterUpdater.cpp | 2 +- paddle/api/SequenceGenerator.cpp | 2 +- paddle/api/Trainer.cpp | 2 +- 
paddle/api/Util.cpp | 2 +- paddle/api/Vector.cpp | 2 +- paddle/api/test/testTrainConfig.py | 2 +- paddle/capi/Arguments.cpp | 2 +- paddle/capi/Main.cpp | 2 +- paddle/capi/Matrix.cpp | 2 +- paddle/capi/Vector.cpp | 2 +- paddle/capi/arguments.h | 2 +- paddle/capi/capi.h | 2 +- paddle/capi/capi_private.h | 2 +- paddle/capi/error.cpp | 2 +- paddle/capi/error.h | 2 +- .../capi/examples/model_inference/common/common.h | 2 +- paddle/capi/examples/model_inference/dense/main.c | 2 +- .../model_inference/dense/merge_v2_model.py | 2 +- .../examples/model_inference/dense/mnist_v2.py | 2 +- .../model_inference/dense/trainer_config.py | 2 +- .../examples/model_inference/multi_thread/main.c | 2 +- .../model_inference/multi_thread/main_gpu.c | 2 +- .../model_inference/multi_thread/trainer_config.py | 14 +++++++++++++- .../capi/examples/model_inference/sequence/main.c | 2 +- .../model_inference/sequence/trainer_config.py | 2 +- .../examples/model_inference/sparse_binary/main.c | 2 +- .../sparse_binary/trainer_config.py | 14 +++++++++++++- paddle/capi/gradient_machine.cpp | 2 +- paddle/capi/gradient_machine.h | 2 +- paddle/capi/main.h | 2 +- paddle/capi/matrix.h | 2 +- paddle/capi/tests/test_Arguments.cpp | 2 +- paddle/capi/tests/test_GradientMachine.cpp | 2 +- paddle/capi/tests/test_Matrix.cpp | 2 +- paddle/capi/tests/test_Vector.cpp | 2 +- paddle/capi/tests/test_predict_network.py | 2 +- paddle/capi/vector.h | 2 +- paddle/cuda/include/hl_activation_functions.h | 2 +- paddle/cuda/include/hl_aggregate.h | 2 +- paddle/cuda/include/hl_avx_functions.h | 2 +- paddle/cuda/include/hl_base.h | 2 +- paddle/cuda/include/hl_batch_norm.h | 2 +- paddle/cuda/include/hl_batch_transpose.h | 2 +- paddle/cuda/include/hl_cnn.h | 2 +- paddle/cuda/include/hl_cpu_gru.cuh | 2 +- paddle/cuda/include/hl_cpu_lstm.cuh | 2 +- paddle/cuda/include/hl_cpu_matrix_kernel.cuh | 2 +- .../cuda/include/hl_cpu_matrix_kernel_detail.cuh | 2 +- paddle/cuda/include/hl_cpu_scalar.cuh | 2 +- paddle/cuda/include/hl_cpu_simd_neon.cuh | 2 +- paddle/cuda/include/hl_cpu_simd_sse.cuh | 2 +- paddle/cuda/include/hl_cuda.h | 2 +- paddle/cuda/include/hl_cuda.ph | 2 +- paddle/cuda/include/hl_cuda_cublas.h | 2 +- paddle/cuda/include/hl_cuda_cudnn.h | 2 +- paddle/cuda/include/hl_cuda_cudnn.ph | 2 +- paddle/cuda/include/hl_device_functions.cuh | 2 +- paddle/cuda/include/hl_functions.h | 2 +- paddle/cuda/include/hl_gpu.h | 2 +- paddle/cuda/include/hl_gpu_functions.cuh | 2 +- paddle/cuda/include/hl_gpu_gru.cuh | 2 +- paddle/cuda/include/hl_gpu_lstm.cuh | 2 +- paddle/cuda/include/hl_gpu_matrix_kernel.cuh | 2 +- paddle/cuda/include/hl_gru_ops.cuh | 2 +- paddle/cuda/include/hl_lstm.h | 2 +- paddle/cuda/include/hl_lstm_ops.cuh | 2 +- paddle/cuda/include/hl_matrix.h | 2 +- paddle/cuda/include/hl_matrix_apply.cuh | 2 +- paddle/cuda/include/hl_matrix_base.cuh | 2 +- paddle/cuda/include/hl_matrix_base_detail.cuh | 2 +- paddle/cuda/include/hl_matrix_ops.cuh | 2 +- paddle/cuda/include/hl_matrix_type.cuh | 2 +- paddle/cuda/include/hl_perturbation_util.cuh | 2 +- paddle/cuda/include/hl_recurrent_apply.cuh | 2 +- paddle/cuda/include/hl_sequence.h | 2 +- paddle/cuda/include/hl_sparse.h | 2 +- paddle/cuda/include/hl_sparse.ph | 2 +- paddle/cuda/include/hl_table_apply.h | 2 +- paddle/cuda/include/hl_tensor_ops.h | 2 +- paddle/cuda/include/hl_thread.ph | 2 +- paddle/cuda/include/hl_time.h | 2 +- paddle/cuda/include/hl_top_k.h | 2 +- paddle/cuda/include/hl_warpctc_wrap.h | 2 +- paddle/cuda/include/stub/hl_aggregate_stub.h | 2 +- paddle/cuda/include/stub/hl_cnn_stub.h | 2 +- 
paddle/cuda/include/stub/hl_cuda_cublas_stub.h | 2 +- paddle/cuda/include/stub/hl_cuda_cudnn_stub.h | 2 +- paddle/cuda/include/stub/hl_cuda_stub.h | 2 +- paddle/cuda/include/stub/hl_lstm_stub.h | 2 +- paddle/cuda/include/stub/hl_matrix_stub.h | 2 +- paddle/cuda/include/stub/hl_sequence_stub.h | 2 +- paddle/cuda/include/stub/hl_sparse_stub.h | 2 +- paddle/cuda/src/avx_mathfun.h | 2 +- paddle/cuda/src/hl_avx_functions.cc | 2 +- paddle/cuda/src/hl_batch_norm.cu | 2 +- paddle/cuda/src/hl_batch_transpose.cu | 2 +- paddle/cuda/src/hl_cpu_functions.cc | 2 +- paddle/cuda/src/hl_cuda_aggregate.cu | 2 +- paddle/cuda/src/hl_cuda_cnn.cu | 2 +- paddle/cuda/src/hl_cuda_cublas.cc | 2 +- paddle/cuda/src/hl_cuda_cudnn.cc | 2 +- paddle/cuda/src/hl_cuda_device.cc | 2 +- paddle/cuda/src/hl_cuda_lstm.cu | 2 +- paddle/cuda/src/hl_cuda_matrix.cu | 2 +- paddle/cuda/src/hl_cuda_sequence.cu | 2 +- paddle/cuda/src/hl_cuda_sparse.cu | 2 +- paddle/cuda/src/hl_cuda_sparse.cuh | 2 +- paddle/cuda/src/hl_math.cc | 2 +- paddle/cuda/src/hl_perturbation_util.cu | 2 +- paddle/cuda/src/hl_table_apply.cu | 2 +- paddle/cuda/src/hl_time.cc | 2 +- paddle/cuda/src/hl_top_k.cu | 2 +- paddle/cuda/src/hl_warpctc_wrap.cc | 2 +- paddle/fluid/framework/attribute.cc | 2 +- paddle/fluid/framework/attribute.h | 2 +- paddle/fluid/framework/backward.cc | 2 +- paddle/fluid/framework/backward.h | 2 +- paddle/fluid/framework/backward_test.cc | 2 +- paddle/fluid/framework/block_desc.cc | 2 +- paddle/fluid/framework/block_desc.h | 2 +- paddle/fluid/framework/data_device_transform.cc | 2 +- paddle/fluid/framework/data_device_transform.h | 2 +- .../fluid/framework/data_device_transform_test.cu | 2 +- paddle/fluid/framework/data_layout.h | 2 +- paddle/fluid/framework/data_layout_transform.cc | 2 +- paddle/fluid/framework/data_layout_transform.h | 2 +- .../fluid/framework/data_layout_transform_test.cc | 4 ++-- paddle/fluid/framework/data_transform.cc | 2 +- paddle/fluid/framework/data_transform.h | 2 +- paddle/fluid/framework/data_type.h | 2 +- paddle/fluid/framework/data_type_transform.cc | 2 +- paddle/fluid/framework/data_type_transform.h | 2 +- paddle/fluid/framework/data_type_transform_test.cc | 2 +- paddle/fluid/framework/ddim.cc | 2 +- paddle/fluid/framework/ddim.h | 2 +- paddle/fluid/framework/ddim_test.cc | 2 +- paddle/fluid/framework/dim.h | 2 +- paddle/fluid/framework/dim_test.cu | 2 +- paddle/fluid/framework/eigen.h | 2 +- paddle/fluid/framework/eigen_test.cc | 2 +- paddle/fluid/framework/executor.cc | 2 +- paddle/fluid/framework/executor.h | 2 +- paddle/fluid/framework/feed_fetch_method.cc | 2 +- paddle/fluid/framework/feed_fetch_method.h | 2 +- paddle/fluid/framework/feed_fetch_type.h | 2 +- paddle/fluid/framework/framework.proto | 2 +- paddle/fluid/framework/grad_op_desc_maker.h | 2 +- paddle/fluid/framework/init.cc | 2 +- paddle/fluid/framework/init.h | 2 +- paddle/fluid/framework/init_test.cc | 2 +- paddle/fluid/framework/library_type.h | 2 +- paddle/fluid/framework/lod_rank_table.cc | 2 +- paddle/fluid/framework/lod_rank_table.h | 2 +- paddle/fluid/framework/lod_tensor.cc | 2 +- paddle/fluid/framework/lod_tensor.h | 2 +- paddle/fluid/framework/lod_tensor_array.h | 2 +- paddle/fluid/framework/lod_tensor_test.cc | 2 +- paddle/fluid/framework/lod_tensor_test.cu | 2 +- paddle/fluid/framework/mixed_vector.h | 2 +- paddle/fluid/framework/mixed_vector_test.cu | 2 +- paddle/fluid/framework/op_desc.cc | 2 +- paddle/fluid/framework/op_desc.h | 2 +- paddle/fluid/framework/op_info.cc | 2 +- paddle/fluid/framework/op_info.h | 2 +- 
paddle/fluid/framework/op_kernel_type.h | 2 +- paddle/fluid/framework/op_kernel_type_test.cc | 2 +- paddle/fluid/framework/op_proto_maker.cc | 2 +- paddle/fluid/framework/op_proto_maker.h | 2 +- paddle/fluid/framework/op_proto_maker_test.cc | 2 +- paddle/fluid/framework/op_registry.cc | 2 +- paddle/fluid/framework/op_registry.h | 2 +- paddle/fluid/framework/op_registry_test.cc | 2 +- paddle/fluid/framework/operator.cc | 2 +- paddle/fluid/framework/operator.h | 2 +- paddle/fluid/framework/operator_test.cc | 2 +- paddle/fluid/framework/program_desc.cc | 2 +- paddle/fluid/framework/program_desc.h | 2 +- paddle/fluid/framework/program_desc_test.cc | 2 +- paddle/fluid/framework/proto_desc.h | 2 +- paddle/fluid/framework/prune.cc | 2 +- paddle/fluid/framework/prune.h | 2 +- paddle/fluid/framework/prune_test.cc | 2 +- paddle/fluid/framework/scope.cc | 2 +- paddle/fluid/framework/scope.h | 2 +- paddle/fluid/framework/scope_test.cc | 2 +- paddle/fluid/framework/selected_rows.cc | 2 +- paddle/fluid/framework/selected_rows.h | 2 +- paddle/fluid/framework/selected_rows_test.cc | 2 +- paddle/fluid/framework/shape_inference.cc | 2 +- paddle/fluid/framework/shape_inference.h | 2 +- paddle/fluid/framework/tensor.cc | 2 +- paddle/fluid/framework/tensor.h | 2 +- paddle/fluid/framework/tensor_impl.h | 2 +- paddle/fluid/framework/tensor_test.cc | 2 +- paddle/fluid/framework/tensor_util.h | 2 +- paddle/fluid/framework/tensor_util_test.cc | 2 +- paddle/fluid/framework/threadpool.cc | 2 +- paddle/fluid/framework/threadpool.h | 2 +- paddle/fluid/framework/threadpool_test.cc | 2 +- paddle/fluid/framework/type_defs.h | 2 +- paddle/fluid/framework/var_desc.cc | 2 +- paddle/fluid/framework/var_desc.h | 2 +- paddle/fluid/framework/var_type.h | 2 +- paddle/fluid/framework/var_type_inference.h | 2 +- paddle/fluid/framework/var_type_inference_test.cc | 4 ++-- paddle/fluid/framework/variable.h | 2 +- paddle/fluid/framework/variable_test.cc | 2 +- paddle/fluid/inference/io.cc | 2 +- paddle/fluid/inference/io.h | 2 +- .../tests/book/test_inference_fit_a_line.cc | 2 +- .../book/test_inference_image_classification.cc | 2 +- .../book/test_inference_label_semantic_roles.cc | 2 +- .../tests/book/test_inference_recognize_digits.cc | 2 +- .../book/test_inference_recommender_system.cc | 2 +- .../book/test_inference_rnn_encoder_decoder.cc | 2 +- .../book/test_inference_understand_sentiment.cc | 2 +- .../tests/book/test_inference_word2vec.cc | 2 +- paddle/fluid/inference/tests/test_helper.h | 2 +- paddle/fluid/memory/detail/buddy_allocator.cc | 2 +- paddle/fluid/memory/detail/buddy_allocator.h | 2 +- paddle/fluid/memory/detail/memory_block.cc | 2 +- paddle/fluid/memory/detail/memory_block.h | 2 +- paddle/fluid/memory/detail/meta_cache.cc | 2 +- paddle/fluid/memory/detail/meta_cache.h | 2 +- paddle/fluid/memory/detail/meta_data.cc | 2 +- paddle/fluid/memory/detail/meta_data.h | 2 +- paddle/fluid/memory/detail/system_allocator.cc | 2 +- paddle/fluid/memory/detail/system_allocator.h | 2 +- .../fluid/memory/detail/system_allocator_test.cc | 2 +- paddle/fluid/memory/memcpy.cc | 2 +- paddle/fluid/memory/memcpy.h | 2 +- paddle/fluid/memory/memory.cc | 2 +- paddle/fluid/memory/memory.h | 2 +- paddle/fluid/memory/memory_test.cc | 2 +- paddle/fluid/operators/accuracy_op.cc | 2 +- paddle/fluid/operators/accuracy_op.cu | 2 +- paddle/fluid/operators/accuracy_op.h | 2 +- paddle/fluid/operators/activation_op.cc | 2 +- paddle/fluid/operators/activation_op.cu | 2 +- paddle/fluid/operators/activation_op.h | 2 +- paddle/fluid/operators/adadelta_op.cc | 
2 +- paddle/fluid/operators/adadelta_op.cu | 2 +- paddle/fluid/operators/adadelta_op.h | 2 +- paddle/fluid/operators/adagrad_op.cc | 2 +- paddle/fluid/operators/adagrad_op.cu | 2 +- paddle/fluid/operators/adagrad_op.h | 2 +- paddle/fluid/operators/adam_op.cc | 2 +- paddle/fluid/operators/adam_op.cu | 2 +- paddle/fluid/operators/adam_op.h | 2 +- paddle/fluid/operators/adamax_op.cc | 2 +- paddle/fluid/operators/adamax_op.cu | 2 +- paddle/fluid/operators/adamax_op.h | 2 +- paddle/fluid/operators/array_operator.h | 2 +- paddle/fluid/operators/array_to_lod_tensor_op.cc | 2 +- paddle/fluid/operators/assign_op.cc | 2 +- paddle/fluid/operators/assign_value_op.cc | 2 +- paddle/fluid/operators/assign_value_op.cu.cc | 2 +- paddle/fluid/operators/assign_value_op.h | 2 +- paddle/fluid/operators/auc_op.cc | 2 +- paddle/fluid/operators/auc_op.h | 2 +- paddle/fluid/operators/batch_norm_op.cc | 2 +- paddle/fluid/operators/batch_norm_op.cu.cc | 2 +- paddle/fluid/operators/batch_norm_op.h | 2 +- paddle/fluid/operators/beam_search_decode_op.cc | 2 +- paddle/fluid/operators/beam_search_decode_op.h | 2 +- .../fluid/operators/beam_search_decode_op_test.cc | 2 +- paddle/fluid/operators/beam_search_op.cc | 2 +- paddle/fluid/operators/beam_search_op.h | 2 +- paddle/fluid/operators/beam_search_op_test.cc | 2 +- .../fluid/operators/bilinear_tensor_product_op.cc | 2 +- .../fluid/operators/bilinear_tensor_product_op.cu | 2 +- .../fluid/operators/bilinear_tensor_product_op.h | 2 +- paddle/fluid/operators/bipartite_match_op.cc | 2 +- paddle/fluid/operators/box_coder_op.cc | 2 +- paddle/fluid/operators/box_coder_op.cu | 2 +- paddle/fluid/operators/box_coder_op.h | 2 +- paddle/fluid/operators/cast_op.cc | 2 +- paddle/fluid/operators/cast_op.cu | 2 +- paddle/fluid/operators/cast_op.h | 2 +- paddle/fluid/operators/chunk_eval_op.cc | 2 +- paddle/fluid/operators/chunk_eval_op.h | 2 +- paddle/fluid/operators/clip_by_norm_op.cc | 2 +- paddle/fluid/operators/clip_by_norm_op.cu | 2 +- paddle/fluid/operators/clip_by_norm_op.h | 2 +- paddle/fluid/operators/clip_op.cc | 2 +- paddle/fluid/operators/clip_op.cu | 2 +- paddle/fluid/operators/clip_op.h | 2 +- paddle/fluid/operators/compare_op.cc | 2 +- paddle/fluid/operators/compare_op.cu | 2 +- paddle/fluid/operators/compare_op.h | 2 +- paddle/fluid/operators/concat_op.cc | 2 +- paddle/fluid/operators/concat_op.cu.cc | 2 +- paddle/fluid/operators/concat_op.h | 2 +- paddle/fluid/operators/cond_op.cc | 2 +- paddle/fluid/operators/cond_op.h | 2 +- paddle/fluid/operators/conditional_block_op.cc | 2 +- paddle/fluid/operators/conv_cudnn_op.cu.cc | 2 +- paddle/fluid/operators/conv_op.cc | 2 +- paddle/fluid/operators/conv_op.cu.cc | 2 +- paddle/fluid/operators/conv_op.h | 2 +- paddle/fluid/operators/conv_shift_op.cc | 2 +- paddle/fluid/operators/conv_shift_op.cu | 2 +- paddle/fluid/operators/conv_shift_op.h | 2 +- .../fluid/operators/conv_transpose_cudnn_op.cu.cc | 2 +- paddle/fluid/operators/conv_transpose_op.cc | 2 +- paddle/fluid/operators/conv_transpose_op.cu.cc | 2 +- paddle/fluid/operators/conv_transpose_op.h | 2 +- paddle/fluid/operators/cos_sim_op.cc | 2 +- paddle/fluid/operators/cos_sim_op.cu | 2 +- paddle/fluid/operators/cos_sim_op.h | 2 +- paddle/fluid/operators/crf_decoding_op.cc | 2 +- paddle/fluid/operators/crf_decoding_op.h | 2 +- paddle/fluid/operators/crop_op.cc | 2 +- paddle/fluid/operators/crop_op.cu | 2 +- paddle/fluid/operators/crop_op.h | 2 +- paddle/fluid/operators/cross_entropy_op.cc | 2 +- paddle/fluid/operators/cross_entropy_op.cu | 2 +- 
paddle/fluid/operators/cross_entropy_op.h | 2 +- paddle/fluid/operators/ctc_align_op.cc | 2 +- paddle/fluid/operators/ctc_align_op.cu | 2 +- paddle/fluid/operators/ctc_align_op.h | 2 +- paddle/fluid/operators/cum_op.h | 2 +- paddle/fluid/operators/cumsum_op.cc | 2 +- paddle/fluid/operators/cumsum_op.cu | 2 +- paddle/fluid/operators/decayed_adagrad_op.cc | 2 +- paddle/fluid/operators/decayed_adagrad_op.cu | 2 +- paddle/fluid/operators/decayed_adagrad_op.h | 2 +- paddle/fluid/operators/detail/grpc_client.cc | 2 +- paddle/fluid/operators/detail/grpc_client.h | 2 +- paddle/fluid/operators/detail/grpc_server.cc | 2 +- paddle/fluid/operators/detail/grpc_server.h | 2 +- paddle/fluid/operators/detail/safe_ref.h | 2 +- paddle/fluid/operators/detail/sendrecvop_utils.cc | 2 +- paddle/fluid/operators/detail/sendrecvop_utils.h | 2 +- paddle/fluid/operators/detail/simple_block_queue.h | 2 +- paddle/fluid/operators/detail/strided_memcpy.h | 2 +- paddle/fluid/operators/detection_map_op.cc | 2 +- paddle/fluid/operators/detection_map_op.h | 2 +- paddle/fluid/operators/detection_output_op.cc | 2 +- paddle/fluid/operators/detection_output_op.cu.cc | 2 +- paddle/fluid/operators/detection_output_op.h | 2 +- paddle/fluid/operators/dropout_op.cc | 2 +- paddle/fluid/operators/dropout_op.cu | 2 +- paddle/fluid/operators/dropout_op.h | 2 +- paddle/fluid/operators/edit_distance_op.cc | 2 +- paddle/fluid/operators/edit_distance_op.cu | 2 +- paddle/fluid/operators/edit_distance_op.h | 2 +- paddle/fluid/operators/elementwise_add_op.cc | 2 +- paddle/fluid/operators/elementwise_add_op.cu | 2 +- paddle/fluid/operators/elementwise_add_op.h | 2 +- paddle/fluid/operators/elementwise_div_op.cc | 2 +- paddle/fluid/operators/elementwise_div_op.cu | 2 +- paddle/fluid/operators/elementwise_div_op.h | 2 +- paddle/fluid/operators/elementwise_max_op.cc | 2 +- paddle/fluid/operators/elementwise_max_op.cu | 2 +- paddle/fluid/operators/elementwise_max_op.h | 2 +- paddle/fluid/operators/elementwise_min_op.cc | 2 +- paddle/fluid/operators/elementwise_min_op.cu | 2 +- paddle/fluid/operators/elementwise_min_op.h | 2 +- paddle/fluid/operators/elementwise_mul_op.cc | 2 +- paddle/fluid/operators/elementwise_mul_op.cu | 2 +- paddle/fluid/operators/elementwise_mul_op.h | 2 +- paddle/fluid/operators/elementwise_op.h | 2 +- paddle/fluid/operators/elementwise_op_function.h | 2 +- paddle/fluid/operators/elementwise_pow_op.cc | 2 +- paddle/fluid/operators/elementwise_pow_op.cu | 2 +- paddle/fluid/operators/elementwise_pow_op.h | 2 +- paddle/fluid/operators/elementwise_sub_op.cc | 2 +- paddle/fluid/operators/elementwise_sub_op.cu | 2 +- paddle/fluid/operators/elementwise_sub_op.h | 2 +- paddle/fluid/operators/expand_op.cc | 2 +- paddle/fluid/operators/expand_op.cu | 2 +- paddle/fluid/operators/expand_op.h | 2 +- paddle/fluid/operators/feed_op.cc | 2 +- paddle/fluid/operators/fetch_op.cc | 2 +- .../operators/fill_constant_batch_size_like_op.cc | 2 +- .../fill_constant_batch_size_like_op.cu.cc | 2 +- .../operators/fill_constant_batch_size_like_op.h | 2 +- paddle/fluid/operators/fill_constant_op.cc | 2 +- paddle/fluid/operators/fill_op.cc | 2 +- paddle/fluid/operators/fill_zeros_like_op.cc | 2 +- paddle/fluid/operators/fill_zeros_like_op.cu.cc | 2 +- paddle/fluid/operators/fill_zeros_like_op.h | 2 +- paddle/fluid/operators/ftrl_op.cc | 2 +- paddle/fluid/operators/ftrl_op.cu | 2 +- paddle/fluid/operators/ftrl_op.h | 2 +- paddle/fluid/operators/gather.cu.h | 2 +- paddle/fluid/operators/gather.h | 2 +- paddle/fluid/operators/gather_op.cc | 2 +- 
paddle/fluid/operators/gather_op.cu | 2 +- paddle/fluid/operators/gather_op.h | 2 +- paddle/fluid/operators/gather_test.cc | 2 +- paddle/fluid/operators/gaussian_random_op.cc | 2 +- paddle/fluid/operators/gaussian_random_op.cu | 2 +- paddle/fluid/operators/get_places_op.cc | 2 +- paddle/fluid/operators/gru_op.cc | 2 +- paddle/fluid/operators/gru_op.cu.cc | 2 +- paddle/fluid/operators/gru_op.h | 2 +- paddle/fluid/operators/gru_unit_op.cc | 2 +- paddle/fluid/operators/gru_unit_op.cu | 2 +- paddle/fluid/operators/gru_unit_op.h | 2 +- paddle/fluid/operators/hinge_loss_op.cc | 2 +- paddle/fluid/operators/hinge_loss_op.cu | 2 +- paddle/fluid/operators/hinge_loss_op.h | 2 +- paddle/fluid/operators/huber_loss_op.cc | 2 +- paddle/fluid/operators/huber_loss_op.cu | 2 +- paddle/fluid/operators/huber_loss_op.h | 2 +- paddle/fluid/operators/im2sequence_op.cc | 2 +- paddle/fluid/operators/im2sequence_op.cu | 2 +- paddle/fluid/operators/im2sequence_op.h | 2 +- paddle/fluid/operators/increment_op.cc | 2 +- paddle/fluid/operators/iou_similarity_op.cc | 2 +- paddle/fluid/operators/iou_similarity_op.cu | 2 +- paddle/fluid/operators/iou_similarity_op.h | 2 +- paddle/fluid/operators/is_empty_op.cc | 2 +- paddle/fluid/operators/l1_norm_op.cc | 2 +- paddle/fluid/operators/l1_norm_op.cu | 2 +- paddle/fluid/operators/l1_norm_op.h | 2 +- paddle/fluid/operators/label_smooth_op.cc | 2 +- paddle/fluid/operators/label_smooth_op.cu | 2 +- paddle/fluid/operators/label_smooth_op.h | 2 +- paddle/fluid/operators/layer_norm_op.cc | 2 +- paddle/fluid/operators/layer_norm_op.cu | 2 +- paddle/fluid/operators/layer_norm_op.h | 2 +- paddle/fluid/operators/linear_chain_crf_op.cc | 2 +- paddle/fluid/operators/linear_chain_crf_op.cu | 2 +- paddle/fluid/operators/linear_chain_crf_op.h | 2 +- paddle/fluid/operators/listen_and_serv_op.cc | 2 +- paddle/fluid/operators/load_combine_op.cc | 2 +- paddle/fluid/operators/load_op.cc | 2 +- paddle/fluid/operators/lod_array_length_op.cc | 2 +- paddle/fluid/operators/lod_rank_table_op.cc | 2 +- paddle/fluid/operators/lod_reset_op.cc | 2 +- paddle/fluid/operators/lod_reset_op.cu | 2 +- paddle/fluid/operators/lod_reset_op.h | 2 +- paddle/fluid/operators/lod_tensor_to_array_op.cc | 2 +- paddle/fluid/operators/log_loss_op.cc | 2 +- paddle/fluid/operators/log_loss_op.cu | 2 +- paddle/fluid/operators/log_loss_op.h | 2 +- paddle/fluid/operators/logical_op.cc | 2 +- paddle/fluid/operators/logical_op.cu | 2 +- paddle/fluid/operators/logical_op.h | 2 +- paddle/fluid/operators/lookup_table_op.cc | 2 +- paddle/fluid/operators/lookup_table_op.cu | 2 +- paddle/fluid/operators/lookup_table_op.h | 2 +- paddle/fluid/operators/lrn_op.cc | 2 +- paddle/fluid/operators/lrn_op.cu | 2 +- paddle/fluid/operators/lrn_op.h | 2 +- paddle/fluid/operators/lstm_op.cc | 2 +- paddle/fluid/operators/lstm_op.cu.cc | 2 +- paddle/fluid/operators/lstm_op.h | 2 +- paddle/fluid/operators/lstm_unit_op.cc | 2 +- paddle/fluid/operators/lstm_unit_op.cu | 2 +- paddle/fluid/operators/lstm_unit_op.h | 2 +- paddle/fluid/operators/lstmp_op.cc | 2 +- paddle/fluid/operators/lstmp_op.cu | 2 +- paddle/fluid/operators/lstmp_op.h | 2 +- paddle/fluid/operators/margin_rank_loss_op.cc | 2 +- paddle/fluid/operators/margin_rank_loss_op.cu | 2 +- paddle/fluid/operators/margin_rank_loss_op.h | 2 +- paddle/fluid/operators/math/context_project.cc | 2 +- paddle/fluid/operators/math/context_project.cu | 2 +- paddle/fluid/operators/math/context_project.h | 2 +- paddle/fluid/operators/math/cos_sim_functor.cc | 2 +- paddle/fluid/operators/math/cos_sim_functor.cu 
| 2 +- paddle/fluid/operators/math/cos_sim_functor.h | 2 +- paddle/fluid/operators/math/cross_entropy.cc | 2 +- paddle/fluid/operators/math/cross_entropy.cu | 2 +- paddle/fluid/operators/math/cross_entropy.h | 2 +- paddle/fluid/operators/math/depthwise_conv.cu | 2 +- paddle/fluid/operators/math/depthwise_conv.h | 2 +- .../operators/math/detail/activation_functions.h | 2 +- .../fluid/operators/math/detail/avx_functions.cc | 2 +- .../fluid/operators/math/detail/gru_cpu_kernel.h | 2 +- .../fluid/operators/math/detail/gru_gpu_kernel.h | 2 +- paddle/fluid/operators/math/detail/gru_kernel.h | 2 +- .../fluid/operators/math/detail/lstm_cpu_kernel.h | 2 +- .../fluid/operators/math/detail/lstm_gpu_kernel.h | 2 +- paddle/fluid/operators/math/detail/lstm_kernel.h | 2 +- paddle/fluid/operators/math/detection_util.h | 2 +- paddle/fluid/operators/math/gru_compute.cc | 2 +- paddle/fluid/operators/math/gru_compute.cu | 2 +- paddle/fluid/operators/math/gru_compute.h | 2 +- paddle/fluid/operators/math/im2col.cc | 2 +- paddle/fluid/operators/math/im2col.cu | 2 +- paddle/fluid/operators/math/im2col.h | 2 +- paddle/fluid/operators/math/im2col_test.cc | 2 +- paddle/fluid/operators/math/lstm_compute.cc | 2 +- paddle/fluid/operators/math/lstm_compute.cu | 2 +- paddle/fluid/operators/math/lstm_compute.h | 2 +- paddle/fluid/operators/math/math_function.cc | 2 +- paddle/fluid/operators/math/math_function.cu | 2 +- paddle/fluid/operators/math/math_function.h | 2 +- paddle/fluid/operators/math/math_function_impl.h | 2 +- paddle/fluid/operators/math/math_function_test.cc | 2 +- paddle/fluid/operators/math/math_function_test.cu | 2 +- paddle/fluid/operators/math/matmul.h | 2 +- paddle/fluid/operators/math/maxouting.cc | 2 +- paddle/fluid/operators/math/maxouting.cu | 2 +- paddle/fluid/operators/math/maxouting.h | 2 +- paddle/fluid/operators/math/pooling.cc | 2 +- paddle/fluid/operators/math/pooling.cu | 2 +- paddle/fluid/operators/math/pooling.h | 2 +- paddle/fluid/operators/math/sampler.cc | 2 +- paddle/fluid/operators/math/sampler.h | 2 +- .../fluid/operators/math/selected_rows_functor.cc | 2 +- .../fluid/operators/math/selected_rows_functor.cu | 2 +- .../fluid/operators/math/selected_rows_functor.h | 2 +- .../operators/math/selected_rows_functor_test.cc | 2 +- .../operators/math/selected_rows_functor_test.cu | 2 +- paddle/fluid/operators/math/sequence2batch.cc | 2 +- paddle/fluid/operators/math/sequence2batch.cu | 2 +- paddle/fluid/operators/math/sequence2batch.h | 2 +- paddle/fluid/operators/math/sequence_padding.cc | 2 +- paddle/fluid/operators/math/sequence_padding.cu | 2 +- paddle/fluid/operators/math/sequence_padding.h | 2 +- .../fluid/operators/math/sequence_padding_test.cc | 2 +- paddle/fluid/operators/math/sequence_pooling.cc | 2 +- paddle/fluid/operators/math/sequence_pooling.cu | 2 +- paddle/fluid/operators/math/sequence_pooling.h | 2 +- paddle/fluid/operators/math/sequence_scale.cc | 2 +- paddle/fluid/operators/math/sequence_scale.cu | 2 +- paddle/fluid/operators/math/sequence_scale.h | 2 +- paddle/fluid/operators/math/softmax.cc | 2 +- paddle/fluid/operators/math/softmax.cu | 2 +- paddle/fluid/operators/math/softmax.h | 2 +- paddle/fluid/operators/math/softmax_impl.h | 2 +- paddle/fluid/operators/math/unpooling.cc | 2 +- paddle/fluid/operators/math/unpooling.cu | 2 +- paddle/fluid/operators/math/unpooling.h | 2 +- paddle/fluid/operators/math/vol2col.cc | 2 +- paddle/fluid/operators/math/vol2col.cu | 2 +- paddle/fluid/operators/math/vol2col.h | 2 +- paddle/fluid/operators/math/vol2col_test.cc | 2 +- 
paddle/fluid/operators/matmul_op.cc | 2 +- paddle/fluid/operators/matmul_op.cu.cc | 2 +- paddle/fluid/operators/matmul_op.h | 2 +- paddle/fluid/operators/max_sequence_len_op.cc | 2 +- paddle/fluid/operators/maxout_op.cc | 2 +- paddle/fluid/operators/maxout_op.cu.cc | 2 +- paddle/fluid/operators/maxout_op.h | 2 +- paddle/fluid/operators/mean_op.cc | 2 +- paddle/fluid/operators/mean_op.cu | 2 +- paddle/fluid/operators/mean_op.h | 2 +- paddle/fluid/operators/merge_lod_tensor_op.cc | 2 +- paddle/fluid/operators/mine_hard_examples_op.cc | 2 +- paddle/fluid/operators/minus_op.cc | 2 +- paddle/fluid/operators/minus_op.cu | 2 +- paddle/fluid/operators/minus_op.h | 2 +- paddle/fluid/operators/modified_huber_loss_op.cc | 2 +- paddle/fluid/operators/modified_huber_loss_op.cu | 2 +- paddle/fluid/operators/modified_huber_loss_op.h | 2 +- paddle/fluid/operators/momentum_op.cc | 2 +- paddle/fluid/operators/momentum_op.cu | 2 +- paddle/fluid/operators/momentum_op.h | 2 +- paddle/fluid/operators/mul_op.cc | 2 +- paddle/fluid/operators/mul_op.cu.cc | 2 +- paddle/fluid/operators/mul_op.h | 2 +- paddle/fluid/operators/multiclass_nms_op.cc | 2 +- paddle/fluid/operators/multiplex_op.cc | 2 +- paddle/fluid/operators/multiplex_op.cu | 2 +- paddle/fluid/operators/multiplex_op.h | 2 +- paddle/fluid/operators/nccl/nccl_gpu_common.cc | 2 +- paddle/fluid/operators/nccl/nccl_gpu_common.h | 2 +- paddle/fluid/operators/nccl_op.cc | 2 +- paddle/fluid/operators/nccl_op.cu.cc | 2 +- paddle/fluid/operators/nccl_op_test.cu.cc | 2 +- paddle/fluid/operators/nce_op.cc | 2 +- paddle/fluid/operators/nce_op.h | 2 +- paddle/fluid/operators/net_op.cc | 2 +- paddle/fluid/operators/net_op.h | 2 +- paddle/fluid/operators/net_op_test.cc | 2 +- paddle/fluid/operators/norm_op.cc | 2 +- paddle/fluid/operators/norm_op.cu | 2 +- paddle/fluid/operators/norm_op.h | 2 +- paddle/fluid/operators/one_hot_op.cc | 2 +- paddle/fluid/operators/one_hot_op.cu | 2 +- paddle/fluid/operators/one_hot_op.h | 2 +- paddle/fluid/operators/pad_op.cc | 2 +- paddle/fluid/operators/pad_op.cu | 2 +- paddle/fluid/operators/pad_op.h | 2 +- paddle/fluid/operators/parallel_do_op.cc | 2 +- paddle/fluid/operators/pool_cudnn_op.cu.cc | 2 +- paddle/fluid/operators/pool_op.cc | 2 +- paddle/fluid/operators/pool_op.cu.cc | 2 +- paddle/fluid/operators/pool_op.h | 2 +- paddle/fluid/operators/pool_with_index_op.cc | 2 +- paddle/fluid/operators/pool_with_index_op.cu.cc | 2 +- paddle/fluid/operators/pool_with_index_op.h | 2 +- paddle/fluid/operators/precision_recall_op.cc | 2 +- paddle/fluid/operators/precision_recall_op.h | 2 +- paddle/fluid/operators/prelu_op.cc | 2 +- paddle/fluid/operators/prelu_op.cu | 2 +- paddle/fluid/operators/prelu_op.h | 2 +- paddle/fluid/operators/print_op.cc | 2 +- paddle/fluid/operators/prior_box_op.cc | 2 +- paddle/fluid/operators/prior_box_op.h | 2 +- paddle/fluid/operators/proximal_adagrad_op.cc | 2 +- paddle/fluid/operators/proximal_adagrad_op.cu | 2 +- paddle/fluid/operators/proximal_adagrad_op.h | 2 +- paddle/fluid/operators/proximal_gd_op.cc | 2 +- paddle/fluid/operators/proximal_gd_op.cu | 2 +- paddle/fluid/operators/proximal_gd_op.h | 2 +- paddle/fluid/operators/rank_loss_op.cc | 2 +- paddle/fluid/operators/rank_loss_op.cu | 2 +- paddle/fluid/operators/rank_loss_op.h | 2 +- paddle/fluid/operators/recurrent_op.cc | 2 +- paddle/fluid/operators/recv_op.cc | 2 +- paddle/fluid/operators/reduce_op.cc | 2 +- paddle/fluid/operators/reduce_op.cu | 2 +- paddle/fluid/operators/reduce_op.h | 2 +- .../operators/reorder_lod_tensor_by_rank_op.cc | 2 +- 
paddle/fluid/operators/reshape_op.cc | 2 +- paddle/fluid/operators/reshape_op.cu | 2 +- paddle/fluid/operators/reshape_op.h | 2 +- paddle/fluid/operators/rmsprop_op.cc | 2 +- paddle/fluid/operators/rmsprop_op.cu | 2 +- paddle/fluid/operators/rmsprop_op.h | 2 +- paddle/fluid/operators/rnn_memory_helper_op.cc | 2 +- paddle/fluid/operators/roi_pool_op.cc | 2 +- paddle/fluid/operators/roi_pool_op.cu | 2 +- paddle/fluid/operators/roi_pool_op.h | 2 +- paddle/fluid/operators/row_conv_op.cc | 2 +- paddle/fluid/operators/row_conv_op.cu | 2 +- paddle/fluid/operators/row_conv_op.h | 2 +- paddle/fluid/operators/save_combine_op.cc | 2 +- .../fluid/operators/save_load_combine_op_test.cc | 2 +- paddle/fluid/operators/save_load_op_test.cc | 2 +- paddle/fluid/operators/save_op.cc | 2 +- paddle/fluid/operators/scale_op.cc | 2 +- paddle/fluid/operators/scale_op.cu | 2 +- paddle/fluid/operators/scale_op.h | 2 +- paddle/fluid/operators/scatter.cu.h | 2 +- paddle/fluid/operators/scatter.h | 2 +- paddle/fluid/operators/scatter_op.cc | 2 +- paddle/fluid/operators/scatter_op.cu | 2 +- paddle/fluid/operators/scatter_op.h | 2 +- paddle/fluid/operators/scatter_test.cc | 2 +- paddle/fluid/operators/send_op.cc | 2 +- paddle/fluid/operators/send_recv_op_test.cc | 2 +- paddle/fluid/operators/sequence_concat_op.cc | 2 +- paddle/fluid/operators/sequence_concat_op.cu.cc | 2 +- paddle/fluid/operators/sequence_concat_op.h | 2 +- paddle/fluid/operators/sequence_conv_op.cc | 2 +- paddle/fluid/operators/sequence_conv_op.cu.cc | 2 +- paddle/fluid/operators/sequence_conv_op.h | 2 +- paddle/fluid/operators/sequence_erase_op.cc | 2 +- paddle/fluid/operators/sequence_erase_op.cu | 2 +- paddle/fluid/operators/sequence_erase_op.h | 2 +- paddle/fluid/operators/sequence_expand_op.cc | 2 +- paddle/fluid/operators/sequence_expand_op.cu | 2 +- paddle/fluid/operators/sequence_expand_op.h | 2 +- paddle/fluid/operators/sequence_pool_op.cc | 2 +- paddle/fluid/operators/sequence_pool_op.cu | 2 +- paddle/fluid/operators/sequence_pool_op.h | 2 +- paddle/fluid/operators/sequence_reshape_op.cc | 2 +- paddle/fluid/operators/sequence_reshape_op.cu | 2 +- paddle/fluid/operators/sequence_reshape_op.h | 2 +- paddle/fluid/operators/sequence_slice_op.cc | 2 +- paddle/fluid/operators/sequence_slice_op.cu | 2 +- paddle/fluid/operators/sequence_slice_op.h | 2 +- paddle/fluid/operators/sequence_softmax_op.cc | 2 +- paddle/fluid/operators/sequence_softmax_op.cu.cc | 2 +- paddle/fluid/operators/sequence_softmax_op.h | 2 +- paddle/fluid/operators/sgd_op.cc | 2 +- paddle/fluid/operators/sgd_op.cu | 2 +- paddle/fluid/operators/sgd_op.h | 2 +- paddle/fluid/operators/shrink_rnn_memory_op.cc | 2 +- .../sigmoid_cross_entropy_with_logits_op.cc | 2 +- .../sigmoid_cross_entropy_with_logits_op.cu | 2 +- .../sigmoid_cross_entropy_with_logits_op.h | 2 +- paddle/fluid/operators/sign_op.cc | 2 +- paddle/fluid/operators/sign_op.cu | 2 +- paddle/fluid/operators/sign_op.h | 2 +- paddle/fluid/operators/smooth_l1_loss_op.cc | 2 +- paddle/fluid/operators/smooth_l1_loss_op.cu | 2 +- paddle/fluid/operators/smooth_l1_loss_op.h | 2 +- paddle/fluid/operators/softmax_op.cc | 2 +- paddle/fluid/operators/softmax_op.cu.cc | 2 +- paddle/fluid/operators/softmax_op.h | 2 +- .../operators/softmax_with_cross_entropy_op.cc | 2 +- .../operators/softmax_with_cross_entropy_op.cu | 2 +- .../operators/softmax_with_cross_entropy_op.h | 2 +- paddle/fluid/operators/split_lod_tensor_op.cc | 2 +- paddle/fluid/operators/split_op.cc | 2 +- paddle/fluid/operators/split_op.cu.cc | 2 +- 
paddle/fluid/operators/split_op.h | 2 +- paddle/fluid/operators/split_selected_rows_op.cc | 2 +- paddle/fluid/operators/split_selected_rows_op.cu | 2 +- paddle/fluid/operators/split_selected_rows_op.h | 2 +- paddle/fluid/operators/spp_op.cc | 2 +- paddle/fluid/operators/spp_op.cu.cc | 2 +- paddle/fluid/operators/spp_op.h | 2 +- paddle/fluid/operators/squared_l2_distance_op.cc | 2 +- paddle/fluid/operators/squared_l2_distance_op.cu | 2 +- paddle/fluid/operators/squared_l2_distance_op.h | 2 +- paddle/fluid/operators/squared_l2_norm_op.cc | 2 +- paddle/fluid/operators/squared_l2_norm_op.cu | 2 +- paddle/fluid/operators/squared_l2_norm_op.h | 2 +- paddle/fluid/operators/strided_memcpy.h | 2 +- paddle/fluid/operators/strided_memcpy_test.cc | 2 +- paddle/fluid/operators/sum_op.cc | 2 +- paddle/fluid/operators/sum_op.cu | 2 +- paddle/fluid/operators/sum_op.h | 2 +- paddle/fluid/operators/target_assign_op.cc | 2 +- paddle/fluid/operators/target_assign_op.cu | 2 +- paddle/fluid/operators/target_assign_op.h | 2 +- .../fluid/operators/tensor_array_read_write_op.cc | 2 +- paddle/fluid/operators/top_k_op.cc | 2 +- paddle/fluid/operators/top_k_op.cu | 2 +- paddle/fluid/operators/top_k_op.h | 2 +- paddle/fluid/operators/transpose_op.cc | 2 +- paddle/fluid/operators/transpose_op.cu.cc | 2 +- paddle/fluid/operators/transpose_op.h | 2 +- paddle/fluid/operators/uniform_random_op.cc | 2 +- paddle/fluid/operators/uniform_random_op.cu | 2 +- paddle/fluid/operators/unpool_op.cc | 2 +- paddle/fluid/operators/unpool_op.cu.cc | 2 +- paddle/fluid/operators/unpool_op.h | 2 +- paddle/fluid/operators/warpctc_op.cc | 2 +- paddle/fluid/operators/warpctc_op.cu.cc | 2 +- paddle/fluid/operators/warpctc_op.h | 2 +- paddle/fluid/operators/while_op.cc | 2 +- paddle/fluid/platform/assert.h | 2 +- paddle/fluid/platform/call_once.h | 2 +- paddle/fluid/platform/cpu_info.cc | 2 +- paddle/fluid/platform/cpu_info.h | 2 +- paddle/fluid/platform/cpu_info_test.cc | 2 +- paddle/fluid/platform/cuda_helper.h | 2 +- paddle/fluid/platform/cuda_profiler.h | 2 +- paddle/fluid/platform/cudnn_helper.h | 2 +- paddle/fluid/platform/cudnn_helper_test.cc | 2 +- paddle/fluid/platform/details/device_ptr_cast.h | 2 +- paddle/fluid/platform/device_context.cc | 2 +- paddle/fluid/platform/device_context.h | 2 +- paddle/fluid/platform/device_context_test.cu | 2 +- paddle/fluid/platform/dynload/cublas.cc | 2 +- paddle/fluid/platform/dynload/cublas.h | 2 +- paddle/fluid/platform/dynload/cudnn.cc | 2 +- paddle/fluid/platform/dynload/cudnn.h | 2 +- paddle/fluid/platform/dynload/curand.cc | 2 +- paddle/fluid/platform/dynload/curand.h | 2 +- paddle/fluid/platform/dynload/dynamic_loader.cc | 2 +- paddle/fluid/platform/dynload/dynamic_loader.h | 2 +- paddle/fluid/platform/dynload/nccl.cc | 2 +- paddle/fluid/platform/dynload/nccl.h | 2 +- paddle/fluid/platform/dynload/warpctc.cc | 2 +- paddle/fluid/platform/dynload/warpctc.h | 2 +- paddle/fluid/platform/enforce.cc | 2 +- paddle/fluid/platform/enforce.h | 2 +- paddle/fluid/platform/enforce_test.cc | 2 +- paddle/fluid/platform/for_range.h | 2 +- paddle/fluid/platform/gpu_info.cc | 2 +- paddle/fluid/platform/gpu_info.h | 2 +- paddle/fluid/platform/hostdevice.h | 2 +- paddle/fluid/platform/macros.h | 2 +- paddle/fluid/platform/mkldnn_helper.h | 2 +- paddle/fluid/platform/nccl_test.cu | 2 +- paddle/fluid/platform/place.cc | 2 +- paddle/fluid/platform/place.h | 2 +- paddle/fluid/platform/place_test.cc | 2 +- paddle/fluid/platform/profiler.cc | 2 +- paddle/fluid/platform/profiler.h | 2 +- 
paddle/fluid/platform/profiler_test.cc | 2 +- paddle/fluid/platform/transform.h | 2 +- paddle/fluid/platform/transform_test.cu | 2 +- paddle/fluid/platform/variant.h | 2 +- paddle/fluid/pybind/const_value.cc | 2 +- paddle/fluid/pybind/const_value.h | 2 +- paddle/fluid/pybind/exception.cc | 2 +- paddle/fluid/pybind/exception.h | 2 +- paddle/fluid/pybind/protobuf.cc | 2 +- paddle/fluid/pybind/protobuf.h | 2 +- paddle/fluid/pybind/pybind.cc | 2 +- paddle/fluid/pybind/tensor_py.h | 2 +- paddle/fluid/string/piece.cc | 2 +- paddle/fluid/string/piece.h | 2 +- paddle/fluid/string/piece_test.cc | 2 +- paddle/fluid/string/printf.h | 2 +- paddle/fluid/string/printf_test.cc | 2 +- paddle/fluid/string/tinyformat/tinyformat.h | 2 +- paddle/fluid/string/to_string.h | 2 +- paddle/fluid/string/to_string_test.cc | 2 +- paddle/function/BlockExpandOp.cpp | 2 +- paddle/function/BlockExpandOpTest.cpp | 2 +- paddle/function/BufferArg.cpp | 2 +- paddle/function/BufferArg.h | 2 +- paddle/function/BufferArgTest.cpp | 2 +- paddle/function/ContextProjectionOp.cpp | 2 +- paddle/function/ContextProjectionOp.h | 2 +- paddle/function/ContextProjectionOpGpu.cu | 2 +- paddle/function/ContextProjectionOpTest.cpp | 2 +- paddle/function/ConvOp.h | 2 +- paddle/function/ConvOpTest.h | 2 +- paddle/function/CosSimOp.cpp | 2 +- paddle/function/CosSimOp.h | 2 +- paddle/function/CosSimOpGpu.cu | 2 +- paddle/function/CosSimOpTest.cpp | 2 +- paddle/function/CropOp.cpp | 2 +- paddle/function/CropOp.h | 2 +- paddle/function/CropOpGpu.cu | 2 +- paddle/function/CropOpTest.cpp | 2 +- paddle/function/CrossMapNormalOp.cpp | 2 +- paddle/function/CrossMapNormalOp.h | 2 +- paddle/function/CrossMapNormalOpGpu.cu | 2 +- paddle/function/CrossMapNormalOpTest.cpp | 2 +- paddle/function/DepthwiseConvOp.cpp | 2 +- paddle/function/DepthwiseConvOp.h | 2 +- paddle/function/DepthwiseConvOpGpu.cu | 2 +- paddle/function/DepthwiseConvOpTest.cpp | 2 +- paddle/function/EigenGemm.cpp | 2 +- paddle/function/Function.cpp | 2 +- paddle/function/Function.h | 2 +- paddle/function/FunctionTest.cpp | 2 +- paddle/function/FunctionTest.h | 2 +- paddle/function/GemmConvOp.cpp | 2 +- paddle/function/GemmConvOpTest.cpp | 2 +- paddle/function/GemmFunctor.cpp | 2 +- paddle/function/GemmFunctor.h | 2 +- paddle/function/GruFunctor.h | 2 +- paddle/function/Im2Col.h | 2 +- paddle/function/Im2ColOp.cpp | 2 +- paddle/function/Im2ColOpGpu.cu | 2 +- paddle/function/Im2ColTest.cpp | 2 +- paddle/function/MulOp.cpp | 2 +- paddle/function/MulOp.h | 2 +- paddle/function/MulOpGpu.cu | 2 +- paddle/function/MulOpTest.cpp | 2 +- paddle/function/NaiveConvOp.cpp | 2 +- paddle/function/PadOp.cpp | 2 +- paddle/function/PadOp.h | 2 +- paddle/function/PadOpGpu.cu | 2 +- paddle/function/PadOpTest.cpp | 2 +- paddle/function/RowConvOp.cpp | 2 +- paddle/function/RowConvOp.h | 2 +- paddle/function/RowConvOpGpu.cu | 2 +- paddle/function/RowConvOpTest.cpp | 2 +- paddle/function/ScaleSubRegionOp.cpp | 2 +- paddle/function/ScaleSubRegionOp.h | 2 +- paddle/function/ScaleSubRegionOpGpu.cu | 2 +- paddle/function/ScaleSubRegionOpTest.cpp | 2 +- paddle/function/SwitchOp.cpp | 2 +- paddle/function/SwitchOp.h | 2 +- paddle/function/SwitchOpTest.cpp | 2 +- paddle/function/TensorShape.h | 2 +- paddle/function/TensorShapeTest.cpp | 2 +- paddle/function/TensorType.h | 2 +- paddle/function/TensorTypeTest.cpp | 2 +- paddle/function/neon/NeonDepthwiseConv.cpp | 2 +- paddle/function/neon/NeonDepthwiseConv.h | 2 +- .../function/neon/NeonDepthwiseConvTranspose.cpp | 2 +- paddle/function/neon/neon_util.h | 2 +- 
paddle/function/nnpack/NNPACKConvOp.cpp | 2 +- paddle/function/nnpack/NNPACKConvOpTest.cpp | 2 +- paddle/gserver/activations/ActivationFunction.cpp | 2 +- paddle/gserver/activations/ActivationFunction.h | 2 +- paddle/gserver/activations/MKLDNNActivation.cpp | 2 +- paddle/gserver/activations/MKLDNNActivation.h | 2 +- paddle/gserver/dataproviders/DataProvider.cpp | 2 +- paddle/gserver/dataproviders/DataProvider.h | 2 +- paddle/gserver/dataproviders/DataProviderGroup.h | 2 +- paddle/gserver/dataproviders/MultiDataProvider.cpp | 2 +- paddle/gserver/dataproviders/MultiDataProvider.h | 2 +- paddle/gserver/dataproviders/ProtoReader.h | 2 +- paddle/gserver/dataproviders/PyDataProvider.cpp | 2 +- paddle/gserver/dataproviders/PyDataProvider.h | 2 +- paddle/gserver/dataproviders/PyDataProvider2.cpp | 2 +- paddle/gserver/evaluators/CTCErrorEvaluator.cpp | 2 +- paddle/gserver/evaluators/ChunkEvaluator.cpp | 2 +- .../gserver/evaluators/DetectionMAPEvaluator.cpp | 2 +- paddle/gserver/evaluators/Evaluator.cpp | 2 +- paddle/gserver/evaluators/Evaluator.h | 2 +- .../gserver/gradientmachines/GradientMachine.cpp | 2 +- paddle/gserver/gradientmachines/GradientMachine.h | 2 +- .../gradientmachines/GradientMachineMode.cpp | 2 +- .../gserver/gradientmachines/GradientMachineMode.h | 2 +- .../gradientmachines/MultiGradientMachine.cpp | 2 +- .../gradientmachines/MultiGradientMachine.h | 2 +- paddle/gserver/gradientmachines/MultiNetwork.cpp | 2 +- paddle/gserver/gradientmachines/MultiNetwork.h | 2 +- paddle/gserver/gradientmachines/NeuralNetwork.cpp | 2 +- paddle/gserver/gradientmachines/NeuralNetwork.h | 2 +- .../gradientmachines/ParallelNeuralNetwork.cpp | 2 +- .../gradientmachines/ParallelNeuralNetwork.h | 2 +- .../gradientmachines/RecurrentGradientMachine.cpp | 2 +- .../gradientmachines/RecurrentGradientMachine.h | 2 +- paddle/gserver/layers/AddtoLayer.cpp | 2 +- paddle/gserver/layers/AddtoLayer.h | 2 +- paddle/gserver/layers/AgentLayer.cpp | 2 +- paddle/gserver/layers/AgentLayer.h | 2 +- paddle/gserver/layers/AverageLayer.cpp | 2 +- paddle/gserver/layers/AverageLayer.h | 2 +- paddle/gserver/layers/BatchNormBaseLayer.cpp | 2 +- paddle/gserver/layers/BatchNormBaseLayer.h | 2 +- paddle/gserver/layers/BatchNormalizationLayer.cpp | 2 +- paddle/gserver/layers/BatchNormalizationLayer.h | 2 +- paddle/gserver/layers/BilinearInterpLayer.cpp | 2 +- paddle/gserver/layers/BilinearInterpLayer.h | 2 +- paddle/gserver/layers/BlockExpandLayer.cpp | 2 +- paddle/gserver/layers/BlockExpandLayer.h | 2 +- paddle/gserver/layers/CRFDecodingLayer.cpp | 2 +- paddle/gserver/layers/CRFDecodingLayer.h | 2 +- paddle/gserver/layers/CRFLayer.cpp | 2 +- paddle/gserver/layers/CRFLayer.h | 2 +- paddle/gserver/layers/CTCLayer.cpp | 2 +- paddle/gserver/layers/CTCLayer.h | 2 +- paddle/gserver/layers/ClipLayer.cpp | 2 +- paddle/gserver/layers/ConcatenateLayer.cpp | 2 +- paddle/gserver/layers/ContextProjection.cpp | 2 +- paddle/gserver/layers/ContextProjection.h | 2 +- paddle/gserver/layers/Conv3DLayer.cpp | 2 +- paddle/gserver/layers/Conv3DLayer.h | 2 +- paddle/gserver/layers/ConvBaseLayer.cpp | 2 +- paddle/gserver/layers/ConvBaseLayer.h | 2 +- paddle/gserver/layers/ConvBaseOperator.cpp | 2 +- paddle/gserver/layers/ConvBaseOperator.h | 2 +- paddle/gserver/layers/ConvBaseProjection.cpp | 2 +- paddle/gserver/layers/ConvBaseProjection.h | 2 +- paddle/gserver/layers/ConvOperator.cpp | 2 +- paddle/gserver/layers/ConvOperator.h | 2 +- paddle/gserver/layers/ConvProjection.cpp | 2 +- paddle/gserver/layers/ConvProjection.h | 2 +- 
paddle/gserver/layers/ConvShiftLayer.cpp | 2 +- paddle/gserver/layers/ConvTransOperator.cpp | 2 +- paddle/gserver/layers/ConvTransOperator.h | 2 +- paddle/gserver/layers/ConvTransProjection.cpp | 2 +- paddle/gserver/layers/ConvTransProjection.h | 2 +- paddle/gserver/layers/ConvexCombinationLayer.cpp | 2 +- paddle/gserver/layers/CosSimLayer.cpp | 2 +- paddle/gserver/layers/CosSimLayer.h | 2 +- paddle/gserver/layers/CosSimVecMatLayer.cpp | 2 +- paddle/gserver/layers/CostLayer.cpp | 2 +- paddle/gserver/layers/CostLayer.h | 2 +- paddle/gserver/layers/CropLayer.cpp | 2 +- paddle/gserver/layers/CropLayer.h | 2 +- paddle/gserver/layers/CrossChannelNormLayer.cpp | 2 +- paddle/gserver/layers/CrossEntropyOverBeam.cpp | 2 +- paddle/gserver/layers/CrossEntropyOverBeam.h | 2 +- paddle/gserver/layers/CudnnBatchNormLayer.cpp | 2 +- paddle/gserver/layers/CudnnBatchNormLayer.h | 2 +- paddle/gserver/layers/CudnnConvBaseLayer.cpp | 2 +- paddle/gserver/layers/CudnnConvBaseLayer.h | 2 +- paddle/gserver/layers/CudnnPoolLayer.cpp | 2 +- paddle/gserver/layers/CudnnPoolLayer.h | 2 +- paddle/gserver/layers/DataLayer.cpp | 2 +- paddle/gserver/layers/DataLayer.h | 2 +- paddle/gserver/layers/DataNormLayer.cpp | 2 +- paddle/gserver/layers/DataNormLayer.h | 2 +- paddle/gserver/layers/DeConv3DLayer.cpp | 2 +- paddle/gserver/layers/DeConv3DLayer.h | 2 +- paddle/gserver/layers/DetectionOutputLayer.cpp | 2 +- paddle/gserver/layers/DetectionOutputLayer.h | 2 +- paddle/gserver/layers/DetectionUtil.cpp | 2 +- paddle/gserver/layers/DetectionUtil.h | 2 +- paddle/gserver/layers/DotMulOperator.cpp | 2 +- paddle/gserver/layers/DotMulProjection.cpp | 2 +- paddle/gserver/layers/DotProdLayer.cpp | 2 +- paddle/gserver/layers/EosIdCheckLayer.cpp | 2 +- paddle/gserver/layers/ExpandConvLayer.cpp | 2 +- paddle/gserver/layers/ExpandConvLayer.h | 2 +- paddle/gserver/layers/ExpandLayer.cpp | 2 +- paddle/gserver/layers/ExpandLayer.h | 2 +- .../gserver/layers/FactorizationMachineLayer.cpp | 2 +- paddle/gserver/layers/FactorizationMachineLayer.h | 2 +- paddle/gserver/layers/FeatureMapExpandLayer.cpp | 2 +- paddle/gserver/layers/FullMatrixProjection.cpp | 2 +- paddle/gserver/layers/FullMatrixProjection.h | 2 +- paddle/gserver/layers/FullyConnectedLayer.cpp | 2 +- paddle/gserver/layers/FullyConnectedLayer.h | 2 +- paddle/gserver/layers/GatedRecurrentLayer.cpp | 2 +- paddle/gserver/layers/GatedRecurrentLayer.h | 2 +- paddle/gserver/layers/GetOutputLayer.cpp | 2 +- paddle/gserver/layers/GruCompute.cpp | 2 +- paddle/gserver/layers/GruCompute.cu | 2 +- paddle/gserver/layers/GruCompute.h | 2 +- paddle/gserver/layers/GruStepLayer.cpp | 2 +- paddle/gserver/layers/HierarchicalSigmoidLayer.cpp | 2 +- paddle/gserver/layers/HierarchicalSigmoidLayer.h | 2 +- paddle/gserver/layers/IdentityProjection.cpp | 2 +- paddle/gserver/layers/InterpolationLayer.cpp | 2 +- paddle/gserver/layers/KmaxSeqScoreLayer.cpp | 2 +- paddle/gserver/layers/L2DistanceLayer.cpp | 2 +- paddle/gserver/layers/L2DistanceLayer.h | 2 +- paddle/gserver/layers/Layer.cpp | 2 +- paddle/gserver/layers/Layer.h | 2 +- paddle/gserver/layers/LinearChainCRF.cpp | 2 +- paddle/gserver/layers/LinearChainCRF.h | 2 +- paddle/gserver/layers/LinearChainCTC.cpp | 2 +- paddle/gserver/layers/LinearChainCTC.h | 2 +- paddle/gserver/layers/LstmCompute.cpp | 2 +- paddle/gserver/layers/LstmCompute.cu | 2 +- paddle/gserver/layers/LstmCompute.h | 2 +- paddle/gserver/layers/LstmLayer.cpp | 2 +- paddle/gserver/layers/LstmLayer.h | 2 +- paddle/gserver/layers/LstmStepLayer.cpp | 2 +- paddle/gserver/layers/MDLstmLayer.cpp | 2 +-
paddle/gserver/layers/MKLDNNAddtoLayer.cpp | 2 +- paddle/gserver/layers/MKLDNNAddtoLayer.h | 2 +- paddle/gserver/layers/MKLDNNBase.h | 2 +- paddle/gserver/layers/MKLDNNBatchNormLayer.cpp | 2 +- paddle/gserver/layers/MKLDNNBatchNormLayer.h | 2 +- paddle/gserver/layers/MKLDNNConcatLayer.cpp | 2 +- paddle/gserver/layers/MKLDNNConcatLayer.h | 2 +- paddle/gserver/layers/MKLDNNConvLayer.cpp | 2 +- paddle/gserver/layers/MKLDNNConvLayer.h | 2 +- paddle/gserver/layers/MKLDNNFcLayer.cpp | 2 +- paddle/gserver/layers/MKLDNNFcLayer.h | 2 +- paddle/gserver/layers/MKLDNNLRNLayer.cpp | 2 +- paddle/gserver/layers/MKLDNNLRNLayer.h | 2 +- paddle/gserver/layers/MKLDNNLayer.cpp | 2 +- paddle/gserver/layers/MKLDNNLayer.h | 2 +- paddle/gserver/layers/MKLDNNPoolLayer.cpp | 2 +- paddle/gserver/layers/MKLDNNPoolLayer.h | 2 +- paddle/gserver/layers/MKLPackedRecurrentLayer.cpp | 2 +- paddle/gserver/layers/MKLPackedRecurrentLayer.h | 2 +- paddle/gserver/layers/MKLPackedWeight.h | 2 +- paddle/gserver/layers/MaxIdLayer.cpp | 2 +- paddle/gserver/layers/MaxLayer.cpp | 2 +- paddle/gserver/layers/MaxLayer.h | 2 +- paddle/gserver/layers/MaxOutLayer.cpp | 2 +- paddle/gserver/layers/MaxOutLayer.h | 2 +- paddle/gserver/layers/MaxPoolWithMaskLayer.cpp | 2 +- paddle/gserver/layers/MaxPoolWithMaskLayer.h | 2 +- paddle/gserver/layers/MixedLayer.cpp | 2 +- paddle/gserver/layers/MixedLayer.h | 2 +- paddle/gserver/layers/MultiBoxLossLayer.cpp | 2 +- paddle/gserver/layers/MultinomialSampler.cpp | 2 +- paddle/gserver/layers/MultinomialSampler.h | 2 +- paddle/gserver/layers/MultiplexLayer.cpp | 2 +- paddle/gserver/layers/NCELayer.cpp | 2 +- paddle/gserver/layers/NormLayer.cpp | 2 +- paddle/gserver/layers/NormLayer.h | 2 +- paddle/gserver/layers/NormProjectionLayer.cpp | 2 +- paddle/gserver/layers/NormProjectionLayer.h | 2 +- paddle/gserver/layers/Operator.cpp | 2 +- paddle/gserver/layers/Operator.h | 2 +- paddle/gserver/layers/OuterProdLayer.cpp | 2 +- paddle/gserver/layers/PadLayer.cpp | 2 +- paddle/gserver/layers/PadLayer.h | 2 +- paddle/gserver/layers/ParameterReluLayer.cpp | 2 +- paddle/gserver/layers/ParameterReluLayer.h | 2 +- paddle/gserver/layers/Pool3DLayer.cpp | 2 +- paddle/gserver/layers/Pool3DLayer.h | 2 +- paddle/gserver/layers/PoolLayer.cpp | 2 +- paddle/gserver/layers/PoolLayer.h | 2 +- paddle/gserver/layers/PoolProjection.cpp | 2 +- paddle/gserver/layers/PoolProjection.h | 2 +- paddle/gserver/layers/PoolProjectionLayer.cpp | 2 +- paddle/gserver/layers/PoolProjectionLayer.h | 2 +- paddle/gserver/layers/PowerLayer.cpp | 2 +- paddle/gserver/layers/PrintLayer.cpp | 2 +- paddle/gserver/layers/PriorBox.cpp | 2 +- paddle/gserver/layers/Projection.cpp | 2 +- paddle/gserver/layers/Projection.h | 2 +- paddle/gserver/layers/ROIPoolLayer.cpp | 2 +- paddle/gserver/layers/ROIPoolLayer.h | 2 +- paddle/gserver/layers/RecurrentLayer.cpp | 2 +- paddle/gserver/layers/RecurrentLayer.h | 2 +- paddle/gserver/layers/RecurrentLayerGroup.cpp | 2 +- paddle/gserver/layers/ResizeLayer.cpp | 2 +- paddle/gserver/layers/RotateLayer.cpp | 2 +- paddle/gserver/layers/RotateLayer.h | 2 +- paddle/gserver/layers/RowConvLayer.cpp | 2 +- paddle/gserver/layers/RowConvLayer.h | 2 +- paddle/gserver/layers/RowL2NormLayer.cpp | 2 +- paddle/gserver/layers/SamplingIdLayer.cpp | 2 +- paddle/gserver/layers/ScaleShiftLayer.cpp | 2 +- paddle/gserver/layers/ScaleSubRegionLayer.cpp | 2 +- paddle/gserver/layers/ScaleSubRegionLayer.h | 2 +- paddle/gserver/layers/ScalingLayer.cpp | 2 +- paddle/gserver/layers/ScalingProjection.cpp | 2 +-
.../layers/SelectiveFullyConnectedLayer.cpp | 2 +- .../gserver/layers/SelectiveFullyConnectedLayer.h | 2 +- paddle/gserver/layers/SequenceConcatLayer.cpp | 2 +- .../gserver/layers/SequenceLastInstanceLayer.cpp | 2 +- paddle/gserver/layers/SequencePoolLayer.cpp | 2 +- paddle/gserver/layers/SequencePoolLayer.h | 2 +- paddle/gserver/layers/SequenceReshapeLayer.cpp | 2 +- paddle/gserver/layers/SequenceSliceLayer.cpp | 2 +- paddle/gserver/layers/SequenceToBatch.cpp | 2 +- paddle/gserver/layers/SequenceToBatch.h | 2 +- paddle/gserver/layers/SliceProjection.cpp | 2 +- paddle/gserver/layers/SlopeInterceptLayer.cpp | 2 +- paddle/gserver/layers/SpatialPyramidPoolLayer.cpp | 2 +- paddle/gserver/layers/SpatialPyramidPoolLayer.h | 2 +- paddle/gserver/layers/SubNestedSequenceLayer.cpp | 2 +- paddle/gserver/layers/SubSequenceLayer.cpp | 2 +- paddle/gserver/layers/SumToOneNormLayer.cpp | 2 +- paddle/gserver/layers/SwitchOrderLayer.cpp | 2 +- paddle/gserver/layers/SwitchOrderLayer.h | 2 +- paddle/gserver/layers/TableProjection.cpp | 2 +- paddle/gserver/layers/TableProjection.h | 2 +- paddle/gserver/layers/TensorLayer.cpp | 2 +- paddle/gserver/layers/TensorLayer.h | 2 +- paddle/gserver/layers/TransLayer.cpp | 2 +- paddle/gserver/layers/TransLayer.h | 2 +- .../layers/TransposedFullMatrixProjection.cpp | 2 +- paddle/gserver/layers/ValidationLayer.cpp | 2 +- paddle/gserver/layers/ValidationLayer.h | 2 +- paddle/gserver/layers/WarpCTCLayer.cpp | 2 +- paddle/gserver/layers/WarpCTCLayer.h | 2 +- paddle/gserver/tests/LayerGradUtil.cpp | 2 +- paddle/gserver/tests/LayerGradUtil.h | 2 +- paddle/gserver/tests/MKLDNNTester.cpp | 2 +- paddle/gserver/tests/MKLDNNTester.h | 2 +- paddle/gserver/tests/img_conv_cudnn.py | 2 +- paddle/gserver/tests/img_conv_exconv.py | 2 +- paddle/gserver/tests/pyDataProvider.py | 2 +- paddle/gserver/tests/rnn_data_provider.py | 2 +- paddle/gserver/tests/sequenceGen.py | 2 +- .../sequence_nest_rnn_multi_unequalength_inputs.py | 2 +- paddle/gserver/tests/sequence_recurrent.py | 2 +- paddle/gserver/tests/sequence_recurrent_group.py | 2 +- .../gserver/tests/sequence_rnn_matched_inputs.py | 2 +- paddle/gserver/tests/sequence_rnn_mixed_inputs.py | 2 +- .../sequence_rnn_multi_unequalength_inputs.py | 2 +- paddle/gserver/tests/test_ActivationGrad.cpp | 2 +- paddle/gserver/tests/test_BatchNorm.cpp | 2 +- paddle/gserver/tests/test_CRFLayerGrad.cpp | 2 +- paddle/gserver/tests/test_CompareSparse.cpp | 2 +- paddle/gserver/tests/test_CompareTwoNets.cpp | 2 +- paddle/gserver/tests/test_ConvTrans.cpp | 2 +- paddle/gserver/tests/test_ConvUnify.cpp | 2 +- .../tests/test_CrossEntropyOverBeamGrad.cpp | 2 +- paddle/gserver/tests/test_DetectionOutput.cpp | 2 +- paddle/gserver/tests/test_Evaluator.cpp | 2 +- paddle/gserver/tests/test_Expand.cpp | 2 +- paddle/gserver/tests/test_KmaxSeqScore.cpp | 2 +- paddle/gserver/tests/test_LayerGrad.cpp | 2 +- paddle/gserver/tests/test_LinearChainCRF.cpp | 2 +- paddle/gserver/tests/test_MKLDNN.cpp | 2 +- .../tests/test_MaxPoolingWithMaskOutput.cpp | 2 +- paddle/gserver/tests/test_MultinomialSampler.cpp | 2 +- paddle/gserver/tests/test_NetworkCompare.cpp | 2 +- paddle/gserver/tests/test_PriorBox.cpp | 2 +- paddle/gserver/tests/test_PyDataProvider.cpp | 2 +- paddle/gserver/tests/test_PyDataProvider2.cpp | 2 +- paddle/gserver/tests/test_PyDataProvider2.py | 2 +- .../tests/test_RecurrentGradientMachine.cpp | 2 +- paddle/gserver/tests/test_RecurrentLayer.cpp | 2 +- paddle/gserver/tests/test_SelectiveFCLayer.cpp | 2 +- paddle/gserver/tests/test_SeqSliceLayerGrad.cpp | 2 +- 
paddle/gserver/tests/test_WarpCTCLayer.cpp | 2 +- paddle/math/Allocator.h | 2 +- paddle/math/BaseMatrix.cu | 2 +- paddle/math/BaseMatrix.h | 2 +- paddle/math/CpuSparseMatrix.cpp | 2 +- paddle/math/CpuSparseMatrix.h | 2 +- paddle/math/ExecViaCpu.h | 2 +- paddle/math/MKLDNNMatrix.cpp | 2 +- paddle/math/MKLDNNMatrix.h | 2 +- paddle/math/MathFunctions.cpp | 2 +- paddle/math/MathFunctions.h | 2 +- paddle/math/MathUtils.cpp | 2 +- paddle/math/MathUtils.h | 2 +- paddle/math/Matrix.cpp | 2 +- paddle/math/Matrix.h | 2 +- paddle/math/MatrixBitCode.cpp | 2 +- paddle/math/MemoryHandle.cpp | 2 +- paddle/math/MemoryHandle.h | 2 +- paddle/math/NEONFunctions.cpp | 2 +- paddle/math/NEONFunctions.h | 2 +- paddle/math/PoolAllocator.cpp | 2 +- paddle/math/PoolAllocator.h | 2 +- paddle/math/RowBuffer.h | 2 +- paddle/math/SIMDFunctions.cpp | 2 +- paddle/math/SIMDFunctions.h | 2 +- paddle/math/SparseMatrix.cpp | 2 +- paddle/math/SparseMatrix.h | 2 +- paddle/math/SparseRowMatrix.cpp | 2 +- paddle/math/SparseRowMatrix.h | 2 +- paddle/math/Storage.cpp | 2 +- paddle/math/Storage.h | 2 +- paddle/math/TensorApply.h | 2 +- paddle/math/TensorAssign.h | 2 +- paddle/math/TensorEvaluate.h | 2 +- paddle/math/TensorExpression.h | 2 +- paddle/math/TrainingAlgorithmOp.cu | 2 +- paddle/math/TrainingAlgorithmOp.h | 2 +- paddle/math/Vector.cpp | 2 +- paddle/math/Vector.h | 2 +- paddle/math/float16.h | 2 +- paddle/math/tests/OriginalOptimizerApi.h | 2 +- paddle/math/tests/PerfUtils.h | 2 +- paddle/math/tests/TensorCheck.h | 2 +- paddle/math/tests/TestUtils.h | 2 +- paddle/math/tests/test_Allocator.cpp | 2 +- paddle/math/tests/test_BaseMatrix.cpp | 2 +- paddle/math/tests/test_CpuGpuVector.cpp | 2 +- paddle/math/tests/test_ExecViaCpu.cpp | 2 +- paddle/math/tests/test_FPException.cpp | 2 +- paddle/math/tests/test_GpuProfiler.cpp | 2 +- paddle/math/tests/test_Matrix.cpp | 2 +- paddle/math/tests/test_RowBuffer.cpp | 2 +- paddle/math/tests/test_SIMDFunctions.cpp | 2 +- paddle/math/tests/test_SparseMatrix.cpp | 2 +- paddle/math/tests/test_Tensor.cu | 2 +- paddle/math/tests/test_TrainingAlgorithm.cpp | 2 +- paddle/math/tests/test_batchTranspose.cpp | 2 +- paddle/math/tests/test_float16.cpp | 2 +- paddle/math/tests/test_float16.cu | 2 +- paddle/math/tests/test_lazyAssign.cu | 2 +- paddle/math/tests/test_matrixCompare.cpp | 2 +- paddle/math/tests/test_matrixUtil.h | 2 +- paddle/math/tests/test_perturbation.cpp | 2 +- paddle/math/tests/test_sparseMatrixCompare.cpp | 2 +- paddle/optimizer/adadelta_optimizer.cc | 2 +- paddle/optimizer/adadelta_optimizer.h | 2 +- paddle/optimizer/adagrad_optimizer.cc | 2 +- paddle/optimizer/adagrad_optimizer.h | 2 +- paddle/optimizer/adam_optimizer.cc | 2 +- paddle/optimizer/adam_optimizer.h | 2 +- paddle/optimizer/lr_policy.h | 2 +- paddle/optimizer/optimizer.cc | 2 +- paddle/optimizer/optimizer.h | 2 +- paddle/optimizer/parameter_optimizer.cc | 2 +- paddle/optimizer/parameter_optimizer.h | 2 +- paddle/optimizer/parameter_optimizer_test.cc | 2 +- paddle/optimizer/serialization.h | 2 +- paddle/optimizer/serialization_test.cc | 2 +- paddle/optimizer/sgd_optimizer.cc | 2 +- paddle/optimizer/sgd_optimizer.h | 2 +- paddle/optimizer/tensor.h | 2 +- paddle/parameter/Argument.cpp | 2 +- paddle/parameter/Argument.h | 2 +- paddle/parameter/AverageOptimizer.cpp | 2 +- paddle/parameter/AverageOptimizer.h | 2 +- paddle/parameter/FirstOrderOptimizer.cpp | 2 +- paddle/parameter/FirstOrderOptimizer.h | 2 +- paddle/parameter/LearningRateScheduler.cpp | 2 +- paddle/parameter/LearningRateScheduler.h | 2 +- 
paddle/parameter/OptimizerFunctions.cpp | 2 +- paddle/parameter/OptimizerFunctions.h | 2 +- paddle/parameter/OptimizerWithRegularizer.cpp | 2 +- paddle/parameter/OptimizerWithRegularizer.h | 2 +- paddle/parameter/Parameter.cpp | 2 +- paddle/parameter/Parameter.h | 2 +- paddle/parameter/ParameterOptimizer.cpp | 2 +- paddle/parameter/ParameterOptimizer.h | 2 +- paddle/parameter/ParameterUpdateFunctions.cpp | 2 +- paddle/parameter/ParameterUpdateFunctions.h | 2 +- paddle/parameter/ParameterUpdaterBase.cpp | 2 +- paddle/parameter/ParameterUpdaterBase.h | 2 +- paddle/parameter/ParameterUpdaterHook.cpp | 2 +- paddle/parameter/ParameterUpdaterHook.h | 2 +- paddle/parameter/Regularizer.cpp | 2 +- paddle/parameter/Regularizer.h | 2 +- paddle/parameter/ThreadLocalBuffer.cpp | 2 +- paddle/parameter/ThreadLocalBuffer.h | 2 +- paddle/parameter/Weight.cpp | 2 +- paddle/parameter/Weight.h | 2 +- paddle/parameter/tests/test_argument.cpp | 2 +- paddle/parameter/tests/test_common.cpp | 2 +- paddle/pserver/BaseClient.cpp | 2 +- paddle/pserver/BaseClient.h | 2 +- paddle/pserver/LightNetwork.cpp | 2 +- paddle/pserver/LightNetwork.h | 2 +- paddle/pserver/ParameterClient2.cpp | 2 +- paddle/pserver/ParameterClient2.h | 2 +- paddle/pserver/ParameterServer2.cpp | 2 +- paddle/pserver/ParameterServer2.h | 2 +- paddle/pserver/ParameterServer2Main.cpp | 2 +- paddle/pserver/ParameterServerController.cpp | 2 +- paddle/pserver/ParameterServerController.h | 2 +- paddle/pserver/ProtoServer.cpp | 2 +- paddle/pserver/ProtoServer.h | 2 +- paddle/pserver/RDMANetwork.h | 2 +- paddle/pserver/SocketChannel.cpp | 2 +- paddle/pserver/SocketChannel.h | 2 +- paddle/pserver/SparseParameterDistribution.cpp | 2 +- paddle/pserver/SparseParameterDistribution.h | 2 +- paddle/pserver/test/SocketTest.cpp | 2 +- paddle/pserver/test/test_ParameterServer2.cpp | 2 +- paddle/pserver/test/test_ProtoServer.cpp | 2 +- proto/DataConfig.proto | 2 +- proto/DataFormat.proto | 2 +- proto/ModelConfig.proto | 2 +- proto/OptimizerConfig.proto | 2 +- proto/ParameterConfig.proto | 2 +- proto/ParameterServerConfig.proto | 4 ++-- proto/ParameterService.proto | 2 +- proto/TrainerConfig.proto | 2 +- .../tests/ProtobufEqualMain.cpp | 2 +- .../tests/configs/img_layers.py | 2 +- .../tests/configs/img_trans_layers.py | 2 +- .../tests/configs/last_first_seq.py | 2 +- .../tests/configs/layer_activations.py | 2 +- .../tests/configs/math_ops.py | 2 +- .../tests/configs/projections.py | 2 +- .../tests/configs/shared_fc.py | 2 +- .../tests/configs/shared_gru.py | 2 +- .../tests/configs/shared_lstm.py | 2 +- .../tests/configs/simple_rnn_layers.py | 2 +- .../tests/configs/test_BatchNorm3D.py | 2 +- .../tests/configs/test_bi_grumemory.py | 2 +- .../tests/configs/test_bilinear_interp.py | 2 +- .../tests/configs/test_clip_layer.py | 2 +- .../tests/configs/test_conv3d_layer.py | 2 +- .../tests/configs/test_cost_layers.py | 2 +- .../tests/configs/test_cost_layers_with_weight.py | 2 +- .../tests/configs/test_crop.py | 2 +- .../tests/configs/test_deconv3d_layer.py | 2 +- .../tests/configs/test_detection_output_layer.py | 2 +- .../tests/configs/test_dot_prod_layer.py | 2 +- .../tests/configs/test_expand_layer.py | 2 +- .../tests/configs/test_factorization_machine.py | 2 +- .../tests/configs/test_fc.py | 2 +- .../tests/configs/test_gated_unit_layer.py | 2 +- .../tests/configs/test_grumemory_layer.py | 2 +- .../tests/configs/test_hsigmoid.py | 2 +- .../tests/configs/test_l2_distance_layer.py | 2 +- .../tests/configs/test_lstmemory_layer.py | 2 +- .../tests/configs/test_maxout.py | 2 +- 
.../tests/configs/test_multibox_loss_layer.py | 2 +- .../tests/configs/test_multiplex_layer.py | 2 +- .../tests/configs/test_ntm_layers.py | 2 +- .../tests/configs/test_pad.py | 2 +- .../tests/configs/test_pooling3D_layer.py | 2 +- .../tests/configs/test_prelu_layer.py | 2 +- .../tests/configs/test_print_layer.py | 2 +- .../tests/configs/test_recursive_topology.py | 2 +- .../tests/configs/test_repeat_layer.py | 2 +- .../tests/configs/test_resize_layer.py | 2 +- .../tests/configs/test_rnn_group.py | 2 +- .../tests/configs/test_roi_pool_layer.py | 2 +- .../tests/configs/test_row_conv.py | 2 +- .../tests/configs/test_row_l2_norm_layer.py | 2 +- .../tests/configs/test_scale_shift_layer.py | 2 +- .../tests/configs/test_scale_sub_region_layer.py | 2 +- .../tests/configs/test_seq_concat_reshape.py | 2 +- .../tests/configs/test_sequence_pooling.py | 2 +- .../tests/configs/test_smooth_l1.py | 2 +- .../tests/configs/test_split_datasource.py | 2 +- .../tests/configs/test_spp_layer.py | 2 +- .../tests/configs/unused_layers.py | 2 +- .../tests/configs/util_layers.py | 2 +- .../tests/test_reset_hook.py | 2 +- python/paddle/utils/image_multiproc.py | 2 +- python/paddle/v2/dataset/tests/imikolov_test.py | 2 +- python/paddle/v2/event.py | 2 +- python/paddle/v2/fluid/__init__.py | 2 +- python/paddle/v2/fluid/backward.py | 2 +- python/paddle/v2/fluid/clip.py | 2 +- python/paddle/v2/fluid/data_feeder.py | 2 +- python/paddle/v2/fluid/debuger.py | 2 +- python/paddle/v2/fluid/default_scope_funcs.py | 2 +- python/paddle/v2/fluid/distribute_transpiler.py | 2 +- .../v2/fluid/distribute_transpiler_simple.py | 2 +- python/paddle/v2/fluid/distributed_spliter.py | 2 +- python/paddle/v2/fluid/evaluator.py | 2 +- python/paddle/v2/fluid/executor.py | 2 +- python/paddle/v2/fluid/framework.py | 2 +- python/paddle/v2/fluid/graphviz.py | 2 +- python/paddle/v2/fluid/initializer.py | 2 +- python/paddle/v2/fluid/io.py | 2 +- python/paddle/v2/fluid/layer_helper.py | 2 +- python/paddle/v2/fluid/layers/__init__.py | 2 +- python/paddle/v2/fluid/layers/control_flow.py | 2 +- python/paddle/v2/fluid/layers/device.py | 2 +- python/paddle/v2/fluid/layers/io.py | 2 +- .../v2/fluid/layers/layer_function_generator.py | 2 +- python/paddle/v2/fluid/layers/math_op_patch.py | 2 +- python/paddle/v2/fluid/layers/nn.py | 2 +- python/paddle/v2/fluid/layers/ops.py | 2 +- python/paddle/v2/fluid/layers/tensor.py | 2 +- .../v2/fluid/memory_optimization_transpiler.py | 2 +- python/paddle/v2/fluid/net_drawer.py | 2 +- python/paddle/v2/fluid/nets.py | 2 +- python/paddle/v2/fluid/op.py | 2 +- python/paddle/v2/fluid/optimizer.py | 2 +- python/paddle/v2/fluid/param_attr.py | 2 +- python/paddle/v2/fluid/profiler.py | 2 +- python/paddle/v2/fluid/regularizer.py | 2 +- python/paddle/v2/fluid/tests/__init__.py | 2 +- python/paddle/v2/fluid/tests/book/__init__.py | 2 +- .../fluid/tests/book/notest_rnn_encoder_decoer.py | 2 +- .../paddle/v2/fluid/tests/book/test_fit_a_line.py | 2 +- .../fluid/tests/book/test_image_classification.py | 2 +- .../fluid/tests/book/test_label_semantic_roles.py | 2 +- .../fluid/tests/book/test_machine_translation.py | 2 +- .../v2/fluid/tests/book/test_recognize_digits.py | 2 +- .../v2/fluid/tests/book/test_recommender_system.py | 2 +- python/paddle/v2/fluid/tests/book/test_word2vec.py | 2 +- .../book_distribute/notest_dist_fit_a_line.py | 2 +- .../notest_dist_label_semantic_roles.py | 2 +- .../tests/book_distribute/notest_dist_word2vec.py | 2 +- .../book_distribute/notest_machine_translation.py | 2 +- 
.../notest_recognize_digits_conv_dist.py | 2 +- .../notest_recognize_digits_mlp_dist.py | 2 +- .../notest_recommender_system_dist.py | 2 +- .../notest_understand_sentiment_conv_dist.py | 2 +- .../notest_understand_sentiment_dynamic_lstm.py | 2 +- .../fluid/tests/book_distribute/test_split_var.py | 2 +- .../test_memopt_fit_a_line.py | 2 +- .../test_memopt_image_classification_train.py | 2 +- .../test_memopt_machine_translation.py | 2 +- python/paddle/v2/fluid/tests/decorators.py | 2 +- python/paddle/v2/fluid/tests/demo/fc_gan.py | 2 +- python/paddle/v2/fluid/tests/op_test.py | 2 +- python/paddle/v2/fluid/tests/test_accuracy_op.py | 2 +- python/paddle/v2/fluid/tests/test_activation_op.py | 2 +- python/paddle/v2/fluid/tests/test_adadelta_op.py | 2 +- python/paddle/v2/fluid/tests/test_adagrad_op.py | 2 +- python/paddle/v2/fluid/tests/test_adam_op.py | 2 +- python/paddle/v2/fluid/tests/test_adamax_op.py | 2 +- .../v2/fluid/tests/test_array_read_write_op.py | 2 +- python/paddle/v2/fluid/tests/test_assign_op.py | 2 +- .../paddle/v2/fluid/tests/test_assign_value_op.py | 2 +- python/paddle/v2/fluid/tests/test_auc_op.py | 2 +- python/paddle/v2/fluid/tests/test_batch_norm_op.py | 2 +- .../v2/fluid/tests/test_beam_search_decode_op.py | 2 +- .../paddle/v2/fluid/tests/test_beam_search_op.py | 2 +- .../fluid/tests/test_bilinear_tensor_product_op.py | 2 +- .../v2/fluid/tests/test_bipartite_match_op.py | 2 +- python/paddle/v2/fluid/tests/test_box_coder_op.py | 2 +- python/paddle/v2/fluid/tests/test_calc_gradient.py | 2 +- python/paddle/v2/fluid/tests/test_cast_op.py | 2 +- python/paddle/v2/fluid/tests/test_chunk_eval_op.py | 2 +- .../paddle/v2/fluid/tests/test_clip_by_norm_op.py | 2 +- python/paddle/v2/fluid/tests/test_clip_op.py | 2 +- python/paddle/v2/fluid/tests/test_compare_op.py | 2 +- python/paddle/v2/fluid/tests/test_concat_op.py | 2 +- python/paddle/v2/fluid/tests/test_cond_op.py | 2 +- .../v2/fluid/tests/test_conditional_block.py | 2 +- python/paddle/v2/fluid/tests/test_const_value.py | 2 +- python/paddle/v2/fluid/tests/test_conv2d_op.py | 2 +- .../v2/fluid/tests/test_conv2d_transpose_op.py | 2 +- python/paddle/v2/fluid/tests/test_conv3d_op.py | 2 +- .../v2/fluid/tests/test_conv3d_transpose_op.py | 2 +- python/paddle/v2/fluid/tests/test_conv_shift_op.py | 2 +- python/paddle/v2/fluid/tests/test_cos_sim_op.py | 2 +- .../v2/fluid/tests/test_create_op_doc_string.py | 2 +- .../paddle/v2/fluid/tests/test_crf_decoding_op.py | 2 +- python/paddle/v2/fluid/tests/test_crop_op.py | 2 +- .../paddle/v2/fluid/tests/test_cross_entropy_op.py | 2 +- python/paddle/v2/fluid/tests/test_ctc_align.py | 2 +- python/paddle/v2/fluid/tests/test_cumsum_op.py | 2 +- python/paddle/v2/fluid/tests/test_data_feeder.py | 2 +- .../v2/fluid/tests/test_decayed_adagrad_op.py | 2 +- .../v2/fluid/tests/test_default_scope_funcs.py | 2 +- python/paddle/v2/fluid/tests/test_detection.py | 2 +- .../v2/fluid/tests/test_detection_output_op.py | 2 +- python/paddle/v2/fluid/tests/test_dropout_op.py | 2 +- python/paddle/v2/fluid/tests/test_dyn_rnn.py | 2 +- .../v2/fluid/tests/test_dynrnn_gradient_check.py | 2 +- .../v2/fluid/tests/test_dynrnn_static_input.py | 2 +- .../paddle/v2/fluid/tests/test_edit_distance_op.py | 2 +- .../v2/fluid/tests/test_elementwise_add_op.py | 2 +- .../v2/fluid/tests/test_elementwise_div_op.py | 2 +- .../v2/fluid/tests/test_elementwise_max_op.py | 2 +- .../v2/fluid/tests/test_elementwise_min_op.py | 2 +- .../v2/fluid/tests/test_elementwise_mul_op.py | 2 +- .../v2/fluid/tests/test_elementwise_pow_op.py | 2 +- 
.../v2/fluid/tests/test_elementwise_sub_op.py | 2 +- python/paddle/v2/fluid/tests/test_error_clip.py | 2 +- python/paddle/v2/fluid/tests/test_exception.py | 2 +- .../paddle/v2/fluid/tests/test_executor_and_mul.py | 2 +- python/paddle/v2/fluid/tests/test_expand_op.py | 2 +- .../v2/fluid/tests/test_feed_fetch_method.py | 2 +- .../tests/test_fill_constant_batch_size_like_op.py | 2 +- .../paddle/v2/fluid/tests/test_fill_constant_op.py | 2 +- python/paddle/v2/fluid/tests/test_fill_op.py | 2 +- .../v2/fluid/tests/test_fill_zeros_like_op.py | 2 +- .../v2/fluid/tests/test_framework_debug_str.py | 2 +- python/paddle/v2/fluid/tests/test_ftrl_op.py | 2 +- python/paddle/v2/fluid/tests/test_gather_op.py | 2 +- .../v2/fluid/tests/test_gaussian_random_op.py | 2 +- python/paddle/v2/fluid/tests/test_get_places_op.py | 2 +- python/paddle/v2/fluid/tests/test_gradient_clip.py | 2 +- python/paddle/v2/fluid/tests/test_gru_op.py | 2 +- python/paddle/v2/fluid/tests/test_gru_unit_op.py | 2 +- python/paddle/v2/fluid/tests/test_hinge_loss_op.py | 2 +- python/paddle/v2/fluid/tests/test_huber_loss_op.py | 2 +- .../paddle/v2/fluid/tests/test_im2sequence_op.py | 2 +- .../fluid/tests/test_image_classification_layer.py | 2 +- python/paddle/v2/fluid/tests/test_infer_shape.py | 2 +- .../v2/fluid/tests/test_inference_model_io.py | 2 +- python/paddle/v2/fluid/tests/test_initializer.py | 2 +- .../v2/fluid/tests/test_iou_similarity_op.py | 2 +- python/paddle/v2/fluid/tests/test_is_empty_op.py | 2 +- python/paddle/v2/fluid/tests/test_l1_norm_op.py | 2 +- .../paddle/v2/fluid/tests/test_label_smooth_op.py | 2 +- python/paddle/v2/fluid/tests/test_layer_norm_op.py | 2 +- python/paddle/v2/fluid/tests/test_layers.py | 2 +- .../v2/fluid/tests/test_linear_chain_crf_op.py | 2 +- .../v2/fluid/tests/test_lod_array_length_op.py | 2 +- .../paddle/v2/fluid/tests/test_lod_rank_table.py | 2 +- python/paddle/v2/fluid/tests/test_lod_reset_op.py | 2 +- .../paddle/v2/fluid/tests/test_lod_tensor_array.py | 2 +- .../v2/fluid/tests/test_lod_tensor_array_ops.py | 2 +- python/paddle/v2/fluid/tests/test_log_loss_op.py | 2 +- python/paddle/v2/fluid/tests/test_logical_op.py | 2 +- .../paddle/v2/fluid/tests/test_lookup_table_op.py | 2 +- python/paddle/v2/fluid/tests/test_lrn_op.py | 2 +- python/paddle/v2/fluid/tests/test_lstm_op.py | 2 +- python/paddle/v2/fluid/tests/test_lstm_unit_op.py | 2 +- python/paddle/v2/fluid/tests/test_lstmp_op.py | 2 +- .../v2/fluid/tests/test_margin_rank_loss_op.py | 2 +- python/paddle/v2/fluid/tests/test_math_op_patch.py | 2 +- python/paddle/v2/fluid/tests/test_matmul_op.py | 2 +- python/paddle/v2/fluid/tests/test_maxout_op.py | 2 +- python/paddle/v2/fluid/tests/test_mean_op.py | 2 +- .../tests/test_memory_optimization_transpiler.py | 2 +- python/paddle/v2/fluid/tests/test_minus_op.py | 2 +- .../paddle/v2/fluid/tests/test_mnist_if_else_op.py | 2 +- .../v2/fluid/tests/test_modified_huber_loss_op.py | 2 +- python/paddle/v2/fluid/tests/test_momentum_op.py | 2 +- python/paddle/v2/fluid/tests/test_mul_op.py | 2 +- .../v2/fluid/tests/test_multiclass_nms_op.py | 2 +- .../v2/fluid/tests/test_multihead_attention.py | 2 +- python/paddle/v2/fluid/tests/test_multiplex_op.py | 2 +- python/paddle/v2/fluid/tests/test_nce.py | 2 +- python/paddle/v2/fluid/tests/test_net.py | 2 +- python/paddle/v2/fluid/tests/test_norm_op.py | 2 +- .../v2/fluid/tests/test_normalization_wrapper.py | 2 +- python/paddle/v2/fluid/tests/test_one_hot_op.py | 2 +- .../paddle/v2/fluid/tests/test_op_support_gpu.py | 2 +- python/paddle/v2/fluid/tests/test_operator.py | 2 +-
python/paddle/v2/fluid/tests/test_operator_desc.py | 2 +- python/paddle/v2/fluid/tests/test_optimizer.py | 2 +- python/paddle/v2/fluid/tests/test_pad_op.py | 2 +- python/paddle/v2/fluid/tests/test_parallel_op.py | 2 +- python/paddle/v2/fluid/tests/test_parameter.py | 2 +- python/paddle/v2/fluid/tests/test_pool2d_op.py | 2 +- python/paddle/v2/fluid/tests/test_pool3d_op.py | 2 +- python/paddle/v2/fluid/tests/test_pool_max_op.py | 2 +- .../fluid/tests/test_positive_negative_pair_op.py | 2 +- .../v2/fluid/tests/test_precision_recall_op.py | 2 +- python/paddle/v2/fluid/tests/test_prelu_op.py | 2 +- python/paddle/v2/fluid/tests/test_print_op.py | 2 +- python/paddle/v2/fluid/tests/test_prior_box_op.py | 2 +- python/paddle/v2/fluid/tests/test_profiler.py | 2 +- python/paddle/v2/fluid/tests/test_program.py | 2 +- python/paddle/v2/fluid/tests/test_protobuf.py | 2 +- .../paddle/v2/fluid/tests/test_protobuf_descs.py | 2 +- .../v2/fluid/tests/test_proximal_adagrad_op.py | 2 +- .../paddle/v2/fluid/tests/test_proximal_gd_op.py | 2 +- python/paddle/v2/fluid/tests/test_rank_loss_op.py | 2 +- python/paddle/v2/fluid/tests/test_recurrent_op.py | 2 +- python/paddle/v2/fluid/tests/test_recv_op.py | 2 +- python/paddle/v2/fluid/tests/test_reduce_op.py | 2 +- python/paddle/v2/fluid/tests/test_registry.py | 2 +- python/paddle/v2/fluid/tests/test_regularizer.py | 2 +- .../v2/fluid/tests/test_reorder_lod_tensor.py | 2 +- python/paddle/v2/fluid/tests/test_reshape_op.py | 2 +- python/paddle/v2/fluid/tests/test_rmsprop_op.py | 2 +- .../v2/fluid/tests/test_rnn_memory_helper_op.py | 2 +- python/paddle/v2/fluid/tests/test_roi_pool_op.py | 2 +- python/paddle/v2/fluid/tests/test_row_conv_op.py | 2 +- python/paddle/v2/fluid/tests/test_scale_op.py | 2 +- python/paddle/v2/fluid/tests/test_scatter_op.py | 2 +- python/paddle/v2/fluid/tests/test_scope.py | 2 +- python/paddle/v2/fluid/tests/test_selected_rows.py | 2 +- python/paddle/v2/fluid/tests/test_seq_concat_op.py | 2 +- python/paddle/v2/fluid/tests/test_seq_conv.py | 2 +- python/paddle/v2/fluid/tests/test_seq_pool.py | 2 +- .../v2/fluid/tests/test_sequence_erase_op.py | 2 +- .../paddle/v2/fluid/tests/test_sequence_expand.py | 2 +- .../paddle/v2/fluid/tests/test_sequence_reshape.py | 2 +- .../v2/fluid/tests/test_sequence_slice_op.py | 2 +- .../v2/fluid/tests/test_sequence_softmax_op.py | 2 +- python/paddle/v2/fluid/tests/test_sgd_op.py | 2 +- .../v2/fluid/tests/test_shrink_rnn_memory.py | 2 +- .../test_sigmoid_cross_entropy_with_logits_op.py | 2 +- python/paddle/v2/fluid/tests/test_sign_op.py | 2 +- .../v2/fluid/tests/test_smooth_l1_loss_op.py | 2 +- python/paddle/v2/fluid/tests/test_softmax_op.py | 2 +- .../tests/test_softmax_with_cross_entropy_op.py | 2 +- .../tests/test_split_and_merge_lod_tensor_op.py | 2 +- python/paddle/v2/fluid/tests/test_split_op.py | 2 +- .../v2/fluid/tests/test_split_selected_rows_op.py | 2 +- python/paddle/v2/fluid/tests/test_spp_op.py | 2 +- .../v2/fluid/tests/test_squared_l2_distance_op.py | 2 +- .../v2/fluid/tests/test_squared_l2_norm_op.py | 2 +- python/paddle/v2/fluid/tests/test_sum_op.py | 2 +- python/paddle/v2/fluid/tests/test_switch.py | 2 +- .../paddle/v2/fluid/tests/test_target_assign_op.py | 2 +- python/paddle/v2/fluid/tests/test_tensor.py | 2 +- python/paddle/v2/fluid/tests/test_top_k_op.py | 2 +- python/paddle/v2/fluid/tests/test_transpose_op.py | 2 +- .../v2/fluid/tests/test_uniform_random_op.py | 2 +- python/paddle/v2/fluid/tests/test_unpool_op.py | 2 +- python/paddle/v2/fluid/tests/test_variable.py | 2 +-
python/paddle/v2/fluid/tests/test_warpctc_op.py | 2 +- .../v2/fluid/tests/test_weight_normalization.py | 2 +- python/paddle/v2/fluid/tests/test_while_op.py | 2 +- python/paddle/v2/image.py | 2 +- python/paddle/v2/inference.py | 2 +- python/paddle/v2/master/__init__.py | 2 +- python/paddle/v2/master/client.py | 2 +- python/paddle/v2/reader/tests/__init__.py | 2 +- python/paddle/v2/reader/tests/creator_test.py | 4 ++-- python/paddle/v2/reader/tests/decorator_test.py | 2 +- python/paddle/v2/tests/test_image.py | 2 +- python/paddle/v2/tests/test_layer.py | 2 +- python/paddle/v2/tests/test_op.py | 2 +- python/paddle/v2/tests/test_paramconf_order.py | 4 ++-- python/paddle/v2/tests/test_parameters.py | 2 +- python/paddle/v2/tests/test_rnn_layer.py | 2 +- python/paddle/v2/tests/test_topology.py | 2 +- python/paddle/v2/trainer.py | 2 +- tools/manylinux1/build_scripts/manylinux1-check.py | 2 +- .../manylinux1/build_scripts/python-tag-abi-tag.py | 2 +- tools/manylinux1/build_scripts/ssl-check.py | 2 +- 1695 files changed, 1724 insertions(+), 1700 deletions(-) mode change 120000 => 100755 paddle/capi/examples/model_inference/multi_thread/trainer_config.py mode change 120000 => 100755 paddle/capi/examples/model_inference/sparse_binary/trainer_config.py diff --git a/CMakeLists.txt b/CMakeLists.txt index 3a21574b85..ae04f9ff3f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/LICENSE b/LICENSE index e77bd090ee..5fe86943b3 100644 --- a/LICENSE +++ b/LICENSE @@ -188,7 +188,7 @@ Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/benchmark/cluster/vgg16/vgg16_fluid.py b/benchmark/cluster/vgg16/vgg16_fluid.py index 499e06ec42..99395699f2 100644 --- a/benchmark/cluster/vgg16/vgg16_fluid.py +++ b/benchmark/cluster/vgg16/vgg16_fluid.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmark/cluster/vgg16/vgg16_v2.py b/benchmark/cluster/vgg16/vgg16_v2.py index 6ac6b3c332..1a66af32d7 100644 --- a/benchmark/cluster/vgg16/vgg16_v2.py +++ b/benchmark/cluster/vgg16/vgg16_v2.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. diff --git a/benchmark/paddle/image/alexnet.py b/benchmark/paddle/image/alexnet.py index 7029608187..9efc3f0494 100644 --- a/benchmark/paddle/image/alexnet.py +++ b/benchmark/paddle/image/alexnet.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmark/paddle/image/provider.py b/benchmark/paddle/image/provider.py index 21e0d381aa..6ad817ccef 100644 --- a/benchmark/paddle/image/provider.py +++ b/benchmark/paddle/image/provider.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmark/paddle/rnn/imdb.py b/benchmark/paddle/rnn/imdb.py index c3b5faa19a..2a67f9b0cf 100755 --- a/benchmark/paddle/rnn/imdb.py +++ b/benchmark/paddle/rnn/imdb.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmark/paddle/rnn/provider.py b/benchmark/paddle/rnn/provider.py index f35cd5b079..23cc0c44a9 100644 --- a/benchmark/paddle/rnn/provider.py +++ b/benchmark/paddle/rnn/provider.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmark/tensorflow/image/alexnet.py b/benchmark/tensorflow/image/alexnet.py index a37d7e7c62..95728b7a85 100644 --- a/benchmark/tensorflow/image/alexnet.py +++ b/benchmark/tensorflow/image/alexnet.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmark/tensorflow/image/alexnet_multi_gpu.py b/benchmark/tensorflow/image/alexnet_multi_gpu.py index 2ebab8fb60..51dfe3f1cb 100644 --- a/benchmark/tensorflow/image/alexnet_multi_gpu.py +++ b/benchmark/tensorflow/image/alexnet_multi_gpu.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmark/tensorflow/image/googlenet.py b/benchmark/tensorflow/image/googlenet.py index 1202cbb171..37b2ba6911 100644 --- a/benchmark/tensorflow/image/googlenet.py +++ b/benchmark/tensorflow/image/googlenet.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmark/tensorflow/image/googlenet_multi_gpu.py b/benchmark/tensorflow/image/googlenet_multi_gpu.py index f06437eb6c..7179c5301c 100644 --- a/benchmark/tensorflow/image/googlenet_multi_gpu.py +++ b/benchmark/tensorflow/image/googlenet_multi_gpu.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmark/tensorflow/image/smallnet_mnist_cifar.py b/benchmark/tensorflow/image/smallnet_mnist_cifar.py index 558c68575f..2ca1623b6b 100644 --- a/benchmark/tensorflow/image/smallnet_mnist_cifar.py +++ b/benchmark/tensorflow/image/smallnet_mnist_cifar.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmark/tensorflow/rnn/reader.py b/benchmark/tensorflow/rnn/reader.py index 9660d3c22b..ac08c10a42 100755 --- a/benchmark/tensorflow/rnn/reader.py +++ b/benchmark/tensorflow/rnn/reader.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cmake/configure.cmake b/cmake/configure.cmake index 5c6bcfde76..ae3295fe41 100644 --- a/cmake/configure.cmake +++ b/cmake/configure.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cmake/cross_compiling/android.cmake b/cmake/cross_compiling/android.cmake index 84219cfa55..4cf2be3bdf 100644 --- a/cmake/cross_compiling/android.cmake +++ b/cmake/cross_compiling/android.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cmake/cross_compiling/host.cmake b/cmake/cross_compiling/host.cmake index 14c35266ec..f9c6b12136 100644 --- a/cmake/cross_compiling/host.cmake +++ b/cmake/cross_compiling/host.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cmake/cross_compiling/ios.cmake b/cmake/cross_compiling/ios.cmake index d3f5bf6852..10d389ec8e 100644 --- a/cmake/cross_compiling/ios.cmake +++ b/cmake/cross_compiling/ios.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cmake/cross_compiling/raspberry_pi.cmake b/cmake/cross_compiling/raspberry_pi.cmake index 817b39f683..0425b2ae15 100644 --- a/cmake/cross_compiling/raspberry_pi.cmake +++ b/cmake/cross_compiling/raspberry_pi.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/cmake/external/boost.cmake b/cmake/external/boost.cmake index dbc676bdac..9e135b2c0e 100644 --- a/cmake/external/boost.cmake +++ b/cmake/external/boost.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cmake/external/cares.cmake b/cmake/external/cares.cmake index aec51410b3..a743b572a6 100644 --- a/cmake/external/cares.cmake +++ b/cmake/external/cares.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cmake/external/gflags.cmake b/cmake/external/gflags.cmake index d4f252bb9f..a1d2d0f446 100644 --- a/cmake/external/gflags.cmake +++ b/cmake/external/gflags.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cmake/external/glog.cmake b/cmake/external/glog.cmake index 0c6b3aafcb..ac0181e69c 100644 --- a/cmake/external/glog.cmake +++ b/cmake/external/glog.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cmake/external/grpc.cmake b/cmake/external/grpc.cmake index 79b2449fe6..0853b98181 100644 --- a/cmake/external/grpc.cmake +++ b/cmake/external/grpc.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cmake/external/gtest.cmake b/cmake/external/gtest.cmake index 5a4aa7a5b7..d335298742 100644 --- a/cmake/external/gtest.cmake +++ b/cmake/external/gtest.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cmake/external/mkldnn.cmake b/cmake/external/mkldnn.cmake index 89fc34796a..aef5311a12 100644 --- a/cmake/external/mkldnn.cmake +++ b/cmake/external/mkldnn.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cmake/external/mklml.cmake b/cmake/external/mklml.cmake index 15a07ea3da..739a910c7c 100644 --- a/cmake/external/mklml.cmake +++ b/cmake/external/mklml.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cmake/external/nccl.cmake b/cmake/external/nccl.cmake index fc43766efa..af5c689c35 100644 --- a/cmake/external/nccl.cmake +++ b/cmake/external/nccl.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake index 4012a164be..e2b7ef8d54 100644 --- a/cmake/external/openblas.cmake +++ b/cmake/external/openblas.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake index ff5855052d..0fde4373a4 100644 --- a/cmake/external/protobuf.cmake +++ b/cmake/external/protobuf.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cmake/external/pybind11.cmake b/cmake/external/pybind11.cmake index 4e87dc49d8..c885877a2b 100644 --- a/cmake/external/pybind11.cmake +++ b/cmake/external/pybind11.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cmake/external/python.cmake b/cmake/external/python.cmake index 46c68cce32..d7e5571bdb 100644 --- a/cmake/external/python.cmake +++ b/cmake/external/python.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cmake/external/swig.cmake b/cmake/external/swig.cmake index 9db457c7b2..de07703695 100644 --- a/cmake/external/swig.cmake +++ b/cmake/external/swig.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cmake/external/warpctc.cmake b/cmake/external/warpctc.cmake index 5fa60df7b3..9a9a20f897 100644 --- a/cmake/external/warpctc.cmake +++ b/cmake/external/warpctc.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/cmake/external/zlib.cmake b/cmake/external/zlib.cmake index 1638cd8fdf..e568880632 100644 --- a/cmake/external/zlib.cmake +++ b/cmake/external/zlib.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cmake/generic.cmake b/cmake/generic.cmake index 1cb54ba216..12e07bd5f8 100644 --- a/cmake/generic.cmake +++ b/cmake/generic.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cmake/make_resource.py b/cmake/make_resource.py index 4f9f5546b9..09a2ca877d 100644 --- a/cmake/make_resource.py +++ b/cmake/make_resource.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/cmake/system.cmake b/cmake/system.cmake index 396bd1a079..c91ef91127 100644 --- a/cmake/system.cmake +++ b/cmake/system.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/doc/faq/local/src/reduce_min_pool_size.py b/doc/faq/local/src/reduce_min_pool_size.py index 9efdb5707a..cba96652f7 100644 --- a/doc/faq/local/src/reduce_min_pool_size.py +++ b/doc/faq/local/src/reduce_min_pool_size.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/doc/faq/local/src/word2vec_config.py b/doc/faq/local/src/word2vec_config.py index b4fcf0960e..a5b84e8ed4 100644 --- a/doc/faq/local/src/word2vec_config.py +++ b/doc/faq/local/src/word2vec_config.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/doc/faq/local/src/word2vec_dataprovider.py b/doc/faq/local/src/word2vec_dataprovider.py index 3b6273b057..9fe67b6d6c 100644 --- a/doc/faq/local/src/word2vec_dataprovider.py +++ b/doc/faq/local/src/word2vec_dataprovider.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/doc/getstarted/concepts/src/infer.py b/doc/getstarted/concepts/src/infer.py index a1b60388c4..afe256f234 100644 --- a/doc/getstarted/concepts/src/infer.py +++ b/doc/getstarted/concepts/src/infer.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. 
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/doc/getstarted/concepts/src/train.py b/doc/getstarted/concepts/src/train.py index 0e5bdb57bc..a85d5d8a3a 100644 --- a/doc/getstarted/concepts/src/train.py +++ b/doc/getstarted/concepts/src/train.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/doc/howto/cluster/src/word2vec/api_train_v2.py b/doc/howto/cluster/src/word2vec/api_train_v2.py index 9a65f14628..9107e24c17 100644 --- a/doc/howto/cluster/src/word2vec/api_train_v2.py +++ b/doc/howto/cluster/src/word2vec/api_train_v2.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/doc/howto/cluster/src/word2vec/api_train_v2_cluster.py b/doc/howto/cluster/src/word2vec/api_train_v2_cluster.py index 2afce9a66e..791504094f 100644 --- a/doc/howto/cluster/src/word2vec/api_train_v2_cluster.py +++ b/doc/howto/cluster/src/word2vec/api_train_v2_cluster.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/doc/howto/cluster/src/word2vec/prepare.py b/doc/howto/cluster/src/word2vec/prepare.py index ade01c378e..a42548fbf0 100644 --- a/doc/howto/cluster/src/word2vec/prepare.py +++ b/doc/howto/cluster/src/word2vec/prepare.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/go/CMakeLists.txt b/go/CMakeLists.txt index 29ce909c64..f3a9296c2c 100644 --- a/go/CMakeLists.txt +++ b/go/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/go/cmd/master/CMakeLists.txt b/go/cmd/master/CMakeLists.txt index 9e149967e7..fc99d8d3bd 100644 --- a/go/cmd/master/CMakeLists.txt +++ b/go/cmd/master/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/go/cmd/master/master.go b/go/cmd/master/master.go index f57db1c0a0..537df59c86 100644 --- a/go/cmd/master/master.go +++ b/go/cmd/master/master.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. 
// Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/go/cmd/pserver/CMakeLists.txt b/go/cmd/pserver/CMakeLists.txt index 51db6dff04..20d033c938 100644 --- a/go/cmd/pserver/CMakeLists.txt +++ b/go/cmd/pserver/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/go/cmd/pserver/pserver.go b/go/cmd/pserver/pserver.go index 1358801c1c..271274cafc 100644 --- a/go/cmd/pserver/pserver.go +++ b/go/cmd/pserver/pserver.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/go/connection/conn.go b/go/connection/conn.go index ffa8db689d..b8353e8e18 100644 --- a/go/connection/conn.go +++ b/go/connection/conn.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/go/master/CMakeLists.txt b/go/master/CMakeLists.txt index 93efa4eaf7..b5101c3479 100644 --- a/go/master/CMakeLists.txt +++ b/go/master/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/go/master/c/CMakeLists.txt b/go/master/c/CMakeLists.txt index 082d9f3f59..58b44e6445 100644 --- a/go/master/c/CMakeLists.txt +++ b/go/master/c/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/go/master/c/client.go b/go/master/c/client.go index 9a3960d59c..42c176d00b 100644 --- a/go/master/c/client.go +++ b/go/master/c/client.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/go/master/client.go b/go/master/client.go index 7bcf869553..e43903dd14 100644 --- a/go/master/client.go +++ b/go/master/client.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/go/master/client_internal_test.go b/go/master/client_internal_test.go index 2f13fd0dcd..37028a9e1f 100644 --- a/go/master/client_internal_test.go +++ b/go/master/client_internal_test.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. 
+// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/go/master/client_test.go b/go/master/client_test.go index 1963dbfd73..01ecad2dea 100644 --- a/go/master/client_test.go +++ b/go/master/client_test.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/go/master/etcd_client.go b/go/master/etcd_client.go index 2a41d36949..36fe611274 100644 --- a/go/master/etcd_client.go +++ b/go/master/etcd_client.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/go/master/inmem_store.go b/go/master/inmem_store.go index a5bd2d4fe1..33b4714317 100644 --- a/go/master/inmem_store.go +++ b/go/master/inmem_store.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/go/master/service.go b/go/master/service.go index f350102880..39f746e528 100644 --- a/go/master/service.go +++ b/go/master/service.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/go/master/service_internal_test.go b/go/master/service_internal_test.go index bd1a939a55..dd22f3d548 100644 --- a/go/master/service_internal_test.go +++ b/go/master/service_internal_test.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/go/pserver/CMakeLists.txt b/go/pserver/CMakeLists.txt index 9ac05199e7..32f3b2baba 100644 --- a/go/pserver/CMakeLists.txt +++ b/go/pserver/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/go/pserver/client/CMakeLists.txt b/go/pserver/client/CMakeLists.txt index e295611060..1d6f45a664 100644 --- a/go/pserver/client/CMakeLists.txt +++ b/go/pserver/client/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/go/pserver/client/c/CMakeLists.txt b/go/pserver/client/c/CMakeLists.txt index a932791c7c..78776219de 100644 --- a/go/pserver/client/c/CMakeLists.txt +++ b/go/pserver/client/c/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/go/pserver/client/c/cclient.go b/go/pserver/client/c/cclient.go index 2eeec1b6b3..cddc28e46f 100644 --- a/go/pserver/client/c/cclient.go +++ b/go/pserver/client/c/cclient.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/go/pserver/client/c/test/CMakeLists.txt b/go/pserver/client/c/test/CMakeLists.txt index 3724ccb60b..411dc50332 100644 --- a/go/pserver/client/c/test/CMakeLists.txt +++ b/go/pserver/client/c/test/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/go/pserver/client/c/test/test_cclient.c b/go/pserver/client/c/test/test_cclient.c index 05ec421fff..0116e42a0a 100644 --- a/go/pserver/client/c/test/test_cclient.c +++ b/go/pserver/client/c/test/test_cclient.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/go/pserver/client/c/test/test_mnist.py b/go/pserver/client/c/test/test_mnist.py index 821d9adfcb..97f63aeb6d 100644 --- a/go/pserver/client/c/test/test_mnist.py +++ b/go/pserver/client/c/test/test_mnist.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/go/pserver/client/c/test/test_train.py b/go/pserver/client/c/test/test_train.py index 445a8d3aa4..2db5a0bf6a 100644 --- a/go/pserver/client/c/test/test_train.py +++ b/go/pserver/client/c/test/test_train.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/go/pserver/client/client.go b/go/pserver/client/client.go index 18fce34b37..2a8f66a07c 100644 --- a/go/pserver/client/client.go +++ b/go/pserver/client/client.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/go/pserver/client/client_test.go b/go/pserver/client/client_test.go index ec832305ee..3a067ff518 100644 --- a/go/pserver/client/client_test.go +++ b/go/pserver/client/client_test.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/go/pserver/client/etcd_client.go b/go/pserver/client/etcd_client.go index 16d0c3b943..3fb835a6e1 100644 --- a/go/pserver/client/etcd_client.go +++ b/go/pserver/client/etcd_client.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/go/pserver/etcd_client.go b/go/pserver/etcd_client.go index 08ddb247f2..719013b1bb 100644 --- a/go/pserver/etcd_client.go +++ b/go/pserver/etcd_client.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/go/pserver/optimizer.go b/go/pserver/optimizer.go index 6d28cad25a..f17577997b 100644 --- a/go/pserver/optimizer.go +++ b/go/pserver/optimizer.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/go/pserver/optimizer_test.go b/go/pserver/optimizer_test.go index 565f56dc28..3b923879d5 100644 --- a/go/pserver/optimizer_test.go +++ b/go/pserver/optimizer_test.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/go/pserver/service.go b/go/pserver/service.go index 7484ec90b1..d6ead774af 100644 --- a/go/pserver/service.go +++ b/go/pserver/service.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/go/pserver/service_test.go b/go/pserver/service_test.go index 58a743e1fa..6949348e93 100644 --- a/go/pserver/service_test.go +++ b/go/pserver/service_test.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/go/utils/networkhelper/CMakeLists.txt b/go/utils/networkhelper/CMakeLists.txt index 9233264ff3..3100f2b5a5 100644 --- a/go/utils/networkhelper/CMakeLists.txt +++ b/go/utils/networkhelper/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
diff --git a/go/utils/networkhelper/helper.go b/go/utils/networkhelper/helper.go index c3fc747bda..d205b6c502 100644 --- a/go/utils/networkhelper/helper.go +++ b/go/utils/networkhelper/helper.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
diff --git a/go/utils/networkhelper/helper_test.go b/go/utils/networkhelper/helper_test.go index 0bc02ad42a..60b520fae1 100644 --- a/go/utils/networkhelper/helper_test.go +++ b/go/utils/networkhelper/helper_test.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
diff --git a/paddle/api/Arguments.cpp b/paddle/api/Arguments.cpp index c6f9106912..62d6a574d5 100644 --- a/paddle/api/Arguments.cpp +++ b/paddle/api/Arguments.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
diff --git a/paddle/api/ConfigParser.cpp b/paddle/api/ConfigParser.cpp index b6ff6ec789..d362a1e7cf 100644 --- a/paddle/api/ConfigParser.cpp +++ b/paddle/api/ConfigParser.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
diff --git a/paddle/api/Evaluator.cpp b/paddle/api/Evaluator.cpp index fcda6eaf03..c4aac47cbe 100644 --- a/paddle/api/Evaluator.cpp +++ b/paddle/api/Evaluator.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
diff --git a/paddle/api/GradientMachine.cpp b/paddle/api/GradientMachine.cpp index dcb5fe086f..a3d6f0f080 100644 --- a/paddle/api/GradientMachine.cpp +++ b/paddle/api/GradientMachine.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
diff --git a/paddle/api/Internal.h b/paddle/api/Internal.h index d48dd3a04c..2195cc6739 100644 --- a/paddle/api/Internal.h +++ b/paddle/api/Internal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
diff --git a/paddle/api/Matrix.cpp b/paddle/api/Matrix.cpp index 7c375e5cfb..8282b4629d 100644 --- a/paddle/api/Matrix.cpp +++ b/paddle/api/Matrix.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index 0b9b83d429..67368d1a99 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
diff --git a/paddle/api/PaddleAPIPrivate.h b/paddle/api/PaddleAPIPrivate.h index f41352bfec..e141fcd761 100644 --- a/paddle/api/PaddleAPIPrivate.h +++ b/paddle/api/PaddleAPIPrivate.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
diff --git a/paddle/api/Parameter.cpp b/paddle/api/Parameter.cpp index 19f7a898d6..589d22e74e 100644 --- a/paddle/api/Parameter.cpp +++ b/paddle/api/Parameter.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
diff --git a/paddle/api/ParameterOptimizer.cpp b/paddle/api/ParameterOptimizer.cpp index 120eea3f70..d4620be3e6 100644 --- a/paddle/api/ParameterOptimizer.cpp +++ b/paddle/api/ParameterOptimizer.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
diff --git a/paddle/api/ParameterUpdater.cpp b/paddle/api/ParameterUpdater.cpp index 8cd73b348c..63c000c959 100644 --- a/paddle/api/ParameterUpdater.cpp +++ b/paddle/api/ParameterUpdater.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
diff --git a/paddle/api/SequenceGenerator.cpp b/paddle/api/SequenceGenerator.cpp index 8428edc60d..1b30aec8f6 100644 --- a/paddle/api/SequenceGenerator.cpp +++ b/paddle/api/SequenceGenerator.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
diff --git a/paddle/api/Trainer.cpp b/paddle/api/Trainer.cpp index 84e4ca054a..795460b650 100644 --- a/paddle/api/Trainer.cpp +++ b/paddle/api/Trainer.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
diff --git a/paddle/api/Util.cpp b/paddle/api/Util.cpp index 11bd05c09d..618e87e964 100644 --- a/paddle/api/Util.cpp +++ b/paddle/api/Util.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/api/Vector.cpp b/paddle/api/Vector.cpp index 500bc448c9..e2a7b974ca 100644 --- a/paddle/api/Vector.cpp +++ b/paddle/api/Vector.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/api/test/testTrainConfig.py b/paddle/api/test/testTrainConfig.py index 1a1283e116..c02d61ebad 100644 --- a/paddle/api/test/testTrainConfig.py +++ b/paddle/api/test/testTrainConfig.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/paddle/capi/Arguments.cpp b/paddle/capi/Arguments.cpp index 1ec403077e..87fac3d6c6 100644 --- a/paddle/capi/Arguments.cpp +++ b/paddle/capi/Arguments.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/capi/Main.cpp b/paddle/capi/Main.cpp index c038789340..0a289dede6 100644 --- a/paddle/capi/Main.cpp +++ b/paddle/capi/Main.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/capi/Matrix.cpp b/paddle/capi/Matrix.cpp index cbacd1fb71..24b0020636 100644 --- a/paddle/capi/Matrix.cpp +++ b/paddle/capi/Matrix.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/capi/Vector.cpp b/paddle/capi/Vector.cpp index 564708e963..afb5a9afef 100644 --- a/paddle/capi/Vector.cpp +++ b/paddle/capi/Vector.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/capi/arguments.h b/paddle/capi/arguments.h index 7c32524a00..69a66bb012 100644 --- a/paddle/capi/arguments.h +++ b/paddle/capi/arguments.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/paddle/capi/capi.h b/paddle/capi/capi.h index 4097a1a35a..749fcc4b79 100644 --- a/paddle/capi/capi.h +++ b/paddle/capi/capi.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/capi/capi_private.h b/paddle/capi/capi_private.h index c7cdbd5f6f..3332f42a4a 100644 --- a/paddle/capi/capi_private.h +++ b/paddle/capi/capi_private.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/capi/error.cpp b/paddle/capi/error.cpp index 96ce31b45f..0c25de5ba9 100644 --- a/paddle/capi/error.cpp +++ b/paddle/capi/error.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/capi/error.h b/paddle/capi/error.h index 2da9e0a3ef..b0940725b5 100644 --- a/paddle/capi/error.h +++ b/paddle/capi/error.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/capi/examples/model_inference/common/common.h b/paddle/capi/examples/model_inference/common/common.h index 9efcbc387e..23248b0caf 100644 --- a/paddle/capi/examples/model_inference/common/common.h +++ b/paddle/capi/examples/model_inference/common/common.h @@ -1,4 +1,4 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/paddle/capi/examples/model_inference/dense/main.c b/paddle/capi/examples/model_inference/dense/main.c index f795bfe11d..90444889a7 100644 --- a/paddle/capi/examples/model_inference/dense/main.c +++ b/paddle/capi/examples/model_inference/dense/main.c @@ -1,4 +1,4 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/paddle/capi/examples/model_inference/dense/merge_v2_model.py b/paddle/capi/examples/model_inference/dense/merge_v2_model.py index 7aeb482903..673aba2036 100644 --- a/paddle/capi/examples/model_inference/dense/merge_v2_model.py +++ b/paddle/capi/examples/model_inference/dense/merge_v2_model.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/paddle/capi/examples/model_inference/dense/mnist_v2.py b/paddle/capi/examples/model_inference/dense/mnist_v2.py index 183eecfdf2..3fd15d658a 100644 --- a/paddle/capi/examples/model_inference/dense/mnist_v2.py +++ b/paddle/capi/examples/model_inference/dense/mnist_v2.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
diff --git a/paddle/capi/examples/model_inference/dense/trainer_config.py b/paddle/capi/examples/model_inference/dense/trainer_config.py index b94a21a7e4..eca2dce114 100644 --- a/paddle/capi/examples/model_inference/dense/trainer_config.py +++ b/paddle/capi/examples/model_inference/dense/trainer_config.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
diff --git a/paddle/capi/examples/model_inference/multi_thread/main.c b/paddle/capi/examples/model_inference/multi_thread/main.c index eecb9138e7..0a99e6b9c8 100644 --- a/paddle/capi/examples/model_inference/multi_thread/main.c +++ b/paddle/capi/examples/model_inference/multi_thread/main.c @@ -1,4 +1,4 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
diff --git a/paddle/capi/examples/model_inference/multi_thread/main_gpu.c b/paddle/capi/examples/model_inference/multi_thread/main_gpu.c index 85bb456584..60f0c59e77 100644 --- a/paddle/capi/examples/model_inference/multi_thread/main_gpu.c +++ b/paddle/capi/examples/model_inference/multi_thread/main_gpu.c @@ -1,4 +1,4 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
diff --git a/paddle/capi/examples/model_inference/multi_thread/trainer_config.py b/paddle/capi/examples/model_inference/multi_thread/trainer_config.py deleted file mode 120000 index 70cfb1f7f4..0000000000 --- a/paddle/capi/examples/model_inference/multi_thread/trainer_config.py +++ /dev/null @@ -1 +0,0 @@ -../dense/trainer_config.py \ No newline at end of file
diff --git a/paddle/capi/examples/model_inference/multi_thread/trainer_config.py b/paddle/capi/examples/model_inference/multi_thread/trainer_config.py new file mode 100755 index 0000000000..fa6a12319a --- /dev/null +++ b/paddle/capi/examples/model_inference/multi_thread/trainer_config.py @@ -0,0 +1,13 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License.
diff --git a/paddle/capi/examples/model_inference/sequence/main.c b/paddle/capi/examples/model_inference/sequence/main.c index 80937c830d..25a38d32f0 100644 --- a/paddle/capi/examples/model_inference/sequence/main.c +++ b/paddle/capi/examples/model_inference/sequence/main.c @@ -1,4 +1,4 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
diff --git a/paddle/capi/examples/model_inference/sequence/trainer_config.py b/paddle/capi/examples/model_inference/sequence/trainer_config.py index 889f8acdfd..62ae97e262 100644 --- a/paddle/capi/examples/model_inference/sequence/trainer_config.py +++ b/paddle/capi/examples/model_inference/sequence/trainer_config.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
diff --git a/paddle/capi/examples/model_inference/sparse_binary/main.c b/paddle/capi/examples/model_inference/sparse_binary/main.c index efec010a91..8df1b60088 100644 --- a/paddle/capi/examples/model_inference/sparse_binary/main.c +++ b/paddle/capi/examples/model_inference/sparse_binary/main.c @@ -1,4 +1,4 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
diff --git a/paddle/capi/examples/model_inference/sparse_binary/trainer_config.py b/paddle/capi/examples/model_inference/sparse_binary/trainer_config.py deleted file mode 120000 index 70cfb1f7f4..0000000000 --- a/paddle/capi/examples/model_inference/sparse_binary/trainer_config.py +++ /dev/null @@ -1 +0,0 @@ -../dense/trainer_config.py \ No newline at end of file
diff --git a/paddle/capi/examples/model_inference/sparse_binary/trainer_config.py b/paddle/capi/examples/model_inference/sparse_binary/trainer_config.py new file mode 100755 index 0000000000..fa6a12319a --- /dev/null +++ b/paddle/capi/examples/model_inference/sparse_binary/trainer_config.py @@ -0,0 +1,13 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
diff --git a/paddle/capi/gradient_machine.cpp b/paddle/capi/gradient_machine.cpp index 1f0e033c5b..ea9aab00e3 100644 --- a/paddle/capi/gradient_machine.cpp +++ b/paddle/capi/gradient_machine.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/capi/gradient_machine.h b/paddle/capi/gradient_machine.h index 7e37dea00b..f46498b375 100644 --- a/paddle/capi/gradient_machine.h +++ b/paddle/capi/gradient_machine.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/capi/main.h b/paddle/capi/main.h index 99c4e8428d..a0cb7bc296 100644 --- a/paddle/capi/main.h +++ b/paddle/capi/main.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/capi/matrix.h b/paddle/capi/matrix.h index 8cc3e0034e..f6747f7b1a 100644 --- a/paddle/capi/matrix.h +++ b/paddle/capi/matrix.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/capi/tests/test_Arguments.cpp b/paddle/capi/tests/test_Arguments.cpp index 4792ceb49a..bb08adf716 100644 --- a/paddle/capi/tests/test_Arguments.cpp +++ b/paddle/capi/tests/test_Arguments.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/capi/tests/test_GradientMachine.cpp b/paddle/capi/tests/test_GradientMachine.cpp index 89aa64608d..73b9e477b2 100644 --- a/paddle/capi/tests/test_GradientMachine.cpp +++ b/paddle/capi/tests/test_GradientMachine.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/capi/tests/test_Matrix.cpp b/paddle/capi/tests/test_Matrix.cpp index 6940c28448..5ba051ae17 100644 --- a/paddle/capi/tests/test_Matrix.cpp +++ b/paddle/capi/tests/test_Matrix.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/capi/tests/test_Vector.cpp b/paddle/capi/tests/test_Vector.cpp index 365160dc9a..fa7407e484 100644 --- a/paddle/capi/tests/test_Vector.cpp +++ b/paddle/capi/tests/test_Vector.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/paddle/capi/tests/test_predict_network.py b/paddle/capi/tests/test_predict_network.py index 6560417b2a..b8efb25704 100644 --- a/paddle/capi/tests/test_predict_network.py +++ b/paddle/capi/tests/test_predict_network.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/paddle/capi/vector.h b/paddle/capi/vector.h index a92aeff164..a79f7fdf78 100644 --- a/paddle/capi/vector.h +++ b/paddle/capi/vector.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/cuda/include/hl_activation_functions.h b/paddle/cuda/include/hl_activation_functions.h index 93957fd964..29ec248420 100644 --- a/paddle/cuda/include/hl_activation_functions.h +++ b/paddle/cuda/include/hl_activation_functions.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/cuda/include/hl_aggregate.h b/paddle/cuda/include/hl_aggregate.h index d2189de689..1ca26aa3bb 100644 --- a/paddle/cuda/include/hl_aggregate.h +++ b/paddle/cuda/include/hl_aggregate.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/cuda/include/hl_avx_functions.h b/paddle/cuda/include/hl_avx_functions.h index 35f4eabb4c..9fb99a36ea 100644 --- a/paddle/cuda/include/hl_avx_functions.h +++ b/paddle/cuda/include/hl_avx_functions.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/cuda/include/hl_base.h b/paddle/cuda/include/hl_base.h index 5b9884b786..6c4f09dacb 100644 --- a/paddle/cuda/include/hl_base.h +++ b/paddle/cuda/include/hl_base.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/cuda/include/hl_batch_norm.h b/paddle/cuda/include/hl_batch_norm.h index afc5e0b2de..7814204d1b 100644 --- a/paddle/cuda/include/hl_batch_norm.h +++ b/paddle/cuda/include/hl_batch_norm.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/paddle/cuda/include/hl_batch_transpose.h b/paddle/cuda/include/hl_batch_transpose.h index e2e958cd67..a16d3764fc 100644 --- a/paddle/cuda/include/hl_batch_transpose.h +++ b/paddle/cuda/include/hl_batch_transpose.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/cuda/include/hl_cnn.h b/paddle/cuda/include/hl_cnn.h index 8841806292..63ec515647 100644 --- a/paddle/cuda/include/hl_cnn.h +++ b/paddle/cuda/include/hl_cnn.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/cuda/include/hl_cpu_gru.cuh b/paddle/cuda/include/hl_cpu_gru.cuh index e4f6bf42c6..ce1643932d 100644 --- a/paddle/cuda/include/hl_cpu_gru.cuh +++ b/paddle/cuda/include/hl_cpu_gru.cuh @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/cuda/include/hl_cpu_lstm.cuh b/paddle/cuda/include/hl_cpu_lstm.cuh index 0e412fcdf5..58a97d1230 100644 --- a/paddle/cuda/include/hl_cpu_lstm.cuh +++ b/paddle/cuda/include/hl_cpu_lstm.cuh @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/cuda/include/hl_cpu_matrix_kernel.cuh b/paddle/cuda/include/hl_cpu_matrix_kernel.cuh index aaa2432551..4db9bb74e0 100644 --- a/paddle/cuda/include/hl_cpu_matrix_kernel.cuh +++ b/paddle/cuda/include/hl_cpu_matrix_kernel.cuh @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/cuda/include/hl_cpu_matrix_kernel_detail.cuh b/paddle/cuda/include/hl_cpu_matrix_kernel_detail.cuh index 85ca836fdc..54a749b990 100644 --- a/paddle/cuda/include/hl_cpu_matrix_kernel_detail.cuh +++ b/paddle/cuda/include/hl_cpu_matrix_kernel_detail.cuh @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/cuda/include/hl_cpu_scalar.cuh b/paddle/cuda/include/hl_cpu_scalar.cuh index 93043cd4bc..939302e971 100644 --- a/paddle/cuda/include/hl_cpu_scalar.cuh +++ b/paddle/cuda/include/hl_cpu_scalar.cuh @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/paddle/cuda/include/hl_cpu_simd_neon.cuh b/paddle/cuda/include/hl_cpu_simd_neon.cuh index 0b1cf4abdc..e54e0f4646 100644 --- a/paddle/cuda/include/hl_cpu_simd_neon.cuh +++ b/paddle/cuda/include/hl_cpu_simd_neon.cuh @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/cuda/include/hl_cpu_simd_sse.cuh b/paddle/cuda/include/hl_cpu_simd_sse.cuh index a104b62622..20c37d4dd3 100644 --- a/paddle/cuda/include/hl_cpu_simd_sse.cuh +++ b/paddle/cuda/include/hl_cpu_simd_sse.cuh @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/cuda/include/hl_cuda.h b/paddle/cuda/include/hl_cuda.h index 5383c1130b..70efcccb81 100644 --- a/paddle/cuda/include/hl_cuda.h +++ b/paddle/cuda/include/hl_cuda.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/cuda/include/hl_cuda.ph b/paddle/cuda/include/hl_cuda.ph index 701916b279..7c4465e51f 100644 --- a/paddle/cuda/include/hl_cuda.ph +++ b/paddle/cuda/include/hl_cuda.ph @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/cuda/include/hl_cuda_cublas.h b/paddle/cuda/include/hl_cuda_cublas.h index e206e42b2a..3959f81677 100644 --- a/paddle/cuda/include/hl_cuda_cublas.h +++ b/paddle/cuda/include/hl_cuda_cublas.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/cuda/include/hl_cuda_cudnn.h b/paddle/cuda/include/hl_cuda_cudnn.h index b44b071bd1..4664e4144a 100644 --- a/paddle/cuda/include/hl_cuda_cudnn.h +++ b/paddle/cuda/include/hl_cuda_cudnn.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/cuda/include/hl_cuda_cudnn.ph b/paddle/cuda/include/hl_cuda_cudnn.ph index 61378937ce..bb3b89f6fa 100644 --- a/paddle/cuda/include/hl_cuda_cudnn.ph +++ b/paddle/cuda/include/hl_cuda_cudnn.ph @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/paddle/cuda/include/hl_device_functions.cuh b/paddle/cuda/include/hl_device_functions.cuh index e0b5632f23..ef068e1062 100755 --- a/paddle/cuda/include/hl_device_functions.cuh +++ b/paddle/cuda/include/hl_device_functions.cuh @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/cuda/include/hl_functions.h b/paddle/cuda/include/hl_functions.h index 0d7e80a855..9912b4c179 100644 --- a/paddle/cuda/include/hl_functions.h +++ b/paddle/cuda/include/hl_functions.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/cuda/include/hl_gpu.h b/paddle/cuda/include/hl_gpu.h index 4ab8de80d1..50a2e9cdd2 100644 --- a/paddle/cuda/include/hl_gpu.h +++ b/paddle/cuda/include/hl_gpu.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/cuda/include/hl_gpu_functions.cuh b/paddle/cuda/include/hl_gpu_functions.cuh index 8e64cbe360..705aa71f4b 100644 --- a/paddle/cuda/include/hl_gpu_functions.cuh +++ b/paddle/cuda/include/hl_gpu_functions.cuh @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/cuda/include/hl_gpu_gru.cuh b/paddle/cuda/include/hl_gpu_gru.cuh index 6668e135d2..9fcad2c3bc 100644 --- a/paddle/cuda/include/hl_gpu_gru.cuh +++ b/paddle/cuda/include/hl_gpu_gru.cuh @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/cuda/include/hl_gpu_lstm.cuh b/paddle/cuda/include/hl_gpu_lstm.cuh index 5dceba2f5b..92517a44d2 100644 --- a/paddle/cuda/include/hl_gpu_lstm.cuh +++ b/paddle/cuda/include/hl_gpu_lstm.cuh @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/cuda/include/hl_gpu_matrix_kernel.cuh b/paddle/cuda/include/hl_gpu_matrix_kernel.cuh index 9bbdf5fa72..0db023ce37 100644 --- a/paddle/cuda/include/hl_gpu_matrix_kernel.cuh +++ b/paddle/cuda/include/hl_gpu_matrix_kernel.cuh @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/paddle/cuda/include/hl_gru_ops.cuh b/paddle/cuda/include/hl_gru_ops.cuh
index 45f66ad533..6c647c514d 100644
--- a/paddle/cuda/include/hl_gru_ops.cuh
+++ b/paddle/cuda/include/hl_gru_ops.cuh
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/hl_lstm.h b/paddle/cuda/include/hl_lstm.h
index 857756e5cd..5db4783bf4 100644
--- a/paddle/cuda/include/hl_lstm.h
+++ b/paddle/cuda/include/hl_lstm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/hl_lstm_ops.cuh b/paddle/cuda/include/hl_lstm_ops.cuh
index 2601060cc2..394fdf5ac0 100644
--- a/paddle/cuda/include/hl_lstm_ops.cuh
+++ b/paddle/cuda/include/hl_lstm_ops.cuh
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/hl_matrix.h b/paddle/cuda/include/hl_matrix.h
index 7daca18761..88d538343f 100644
--- a/paddle/cuda/include/hl_matrix.h
+++ b/paddle/cuda/include/hl_matrix.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/hl_matrix_apply.cuh b/paddle/cuda/include/hl_matrix_apply.cuh
index b10d177b97..a067c8233b 100644
--- a/paddle/cuda/include/hl_matrix_apply.cuh
+++ b/paddle/cuda/include/hl_matrix_apply.cuh
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/hl_matrix_base.cuh b/paddle/cuda/include/hl_matrix_base.cuh
index 53fdb47ec9..a309bb0011 100644
--- a/paddle/cuda/include/hl_matrix_base.cuh
+++ b/paddle/cuda/include/hl_matrix_base.cuh
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/hl_matrix_base_detail.cuh b/paddle/cuda/include/hl_matrix_base_detail.cuh
index de1fd17d52..74211bcb92 100644
--- a/paddle/cuda/include/hl_matrix_base_detail.cuh
+++ b/paddle/cuda/include/hl_matrix_base_detail.cuh
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/hl_matrix_ops.cuh b/paddle/cuda/include/hl_matrix_ops.cuh
index fc29201357..4e8bd91234 100644
--- a/paddle/cuda/include/hl_matrix_ops.cuh
+++ b/paddle/cuda/include/hl_matrix_ops.cuh
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/hl_matrix_type.cuh b/paddle/cuda/include/hl_matrix_type.cuh
index e18235219b..e61c0d0a47 100644
--- a/paddle/cuda/include/hl_matrix_type.cuh
+++ b/paddle/cuda/include/hl_matrix_type.cuh
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/hl_perturbation_util.cuh b/paddle/cuda/include/hl_perturbation_util.cuh
index 93b81bf035..e0a27778ca 100644
--- a/paddle/cuda/include/hl_perturbation_util.cuh
+++ b/paddle/cuda/include/hl_perturbation_util.cuh
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/hl_recurrent_apply.cuh b/paddle/cuda/include/hl_recurrent_apply.cuh
index 113446cf75..b2cc231f58 100644
--- a/paddle/cuda/include/hl_recurrent_apply.cuh
+++ b/paddle/cuda/include/hl_recurrent_apply.cuh
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/hl_sequence.h b/paddle/cuda/include/hl_sequence.h
index 973ddcceed..3923bdd921 100644
--- a/paddle/cuda/include/hl_sequence.h
+++ b/paddle/cuda/include/hl_sequence.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/hl_sparse.h b/paddle/cuda/include/hl_sparse.h
index 67fe701c10..9aab52e045 100644
--- a/paddle/cuda/include/hl_sparse.h
+++ b/paddle/cuda/include/hl_sparse.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/hl_sparse.ph b/paddle/cuda/include/hl_sparse.ph
index 13bba17811..c0fdccb942 100644
--- a/paddle/cuda/include/hl_sparse.ph
+++ b/paddle/cuda/include/hl_sparse.ph
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/hl_table_apply.h b/paddle/cuda/include/hl_table_apply.h
index 2170b97f4d..dff60aa0a2 100644
--- a/paddle/cuda/include/hl_table_apply.h
+++ b/paddle/cuda/include/hl_table_apply.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/hl_tensor_ops.h b/paddle/cuda/include/hl_tensor_ops.h
index b2bf334dab..85a022ff5e 100644
--- a/paddle/cuda/include/hl_tensor_ops.h
+++ b/paddle/cuda/include/hl_tensor_ops.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/hl_thread.ph b/paddle/cuda/include/hl_thread.ph
index a3830ff8d8..4abede1517 100644
--- a/paddle/cuda/include/hl_thread.ph
+++ b/paddle/cuda/include/hl_thread.ph
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/hl_time.h b/paddle/cuda/include/hl_time.h
index f63f025820..61d80c065c 100644
--- a/paddle/cuda/include/hl_time.h
+++ b/paddle/cuda/include/hl_time.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/hl_top_k.h b/paddle/cuda/include/hl_top_k.h
index 79ae0d0e74..a3c7872f52 100644
--- a/paddle/cuda/include/hl_top_k.h
+++ b/paddle/cuda/include/hl_top_k.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/hl_warpctc_wrap.h b/paddle/cuda/include/hl_warpctc_wrap.h
index 7885ae5701..0857bd1aa1 100644
--- a/paddle/cuda/include/hl_warpctc_wrap.h
+++ b/paddle/cuda/include/hl_warpctc_wrap.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/stub/hl_aggregate_stub.h b/paddle/cuda/include/stub/hl_aggregate_stub.h
index bbfa9b8fad..2ac841facc 100644
--- a/paddle/cuda/include/stub/hl_aggregate_stub.h
+++ b/paddle/cuda/include/stub/hl_aggregate_stub.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/stub/hl_cnn_stub.h b/paddle/cuda/include/stub/hl_cnn_stub.h
index 706cc59a8e..c39bd3228d 100644
--- a/paddle/cuda/include/stub/hl_cnn_stub.h
+++ b/paddle/cuda/include/stub/hl_cnn_stub.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/stub/hl_cuda_cublas_stub.h b/paddle/cuda/include/stub/hl_cuda_cublas_stub.h
index e86fd853f4..0b2300cda9 100644
--- a/paddle/cuda/include/stub/hl_cuda_cublas_stub.h
+++ b/paddle/cuda/include/stub/hl_cuda_cublas_stub.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/stub/hl_cuda_cudnn_stub.h b/paddle/cuda/include/stub/hl_cuda_cudnn_stub.h
index 3afcc6fa85..4b8bdf7507 100644
--- a/paddle/cuda/include/stub/hl_cuda_cudnn_stub.h
+++ b/paddle/cuda/include/stub/hl_cuda_cudnn_stub.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/stub/hl_cuda_stub.h b/paddle/cuda/include/stub/hl_cuda_stub.h
index 5246a8d5a4..ac8b22ef31 100644
--- a/paddle/cuda/include/stub/hl_cuda_stub.h
+++ b/paddle/cuda/include/stub/hl_cuda_stub.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/stub/hl_lstm_stub.h b/paddle/cuda/include/stub/hl_lstm_stub.h
index 246ba79f63..be2b71787e 100644
--- a/paddle/cuda/include/stub/hl_lstm_stub.h
+++ b/paddle/cuda/include/stub/hl_lstm_stub.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/stub/hl_matrix_stub.h b/paddle/cuda/include/stub/hl_matrix_stub.h
index 46e77e1407..914a2edaf2 100644
--- a/paddle/cuda/include/stub/hl_matrix_stub.h
+++ b/paddle/cuda/include/stub/hl_matrix_stub.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/stub/hl_sequence_stub.h b/paddle/cuda/include/stub/hl_sequence_stub.h
index 920b417b1c..44bc3dbaff 100644
--- a/paddle/cuda/include/stub/hl_sequence_stub.h
+++ b/paddle/cuda/include/stub/hl_sequence_stub.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/include/stub/hl_sparse_stub.h b/paddle/cuda/include/stub/hl_sparse_stub.h
index bd17461d88..4001d4fb74 100644
--- a/paddle/cuda/include/stub/hl_sparse_stub.h
+++ b/paddle/cuda/include/stub/hl_sparse_stub.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/src/avx_mathfun.h b/paddle/cuda/src/avx_mathfun.h
index a0ba71faba..8e698e746a 100644
--- a/paddle/cuda/src/avx_mathfun.h
+++ b/paddle/cuda/src/avx_mathfun.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/src/hl_avx_functions.cc b/paddle/cuda/src/hl_avx_functions.cc
index 9066475876..6fb7c9dd06 100644
--- a/paddle/cuda/src/hl_avx_functions.cc
+++ b/paddle/cuda/src/hl_avx_functions.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/src/hl_batch_norm.cu b/paddle/cuda/src/hl_batch_norm.cu
index 5828ecb8e0..f9ffde0d53 100644
--- a/paddle/cuda/src/hl_batch_norm.cu
+++ b/paddle/cuda/src/hl_batch_norm.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/src/hl_batch_transpose.cu b/paddle/cuda/src/hl_batch_transpose.cu
index f4c253df7b..221839905d 100644
--- a/paddle/cuda/src/hl_batch_transpose.cu
+++ b/paddle/cuda/src/hl_batch_transpose.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/src/hl_cpu_functions.cc b/paddle/cuda/src/hl_cpu_functions.cc
index c2117a7315..1306576bcb 100644
--- a/paddle/cuda/src/hl_cpu_functions.cc
+++ b/paddle/cuda/src/hl_cpu_functions.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/src/hl_cuda_aggregate.cu b/paddle/cuda/src/hl_cuda_aggregate.cu
index 16a54ad343..d30c264127 100644
--- a/paddle/cuda/src/hl_cuda_aggregate.cu
+++ b/paddle/cuda/src/hl_cuda_aggregate.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/src/hl_cuda_cnn.cu b/paddle/cuda/src/hl_cuda_cnn.cu
index 2d1bc4f6d5..a4459243e8 100644
--- a/paddle/cuda/src/hl_cuda_cnn.cu
+++ b/paddle/cuda/src/hl_cuda_cnn.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/src/hl_cuda_cublas.cc b/paddle/cuda/src/hl_cuda_cublas.cc
index 6163209e9b..975df42878 100644
--- a/paddle/cuda/src/hl_cuda_cublas.cc
+++ b/paddle/cuda/src/hl_cuda_cublas.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/src/hl_cuda_cudnn.cc b/paddle/cuda/src/hl_cuda_cudnn.cc
index b8caf48f9c..dfa935dcff 100644
--- a/paddle/cuda/src/hl_cuda_cudnn.cc
+++ b/paddle/cuda/src/hl_cuda_cudnn.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/src/hl_cuda_device.cc b/paddle/cuda/src/hl_cuda_device.cc
index 4042d9742a..3025aa4852 100644
--- a/paddle/cuda/src/hl_cuda_device.cc
+++ b/paddle/cuda/src/hl_cuda_device.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/src/hl_cuda_lstm.cu b/paddle/cuda/src/hl_cuda_lstm.cu
index a5ce81a904..21c0c26b6e 100644
--- a/paddle/cuda/src/hl_cuda_lstm.cu
+++ b/paddle/cuda/src/hl_cuda_lstm.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/src/hl_cuda_matrix.cu b/paddle/cuda/src/hl_cuda_matrix.cu
index 607efb4f6b..3e17c8090c 100644
--- a/paddle/cuda/src/hl_cuda_matrix.cu
+++ b/paddle/cuda/src/hl_cuda_matrix.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/src/hl_cuda_sequence.cu b/paddle/cuda/src/hl_cuda_sequence.cu
index c52780dfca..a3a5f038de 100644
--- a/paddle/cuda/src/hl_cuda_sequence.cu
+++ b/paddle/cuda/src/hl_cuda_sequence.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/src/hl_cuda_sparse.cu b/paddle/cuda/src/hl_cuda_sparse.cu
index 6351e7e01e..432041fed5 100644
--- a/paddle/cuda/src/hl_cuda_sparse.cu
+++ b/paddle/cuda/src/hl_cuda_sparse.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/src/hl_cuda_sparse.cuh b/paddle/cuda/src/hl_cuda_sparse.cuh
index 72572756a6..adb898c9ac 100644
--- a/paddle/cuda/src/hl_cuda_sparse.cuh
+++ b/paddle/cuda/src/hl_cuda_sparse.cuh
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/src/hl_math.cc b/paddle/cuda/src/hl_math.cc
index 3048693fb8..585b356d0a 100644
--- a/paddle/cuda/src/hl_math.cc
+++ b/paddle/cuda/src/hl_math.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/src/hl_perturbation_util.cu b/paddle/cuda/src/hl_perturbation_util.cu
index d01a91561e..e15cbb1439 100644
--- a/paddle/cuda/src/hl_perturbation_util.cu
+++ b/paddle/cuda/src/hl_perturbation_util.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/src/hl_table_apply.cu b/paddle/cuda/src/hl_table_apply.cu
index d3b71c75e6..efa4bef02b 100644
--- a/paddle/cuda/src/hl_table_apply.cu
+++ b/paddle/cuda/src/hl_table_apply.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/src/hl_time.cc b/paddle/cuda/src/hl_time.cc
index 7e5d7e8aae..26af9ec806 100644
--- a/paddle/cuda/src/hl_time.cc
+++ b/paddle/cuda/src/hl_time.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/src/hl_top_k.cu b/paddle/cuda/src/hl_top_k.cu
index 1896a56634..fea8712a77 100644
--- a/paddle/cuda/src/hl_top_k.cu
+++ b/paddle/cuda/src/hl_top_k.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/cuda/src/hl_warpctc_wrap.cc b/paddle/cuda/src/hl_warpctc_wrap.cc
index 9f812dd0de..5111bceaff 100644
--- a/paddle/cuda/src/hl_warpctc_wrap.cc
+++ b/paddle/cuda/src/hl_warpctc_wrap.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/attribute.cc b/paddle/fluid/framework/attribute.cc
index 1d7e7366b0..0dcecb62db 100644
--- a/paddle/fluid/framework/attribute.cc
+++ b/paddle/fluid/framework/attribute.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/attribute.h b/paddle/fluid/framework/attribute.h
index 16be42ae71..8428bf8e33 100644
--- a/paddle/fluid/framework/attribute.h
+++ b/paddle/fluid/framework/attribute.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/backward.cc b/paddle/fluid/framework/backward.cc
index c4795f4fc5..68f4fd4424 100644
--- a/paddle/fluid/framework/backward.cc
+++ b/paddle/fluid/framework/backward.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/backward.h b/paddle/fluid/framework/backward.h
index 2ea6922426..3a971090c2 100644
--- a/paddle/fluid/framework/backward.h
+++ b/paddle/fluid/framework/backward.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/backward_test.cc b/paddle/fluid/framework/backward_test.cc
index f9604c6891..cc1f871360 100644
--- a/paddle/fluid/framework/backward_test.cc
+++ b/paddle/fluid/framework/backward_test.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/block_desc.cc b/paddle/fluid/framework/block_desc.cc
index 9550159155..0dd37e7df0 100644
--- a/paddle/fluid/framework/block_desc.cc
+++ b/paddle/fluid/framework/block_desc.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/block_desc.h b/paddle/fluid/framework/block_desc.h
index 5f7eca3878..4e2b03e245 100644
--- a/paddle/fluid/framework/block_desc.h
+++ b/paddle/fluid/framework/block_desc.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/data_device_transform.cc b/paddle/fluid/framework/data_device_transform.cc
index 3c6dd28455..728a2fb6f3 100644
--- a/paddle/fluid/framework/data_device_transform.cc
+++ b/paddle/fluid/framework/data_device_transform.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/paddle/fluid/framework/data_device_transform.h b/paddle/fluid/framework/data_device_transform.h
index 0c4559f586..8ff97646cf 100644
--- a/paddle/fluid/framework/data_device_transform.h
+++ b/paddle/fluid/framework/data_device_transform.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/paddle/fluid/framework/data_device_transform_test.cu b/paddle/fluid/framework/data_device_transform_test.cu
index f740f9b326..c9ba071175 100644
--- a/paddle/fluid/framework/data_device_transform_test.cu
+++ b/paddle/fluid/framework/data_device_transform_test.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/data_layout.h b/paddle/fluid/framework/data_layout.h
index b72f13f2e8..39222fc4ed 100644
--- a/paddle/fluid/framework/data_layout.h
+++ b/paddle/fluid/framework/data_layout.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/data_layout_transform.cc b/paddle/fluid/framework/data_layout_transform.cc
index c546a508fe..4ca447d50a 100644
--- a/paddle/fluid/framework/data_layout_transform.cc
+++ b/paddle/fluid/framework/data_layout_transform.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/data_layout_transform.h b/paddle/fluid/framework/data_layout_transform.h
index 862405fbf4..ba15be9fc7 100644
--- a/paddle/fluid/framework/data_layout_transform.h
+++ b/paddle/fluid/framework/data_layout_transform.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/data_layout_transform_test.cc b/paddle/fluid/framework/data_layout_transform_test.cc
index 99eb46bde3..73689cc9bc 100644
--- a/paddle/fluid/framework/data_layout_transform_test.cc
+++ b/paddle/fluid/framework/data_layout_transform_test.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -41,4 +41,4 @@ TEST(DataTransform, DataLayoutFunction) {
 
   EXPECT_TRUE(in.layout() == DataLayout::kNHWC);
   EXPECT_TRUE(in.dims() == make_ddim({2, 3, 1, 2}));
-}
\ No newline at end of file
+}
diff --git a/paddle/fluid/framework/data_transform.cc b/paddle/fluid/framework/data_transform.cc
index 9575d01af8..0475fc1d9a 100644
--- a/paddle/fluid/framework/data_transform.cc
+++ b/paddle/fluid/framework/data_transform.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/data_transform.h b/paddle/fluid/framework/data_transform.h
index 70d3a174ac..9ec67e6f3d 100644
--- a/paddle/fluid/framework/data_transform.h
+++ b/paddle/fluid/framework/data_transform.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/data_type.h b/paddle/fluid/framework/data_type.h
index 7a527f0d0c..127bbcf5d0 100644
--- a/paddle/fluid/framework/data_type.h
+++ b/paddle/fluid/framework/data_type.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/data_type_transform.cc b/paddle/fluid/framework/data_type_transform.cc
index 6921927305..e5836998e2 100644
--- a/paddle/fluid/framework/data_type_transform.cc
+++ b/paddle/fluid/framework/data_type_transform.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/data_type_transform.h b/paddle/fluid/framework/data_type_transform.h
index 830cced093..e75da2588d 100644
--- a/paddle/fluid/framework/data_type_transform.h
+++ b/paddle/fluid/framework/data_type_transform.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/data_type_transform_test.cc b/paddle/fluid/framework/data_type_transform_test.cc
index 88dbc51b21..444d3b823c 100644
--- a/paddle/fluid/framework/data_type_transform_test.cc
+++ b/paddle/fluid/framework/data_type_transform_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/ddim.cc b/paddle/fluid/framework/ddim.cc
index 98f1fa9d20..97afd36638 100644
--- a/paddle/fluid/framework/ddim.cc
+++ b/paddle/fluid/framework/ddim.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/ddim.h b/paddle/fluid/framework/ddim.h
index 405d9af702..5aff10d3b9 100644
--- a/paddle/fluid/framework/ddim.h
+++ b/paddle/fluid/framework/ddim.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/ddim_test.cc b/paddle/fluid/framework/ddim_test.cc
index 18d305a403..c1eb3210fd 100644
--- a/paddle/fluid/framework/ddim_test.cc
+++ b/paddle/fluid/framework/ddim_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/dim.h b/paddle/fluid/framework/dim.h
index 3938fd3df5..08b708006a 100644
--- a/paddle/fluid/framework/dim.h
+++ b/paddle/fluid/framework/dim.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/dim_test.cu b/paddle/fluid/framework/dim_test.cu
index 0f1969d797..0f384d12e6 100644
--- a/paddle/fluid/framework/dim_test.cu
+++ b/paddle/fluid/framework/dim_test.cu
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/eigen.h b/paddle/fluid/framework/eigen.h
index d1b8c701a7..4ea1df655d 100644
--- a/paddle/fluid/framework/eigen.h
+++ b/paddle/fluid/framework/eigen.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/eigen_test.cc b/paddle/fluid/framework/eigen_test.cc
index f9e3abeccb..bdc526d86f 100644
--- a/paddle/fluid/framework/eigen_test.cc
+++ b/paddle/fluid/framework/eigen_test.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc
index 816ad8d659..179f9194a9 100644
--- a/paddle/fluid/framework/executor.cc
+++ b/paddle/fluid/framework/executor.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/executor.h b/paddle/fluid/framework/executor.h
index 893c949939..c1f4d4e02a 100644
--- a/paddle/fluid/framework/executor.h
+++ b/paddle/fluid/framework/executor.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/feed_fetch_method.cc b/paddle/fluid/framework/feed_fetch_method.cc
index a9bb17355d..a8c3e227db 100644
--- a/paddle/fluid/framework/feed_fetch_method.cc
+++ b/paddle/fluid/framework/feed_fetch_method.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/feed_fetch_method.h b/paddle/fluid/framework/feed_fetch_method.h
index 5355c29047..d6130f421e 100644
--- a/paddle/fluid/framework/feed_fetch_method.h
+++ b/paddle/fluid/framework/feed_fetch_method.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/feed_fetch_type.h b/paddle/fluid/framework/feed_fetch_type.h
index 4281e36b13..b0d1e9f0a7 100644
--- a/paddle/fluid/framework/feed_fetch_type.h
+++ b/paddle/fluid/framework/feed_fetch_type.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/framework.proto b/paddle/fluid/framework/framework.proto
index d7be1a7352..ad8da21ae0 100644
--- a/paddle/fluid/framework/framework.proto
+++ b/paddle/fluid/framework/framework.proto
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/grad_op_desc_maker.h b/paddle/fluid/framework/grad_op_desc_maker.h
index 21dd4e8854..cf697187d6 100644
--- a/paddle/fluid/framework/grad_op_desc_maker.h
+++ b/paddle/fluid/framework/grad_op_desc_maker.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/init.cc b/paddle/fluid/framework/init.cc
index ad806a8cd7..2e0a224ff5 100644
--- a/paddle/fluid/framework/init.cc
+++ b/paddle/fluid/framework/init.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/init.h b/paddle/fluid/framework/init.h
index c8fd964d00..7d86d15811 100644
--- a/paddle/fluid/framework/init.h
+++ b/paddle/fluid/framework/init.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/init_test.cc b/paddle/fluid/framework/init_test.cc
index f3018541e2..2a03f0afe6 100644
--- a/paddle/fluid/framework/init_test.cc
+++ b/paddle/fluid/framework/init_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/library_type.h b/paddle/fluid/framework/library_type.h
index 1e30848354..ea538731b4 100644
--- a/paddle/fluid/framework/library_type.h
+++ b/paddle/fluid/framework/library_type.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/lod_rank_table.cc b/paddle/fluid/framework/lod_rank_table.cc
index 31c8749234..6bc795b642 100644
--- a/paddle/fluid/framework/lod_rank_table.cc
+++ b/paddle/fluid/framework/lod_rank_table.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/lod_rank_table.h b/paddle/fluid/framework/lod_rank_table.h
index 0eaaf49e4c..ef83e71160 100644
--- a/paddle/fluid/framework/lod_rank_table.h
+++ b/paddle/fluid/framework/lod_rank_table.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/lod_tensor.cc b/paddle/fluid/framework/lod_tensor.cc
index 05c67e453d..89768bcfd5 100644
--- a/paddle/fluid/framework/lod_tensor.cc
+++ b/paddle/fluid/framework/lod_tensor.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/lod_tensor.h b/paddle/fluid/framework/lod_tensor.h
index 1509a9fb13..948389afb6 100644
--- a/paddle/fluid/framework/lod_tensor.h
+++ b/paddle/fluid/framework/lod_tensor.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/lod_tensor_array.h b/paddle/fluid/framework/lod_tensor_array.h
index 652513bd22..6d7b6a4ada 100644
--- a/paddle/fluid/framework/lod_tensor_array.h
+++ b/paddle/fluid/framework/lod_tensor_array.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/lod_tensor_test.cc b/paddle/fluid/framework/lod_tensor_test.cc
index 7e0ed2495d..5e135192ce 100644
--- a/paddle/fluid/framework/lod_tensor_test.cc
+++ b/paddle/fluid/framework/lod_tensor_test.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/lod_tensor_test.cu b/paddle/fluid/framework/lod_tensor_test.cu
index 4dd7810c1b..be65da5ba2 100644
--- a/paddle/fluid/framework/lod_tensor_test.cu
+++ b/paddle/fluid/framework/lod_tensor_test.cu
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/mixed_vector.h b/paddle/fluid/framework/mixed_vector.h
index 6e5ceefadd..c1a89a1261 100644
--- a/paddle/fluid/framework/mixed_vector.h
+++ b/paddle/fluid/framework/mixed_vector.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/mixed_vector_test.cu b/paddle/fluid/framework/mixed_vector_test.cu
index 8ea574b31c..4bf78499f2 100644
--- a/paddle/fluid/framework/mixed_vector_test.cu
+++ b/paddle/fluid/framework/mixed_vector_test.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc
index cbc15e60b8..e740010c63 100644
--- a/paddle/fluid/framework/op_desc.cc
+++ b/paddle/fluid/framework/op_desc.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/op_desc.h b/paddle/fluid/framework/op_desc.h
index 698df829e5..b72aad6fb5 100644
--- a/paddle/fluid/framework/op_desc.h
+++ b/paddle/fluid/framework/op_desc.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/op_info.cc b/paddle/fluid/framework/op_info.cc
index 703c9c3234..b99e82f8c4 100644
--- a/paddle/fluid/framework/op_info.cc
+++ b/paddle/fluid/framework/op_info.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/op_info.h b/paddle/fluid/framework/op_info.h
index e6b3ff9e65..19e5c2c73e 100644
--- a/paddle/fluid/framework/op_info.h
+++ b/paddle/fluid/framework/op_info.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/op_kernel_type.h b/paddle/fluid/framework/op_kernel_type.h
index b5dbff26d7..980e4eafaa 100644
--- a/paddle/fluid/framework/op_kernel_type.h
+++ b/paddle/fluid/framework/op_kernel_type.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/op_kernel_type_test.cc b/paddle/fluid/framework/op_kernel_type_test.cc
index 64096907df..e56fe35c01 100644
--- a/paddle/fluid/framework/op_kernel_type_test.cc
+++ b/paddle/fluid/framework/op_kernel_type_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/op_proto_maker.cc b/paddle/fluid/framework/op_proto_maker.cc
index 0a779b10b4..3116b03d04 100644
--- a/paddle/fluid/framework/op_proto_maker.cc
+++ b/paddle/fluid/framework/op_proto_maker.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/paddle/fluid/framework/op_proto_maker.h b/paddle/fluid/framework/op_proto_maker.h
index 1dbfc7d37b..cf56b0fa18 100644
--- a/paddle/fluid/framework/op_proto_maker.h
+++ b/paddle/fluid/framework/op_proto_maker.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/paddle/fluid/framework/op_proto_maker_test.cc b/paddle/fluid/framework/op_proto_maker_test.cc
index cfefee8dbd..a8d8c6386a 100644
--- a/paddle/fluid/framework/op_proto_maker_test.cc
+++ b/paddle/fluid/framework/op_proto_maker_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/op_registry.cc b/paddle/fluid/framework/op_registry.cc
index 739ec72ebc..bfc411ca2c 100644
--- a/paddle/fluid/framework/op_registry.cc
+++ b/paddle/fluid/framework/op_registry.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/op_registry.h b/paddle/fluid/framework/op_registry.h
index 73faa99668..f1424f13b4 100644
--- a/paddle/fluid/framework/op_registry.h
+++ b/paddle/fluid/framework/op_registry.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/op_registry_test.cc b/paddle/fluid/framework/op_registry_test.cc
index 2746168f1d..b92647e892 100644
--- a/paddle/fluid/framework/op_registry_test.cc
+++ b/paddle/fluid/framework/op_registry_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index 8effbf1bc6..bc529b8269 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h
index 708f87dc86..c2782066ce 100644
--- a/paddle/fluid/framework/operator.h
+++ b/paddle/fluid/framework/operator.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/operator_test.cc b/paddle/fluid/framework/operator_test.cc
index 0732ec5afe..08a471e0a1 100644
--- a/paddle/fluid/framework/operator_test.cc
+++ b/paddle/fluid/framework/operator_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/program_desc.cc b/paddle/fluid/framework/program_desc.cc
index b3f2e97cd9..049731c721 100644
--- a/paddle/fluid/framework/program_desc.cc
+++ b/paddle/fluid/framework/program_desc.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/program_desc.h b/paddle/fluid/framework/program_desc.h
index 937de6ba92..8d4b999ad2 100644
--- a/paddle/fluid/framework/program_desc.h
+++ b/paddle/fluid/framework/program_desc.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/program_desc_test.cc b/paddle/fluid/framework/program_desc_test.cc
index afd5c9dabf..3a4a87cfa5 100644
--- a/paddle/fluid/framework/program_desc_test.cc
+++ b/paddle/fluid/framework/program_desc_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/proto_desc.h b/paddle/fluid/framework/proto_desc.h
index fa01224fef..40521c0782 100644
--- a/paddle/fluid/framework/proto_desc.h
+++ b/paddle/fluid/framework/proto_desc.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/prune.cc b/paddle/fluid/framework/prune.cc
index 79dbd3bcab..71d6e08112 100644
--- a/paddle/fluid/framework/prune.cc
+++ b/paddle/fluid/framework/prune.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/prune.h b/paddle/fluid/framework/prune.h
index 601e66b67a..4c5a1dedd9 100644
--- a/paddle/fluid/framework/prune.h
+++ b/paddle/fluid/framework/prune.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/prune_test.cc b/paddle/fluid/framework/prune_test.cc
index 36b76f0763..b612fe8ad5 100644
--- a/paddle/fluid/framework/prune_test.cc
+++ b/paddle/fluid/framework/prune_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/scope.cc b/paddle/fluid/framework/scope.cc
index 91a8617d66..ea6c8cebd3 100644
--- a/paddle/fluid/framework/scope.cc
+++ b/paddle/fluid/framework/scope.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/scope.h b/paddle/fluid/framework/scope.h
index 2da9e0716e..d8fad162e5 100644
--- a/paddle/fluid/framework/scope.h
+++ b/paddle/fluid/framework/scope.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/scope_test.cc b/paddle/fluid/framework/scope_test.cc
index d64acb130c..ebf8178a83 100644
--- a/paddle/fluid/framework/scope_test.cc
+++ b/paddle/fluid/framework/scope_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/selected_rows.cc b/paddle/fluid/framework/selected_rows.cc
index f5d9e9a495..08c319002d 100644
--- a/paddle/fluid/framework/selected_rows.cc
+++ b/paddle/fluid/framework/selected_rows.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/paddle/fluid/framework/selected_rows.h b/paddle/fluid/framework/selected_rows.h
index f1a263962b..c9c2c1bb72 100644
--- a/paddle/fluid/framework/selected_rows.h
+++ b/paddle/fluid/framework/selected_rows.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/paddle/fluid/framework/selected_rows_test.cc b/paddle/fluid/framework/selected_rows_test.cc
index d414f2a593..960d8d64f0 100644
--- a/paddle/fluid/framework/selected_rows_test.cc
+++ b/paddle/fluid/framework/selected_rows_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/paddle/fluid/framework/shape_inference.cc b/paddle/fluid/framework/shape_inference.cc
index cfd2334f1a..1b518970ac 100644
--- a/paddle/fluid/framework/shape_inference.cc
+++ b/paddle/fluid/framework/shape_inference.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/shape_inference.h b/paddle/fluid/framework/shape_inference.h
index c907523325..3739d640fe 100644
--- a/paddle/fluid/framework/shape_inference.h
+++ b/paddle/fluid/framework/shape_inference.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/tensor.cc b/paddle/fluid/framework/tensor.cc
index a56091d3c6..e97ada06f0 100644
--- a/paddle/fluid/framework/tensor.cc
+++ b/paddle/fluid/framework/tensor.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/tensor.h b/paddle/fluid/framework/tensor.h
index 44d2c7dae9..f95af384eb 100644
--- a/paddle/fluid/framework/tensor.h
+++ b/paddle/fluid/framework/tensor.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/tensor_impl.h b/paddle/fluid/framework/tensor_impl.h
index e69836292c..59e6269ea0 100644
--- a/paddle/fluid/framework/tensor_impl.h
+++ b/paddle/fluid/framework/tensor_impl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/tensor_test.cc b/paddle/fluid/framework/tensor_test.cc
index 6ed416e46f..e1012de2ec 100644
--- a/paddle/fluid/framework/tensor_test.cc
+++ b/paddle/fluid/framework/tensor_test.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/tensor_util.h b/paddle/fluid/framework/tensor_util.h
index b7e772b6da..22519013cc 100644
--- a/paddle/fluid/framework/tensor_util.h
+++ b/paddle/fluid/framework/tensor_util.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/tensor_util_test.cc b/paddle/fluid/framework/tensor_util_test.cc
index 8764c692e8..dcdbf9d395 100644
--- a/paddle/fluid/framework/tensor_util_test.cc
+++ b/paddle/fluid/framework/tensor_util_test.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/threadpool.cc b/paddle/fluid/framework/threadpool.cc
index 2c4de41b0c..9854d618d2 100644
--- a/paddle/fluid/framework/threadpool.cc
+++ b/paddle/fluid/framework/threadpool.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/threadpool.h b/paddle/fluid/framework/threadpool.h
index e88e6c01f0..606a93e13b 100644
--- a/paddle/fluid/framework/threadpool.h
+++ b/paddle/fluid/framework/threadpool.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/threadpool_test.cc b/paddle/fluid/framework/threadpool_test.cc
index 3fbfe7efc8..4da83d630a 100644
--- a/paddle/fluid/framework/threadpool_test.cc
+++ b/paddle/fluid/framework/threadpool_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/type_defs.h b/paddle/fluid/framework/type_defs.h
index 786d78a644..4879209ece 100644
--- a/paddle/fluid/framework/type_defs.h
+++ b/paddle/fluid/framework/type_defs.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/var_desc.cc b/paddle/fluid/framework/var_desc.cc
index 7ec9b2ced9..eb88146969 100644
--- a/paddle/fluid/framework/var_desc.cc
+++ b/paddle/fluid/framework/var_desc.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/var_desc.h b/paddle/fluid/framework/var_desc.h
index cdb1bc3ec0..b272e5063e 100644
--- a/paddle/fluid/framework/var_desc.h
+++ b/paddle/fluid/framework/var_desc.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/var_type.h b/paddle/fluid/framework/var_type.h
index 2dc4de5298..b5a6183892 100644
--- a/paddle/fluid/framework/var_type.h
+++ b/paddle/fluid/framework/var_type.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/var_type_inference.h b/paddle/fluid/framework/var_type_inference.h
index 44fd4cd622..f3035cd712 100644
--- a/paddle/fluid/framework/var_type_inference.h
+++ b/paddle/fluid/framework/var_type_inference.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/var_type_inference_test.cc b/paddle/fluid/framework/var_type_inference_test.cc
index 0ee589c821..961f209ee1 100644
--- a/paddle/fluid/framework/var_type_inference_test.cc
+++ b/paddle/fluid/framework/var_type_inference_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -102,4 +102,4 @@ TEST(InferVarType, sum_op_without_infer_var_type) {
 }
 
 }  // namespace framework
-}  // namespace paddle
\ No newline at end of file
+}  // namespace paddle
diff --git a/paddle/fluid/framework/variable.h b/paddle/fluid/framework/variable.h
index 9fb8ca92d6..87ddfe2ff9 100644
--- a/paddle/fluid/framework/variable.h
+++ b/paddle/fluid/framework/variable.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/framework/variable_test.cc b/paddle/fluid/framework/variable_test.cc
index 8c14e506fd..c5c1d215f4 100644
--- a/paddle/fluid/framework/variable_test.cc
+++ b/paddle/fluid/framework/variable_test.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/inference/io.cc b/paddle/fluid/inference/io.cc
index 58d7ab40bf..71c5ab3db9 100644
--- a/paddle/fluid/inference/io.cc
+++ b/paddle/fluid/inference/io.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/inference/io.h b/paddle/fluid/inference/io.h
index 9d78640606..6817a6fca0 100644
--- a/paddle/fluid/inference/io.h
+++ b/paddle/fluid/inference/io.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc b/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc
index fa18e69b3a..9ab808efec 100644
--- a/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc
+++ b/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/paddle/fluid/inference/tests/book/test_inference_image_classification.cc b/paddle/fluid/inference/tests/book/test_inference_image_classification.cc
index 27f17712bc..d6fc51301b 100644
--- a/paddle/fluid/inference/tests/book/test_inference_image_classification.cc
+++ b/paddle/fluid/inference/tests/book/test_inference_image_classification.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc b/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc
index 55acd95f50..443193aae8 100644
--- a/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc
+++ b/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc b/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc
index 99cf0f3095..bd71948916 100644
--- a/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc
+++ b/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/inference/tests/book/test_inference_recommender_system.cc b/paddle/fluid/inference/tests/book/test_inference_recommender_system.cc
index 9208c2a599..b42a33c9a9 100644
--- a/paddle/fluid/inference/tests/book/test_inference_recommender_system.cc
+++ b/paddle/fluid/inference/tests/book/test_inference_recommender_system.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/inference/tests/book/test_inference_rnn_encoder_decoder.cc b/paddle/fluid/inference/tests/book/test_inference_rnn_encoder_decoder.cc
index c88ca30cb7..a0523905bd 100644
--- a/paddle/fluid/inference/tests/book/test_inference_rnn_encoder_decoder.cc
+++ b/paddle/fluid/inference/tests/book/test_inference_rnn_encoder_decoder.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc b/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc
index 3b29d52880..e67064fb61 100644
--- a/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc
+++ b/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/inference/tests/book/test_inference_word2vec.cc b/paddle/fluid/inference/tests/book/test_inference_word2vec.cc
index 93376b6824..e2f2f36a82 100644
--- a/paddle/fluid/inference/tests/book/test_inference_word2vec.cc
+++ b/paddle/fluid/inference/tests/book/test_inference_word2vec.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/inference/tests/test_helper.h b/paddle/fluid/inference/tests/test_helper.h
index a6c93aa073..abe2032cc0 100644
--- a/paddle/fluid/inference/tests/test_helper.h
+++ b/paddle/fluid/inference/tests/test_helper.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/memory/detail/buddy_allocator.cc b/paddle/fluid/memory/detail/buddy_allocator.cc
index 2cee8271d2..8768378386 100644
--- a/paddle/fluid/memory/detail/buddy_allocator.cc
+++ b/paddle/fluid/memory/detail/buddy_allocator.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/memory/detail/buddy_allocator.h b/paddle/fluid/memory/detail/buddy_allocator.h
index 644d793306..a4ee70c258 100644
--- a/paddle/fluid/memory/detail/buddy_allocator.h
+++ b/paddle/fluid/memory/detail/buddy_allocator.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/memory/detail/memory_block.cc b/paddle/fluid/memory/detail/memory_block.cc
index 23388cdd5b..07123f2669 100644
--- a/paddle/fluid/memory/detail/memory_block.cc
+++ b/paddle/fluid/memory/detail/memory_block.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/memory/detail/memory_block.h b/paddle/fluid/memory/detail/memory_block.h
index a4ca51b31b..72b40b7317 100644
--- a/paddle/fluid/memory/detail/memory_block.h
+++ b/paddle/fluid/memory/detail/memory_block.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/memory/detail/meta_cache.cc b/paddle/fluid/memory/detail/meta_cache.cc
index 7d78811c77..43249e842a 100644
--- a/paddle/fluid/memory/detail/meta_cache.cc
+++ b/paddle/fluid/memory/detail/meta_cache.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/memory/detail/meta_cache.h b/paddle/fluid/memory/detail/meta_cache.h
index 635d6398e6..3283d756a6 100644
--- a/paddle/fluid/memory/detail/meta_cache.h
+++ b/paddle/fluid/memory/detail/meta_cache.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/memory/detail/meta_data.cc b/paddle/fluid/memory/detail/meta_data.cc
index eae49ebdcf..ad862af170 100644
--- a/paddle/fluid/memory/detail/meta_data.cc
+++ b/paddle/fluid/memory/detail/meta_data.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/memory/detail/meta_data.h b/paddle/fluid/memory/detail/meta_data.h
index 368523701e..14895ee872 100644
--- a/paddle/fluid/memory/detail/meta_data.h
+++ b/paddle/fluid/memory/detail/meta_data.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/memory/detail/system_allocator.cc b/paddle/fluid/memory/detail/system_allocator.cc
index 1f07c5e789..8ac8978120 100644
--- a/paddle/fluid/memory/detail/system_allocator.cc
+++ b/paddle/fluid/memory/detail/system_allocator.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/memory/detail/system_allocator.h b/paddle/fluid/memory/detail/system_allocator.h
index 552cab4f96..e93c2c1e32 100644
--- a/paddle/fluid/memory/detail/system_allocator.h
+++ b/paddle/fluid/memory/detail/system_allocator.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/memory/detail/system_allocator_test.cc b/paddle/fluid/memory/detail/system_allocator_test.cc
index a850e480ec..d5df9e6897 100644
--- a/paddle/fluid/memory/detail/system_allocator_test.cc
+++ b/paddle/fluid/memory/detail/system_allocator_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/memory/memcpy.cc b/paddle/fluid/memory/memcpy.cc
index 8938b36133..b991360d04 100644
--- a/paddle/fluid/memory/memcpy.cc
+++ b/paddle/fluid/memory/memcpy.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/memory/memcpy.h b/paddle/fluid/memory/memcpy.h
index 77d209c3fb..7b2b8eb066 100644
--- a/paddle/fluid/memory/memcpy.h
+++ b/paddle/fluid/memory/memcpy.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/memory/memory.cc b/paddle/fluid/memory/memory.cc
index 6eedab5d03..d07f89439a 100644
--- a/paddle/fluid/memory/memory.cc
+++ b/paddle/fluid/memory/memory.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/memory/memory.h b/paddle/fluid/memory/memory.h
index a9166a6746..7c5db815d6 100644
--- a/paddle/fluid/memory/memory.h
+++ b/paddle/fluid/memory/memory.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/memory/memory_test.cc b/paddle/fluid/memory/memory_test.cc
index d7505ef0f3..ae98d0d525 100644
--- a/paddle/fluid/memory/memory_test.cc
+++ b/paddle/fluid/memory/memory_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/accuracy_op.cc b/paddle/fluid/operators/accuracy_op.cc
index 43689b3b7d..ac10d759fe 100644
--- a/paddle/fluid/operators/accuracy_op.cc
+++ b/paddle/fluid/operators/accuracy_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/accuracy_op.cu b/paddle/fluid/operators/accuracy_op.cu
index 4462b9ba5c..630a4a2df2 100644
--- a/paddle/fluid/operators/accuracy_op.cu
+++ b/paddle/fluid/operators/accuracy_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/accuracy_op.h b/paddle/fluid/operators/accuracy_op.h
index b3ed1d3fe0..803244dd48 100644
--- a/paddle/fluid/operators/accuracy_op.h
+++ b/paddle/fluid/operators/accuracy_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/activation_op.cc b/paddle/fluid/operators/activation_op.cc
index c04dd8cb91..d74c47b981 100644
--- a/paddle/fluid/operators/activation_op.cc
+++ b/paddle/fluid/operators/activation_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/activation_op.cu b/paddle/fluid/operators/activation_op.cu
index b86a7926a9..b2633d0176 100644
--- a/paddle/fluid/operators/activation_op.cu
+++ b/paddle/fluid/operators/activation_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/activation_op.h b/paddle/fluid/operators/activation_op.h
index 7a6ae2224c..8f791a6ca8 100644
--- a/paddle/fluid/operators/activation_op.h
+++ b/paddle/fluid/operators/activation_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/adadelta_op.cc b/paddle/fluid/operators/adadelta_op.cc
index ececd47e6a..c9ed221a6e 100644
--- a/paddle/fluid/operators/adadelta_op.cc
+++ b/paddle/fluid/operators/adadelta_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/adadelta_op.cu b/paddle/fluid/operators/adadelta_op.cu
index 733482f788..fc10c66574 100644
--- a/paddle/fluid/operators/adadelta_op.cu
+++ b/paddle/fluid/operators/adadelta_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/adadelta_op.h b/paddle/fluid/operators/adadelta_op.h
index 82ced08710..822458daf6 100644
--- a/paddle/fluid/operators/adadelta_op.h
+++ b/paddle/fluid/operators/adadelta_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/adagrad_op.cc b/paddle/fluid/operators/adagrad_op.cc
index 61c0ecd019..c990fe7843 100644
--- a/paddle/fluid/operators/adagrad_op.cc
+++ b/paddle/fluid/operators/adagrad_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/adagrad_op.cu b/paddle/fluid/operators/adagrad_op.cu
index 1117363c13..e798101ca6 100644
--- a/paddle/fluid/operators/adagrad_op.cu
+++ b/paddle/fluid/operators/adagrad_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/adagrad_op.h b/paddle/fluid/operators/adagrad_op.h
index ee503b2c36..df520fcc89 100644
--- a/paddle/fluid/operators/adagrad_op.h
+++ b/paddle/fluid/operators/adagrad_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/adam_op.cc b/paddle/fluid/operators/adam_op.cc
index 25da9336b2..267dcab810 100644
--- a/paddle/fluid/operators/adam_op.cc
+++ b/paddle/fluid/operators/adam_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/adam_op.cu b/paddle/fluid/operators/adam_op.cu
index 85b806eb6a..77f1991002 100644
--- a/paddle/fluid/operators/adam_op.cu
+++ b/paddle/fluid/operators/adam_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/adam_op.h b/paddle/fluid/operators/adam_op.h
index a51b46ef15..b332b67163 100644
--- a/paddle/fluid/operators/adam_op.h
+++ b/paddle/fluid/operators/adam_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/adamax_op.cc b/paddle/fluid/operators/adamax_op.cc
index b2249b8f96..7e2f1cc66e 100644
--- a/paddle/fluid/operators/adamax_op.cc
+++ b/paddle/fluid/operators/adamax_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/adamax_op.cu b/paddle/fluid/operators/adamax_op.cu
index 44a5d6c7bd..05cafd7a8e 100644
--- a/paddle/fluid/operators/adamax_op.cu
+++ b/paddle/fluid/operators/adamax_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/adamax_op.h b/paddle/fluid/operators/adamax_op.h
index 124453c0ec..de644676fd 100644
--- a/paddle/fluid/operators/adamax_op.h
+++ b/paddle/fluid/operators/adamax_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/array_operator.h b/paddle/fluid/operators/array_operator.h
index 4ffb414ece..d0fc153347 100644
--- a/paddle/fluid/operators/array_operator.h
+++ b/paddle/fluid/operators/array_operator.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/array_to_lod_tensor_op.cc b/paddle/fluid/operators/array_to_lod_tensor_op.cc
index 69464c4cff..f59bfad6cc 100644
--- a/paddle/fluid/operators/array_to_lod_tensor_op.cc
+++ b/paddle/fluid/operators/array_to_lod_tensor_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/assign_op.cc b/paddle/fluid/operators/assign_op.cc
index b72e72b12f..eedf6b8c66 100644
--- a/paddle/fluid/operators/assign_op.cc
+++ b/paddle/fluid/operators/assign_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/assign_value_op.cc b/paddle/fluid/operators/assign_value_op.cc
index 835043d9ab..2985fc28a0 100644
--- a/paddle/fluid/operators/assign_value_op.cc
+++ b/paddle/fluid/operators/assign_value_op.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/assign_value_op.cu.cc b/paddle/fluid/operators/assign_value_op.cu.cc
index 616163f97b..08bfde5dc9 100644
--- a/paddle/fluid/operators/assign_value_op.cu.cc
+++ b/paddle/fluid/operators/assign_value_op.cu.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/assign_value_op.h b/paddle/fluid/operators/assign_value_op.h
index 33a344cad5..90c9496a3c 100644
--- a/paddle/fluid/operators/assign_value_op.h
+++ b/paddle/fluid/operators/assign_value_op.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/auc_op.cc b/paddle/fluid/operators/auc_op.cc
index 8ac08ea4a1..71de78b118 100644
--- a/paddle/fluid/operators/auc_op.cc
+++ b/paddle/fluid/operators/auc_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/auc_op.h b/paddle/fluid/operators/auc_op.h
index e648db7097..f4e8208c3f 100644
--- a/paddle/fluid/operators/auc_op.h
+++ b/paddle/fluid/operators/auc_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/batch_norm_op.cc b/paddle/fluid/operators/batch_norm_op.cc
index 506c25d50d..215ae229af 100644
--- a/paddle/fluid/operators/batch_norm_op.cc
+++ b/paddle/fluid/operators/batch_norm_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/batch_norm_op.cu.cc b/paddle/fluid/operators/batch_norm_op.cu.cc
index b9c97211e1..2d1556efc6 100644
--- a/paddle/fluid/operators/batch_norm_op.cu.cc
+++ b/paddle/fluid/operators/batch_norm_op.cu.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/batch_norm_op.h b/paddle/fluid/operators/batch_norm_op.h
index fa9942ad09..9e5fc41598 100644
--- a/paddle/fluid/operators/batch_norm_op.h
+++ b/paddle/fluid/operators/batch_norm_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/beam_search_decode_op.cc b/paddle/fluid/operators/beam_search_decode_op.cc
index 6d3efcfeb8..dacb0e2681 100644
--- a/paddle/fluid/operators/beam_search_decode_op.cc
+++ b/paddle/fluid/operators/beam_search_decode_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/beam_search_decode_op.h b/paddle/fluid/operators/beam_search_decode_op.h
index aeecb8d39a..40147ce1eb 100644
--- a/paddle/fluid/operators/beam_search_decode_op.h
+++ b/paddle/fluid/operators/beam_search_decode_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/beam_search_decode_op_test.cc b/paddle/fluid/operators/beam_search_decode_op_test.cc
index 24f87279d5..c3faf46e09 100644
--- a/paddle/fluid/operators/beam_search_decode_op_test.cc
+++ b/paddle/fluid/operators/beam_search_decode_op_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/beam_search_op.cc b/paddle/fluid/operators/beam_search_op.cc
index 6f4c8c7e06..76985ea9c2 100644
--- a/paddle/fluid/operators/beam_search_op.cc
+++ b/paddle/fluid/operators/beam_search_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/beam_search_op.h b/paddle/fluid/operators/beam_search_op.h
index bfbe78097d..b333ef4e6c 100644
--- a/paddle/fluid/operators/beam_search_op.h
+++ b/paddle/fluid/operators/beam_search_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/beam_search_op_test.cc b/paddle/fluid/operators/beam_search_op_test.cc
index ea2afda4d4..ec666359aa 100644
--- a/paddle/fluid/operators/beam_search_op_test.cc
+++ b/paddle/fluid/operators/beam_search_op_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/bilinear_tensor_product_op.cc b/paddle/fluid/operators/bilinear_tensor_product_op.cc
index cc378b1b45..2ec984d8e0 100644
--- a/paddle/fluid/operators/bilinear_tensor_product_op.cc
+++ b/paddle/fluid/operators/bilinear_tensor_product_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/bilinear_tensor_product_op.cu b/paddle/fluid/operators/bilinear_tensor_product_op.cu
index 2cec48ee69..9426ffbe17 100644
--- a/paddle/fluid/operators/bilinear_tensor_product_op.cu
+++ b/paddle/fluid/operators/bilinear_tensor_product_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/bilinear_tensor_product_op.h b/paddle/fluid/operators/bilinear_tensor_product_op.h
index 626fa957c4..ca80e6085c 100644
--- a/paddle/fluid/operators/bilinear_tensor_product_op.h
+++ b/paddle/fluid/operators/bilinear_tensor_product_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/bipartite_match_op.cc b/paddle/fluid/operators/bipartite_match_op.cc
index d614bf7043..c536cf6b6b 100644
--- a/paddle/fluid/operators/bipartite_match_op.cc
+++ b/paddle/fluid/operators/bipartite_match_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/box_coder_op.cc b/paddle/fluid/operators/box_coder_op.cc
index 8e0fee22d8..1fc201286f 100644
--- a/paddle/fluid/operators/box_coder_op.cc
+++ b/paddle/fluid/operators/box_coder_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/paddle/fluid/operators/box_coder_op.cu b/paddle/fluid/operators/box_coder_op.cu
index dd9299ceac..7ab242edfa 100644
--- a/paddle/fluid/operators/box_coder_op.cu
+++ b/paddle/fluid/operators/box_coder_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/paddle/fluid/operators/box_coder_op.h b/paddle/fluid/operators/box_coder_op.h
index c41bcc212b..5e105aff52 100644
--- a/paddle/fluid/operators/box_coder_op.h
+++ b/paddle/fluid/operators/box_coder_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/paddle/fluid/operators/cast_op.cc b/paddle/fluid/operators/cast_op.cc
index 364c21f761..a5ec47d84f 100644
--- a/paddle/fluid/operators/cast_op.cc
+++ b/paddle/fluid/operators/cast_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/cast_op.cu b/paddle/fluid/operators/cast_op.cu
index fb597be9d9..507e9a531a 100644
--- a/paddle/fluid/operators/cast_op.cu
+++ b/paddle/fluid/operators/cast_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/cast_op.h b/paddle/fluid/operators/cast_op.h
index 9ab4961cef..ccfbd09a6b 100644
--- a/paddle/fluid/operators/cast_op.h
+++ b/paddle/fluid/operators/cast_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/chunk_eval_op.cc b/paddle/fluid/operators/chunk_eval_op.cc
index 080e4d80da..09d090e187 100644
--- a/paddle/fluid/operators/chunk_eval_op.cc
+++ b/paddle/fluid/operators/chunk_eval_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/chunk_eval_op.h b/paddle/fluid/operators/chunk_eval_op.h
index 3dca3d2c0f..9e97f7c776 100644
--- a/paddle/fluid/operators/chunk_eval_op.h
+++ b/paddle/fluid/operators/chunk_eval_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/clip_by_norm_op.cc b/paddle/fluid/operators/clip_by_norm_op.cc
index 89df118c06..f43726b479 100644
--- a/paddle/fluid/operators/clip_by_norm_op.cc
+++ b/paddle/fluid/operators/clip_by_norm_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/clip_by_norm_op.cu b/paddle/fluid/operators/clip_by_norm_op.cu
index a466b33591..788eab7cb2 100644
--- a/paddle/fluid/operators/clip_by_norm_op.cu
+++ b/paddle/fluid/operators/clip_by_norm_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/clip_by_norm_op.h b/paddle/fluid/operators/clip_by_norm_op.h
index 82bcf07657..5af0eb0b2a 100644
--- a/paddle/fluid/operators/clip_by_norm_op.h
+++ b/paddle/fluid/operators/clip_by_norm_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/clip_op.cc b/paddle/fluid/operators/clip_op.cc
index 76b2cefbf9..a3b67964c7 100644
--- a/paddle/fluid/operators/clip_op.cc
+++ b/paddle/fluid/operators/clip_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/clip_op.cu b/paddle/fluid/operators/clip_op.cu
index 7b044d6e69..10bee444f6 100644
--- a/paddle/fluid/operators/clip_op.cu
+++ b/paddle/fluid/operators/clip_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/clip_op.h b/paddle/fluid/operators/clip_op.h
index aecd6f83bf..85607a6b0e 100644
--- a/paddle/fluid/operators/clip_op.h
+++ b/paddle/fluid/operators/clip_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/compare_op.cc b/paddle/fluid/operators/compare_op.cc
index b1f09fb002..cdeb28cc1d 100644
--- a/paddle/fluid/operators/compare_op.cc
+++ b/paddle/fluid/operators/compare_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/compare_op.cu b/paddle/fluid/operators/compare_op.cu
index 00263a2ade..2cc0c7c572 100644
--- a/paddle/fluid/operators/compare_op.cu
+++ b/paddle/fluid/operators/compare_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/compare_op.h b/paddle/fluid/operators/compare_op.h
index c651335268..d7b62782fc 100644
--- a/paddle/fluid/operators/compare_op.h
+++ b/paddle/fluid/operators/compare_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/concat_op.cc b/paddle/fluid/operators/concat_op.cc
index 68eb5412be..bdce8f0a6f 100644
--- a/paddle/fluid/operators/concat_op.cc
+++ b/paddle/fluid/operators/concat_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/concat_op.cu.cc b/paddle/fluid/operators/concat_op.cu.cc
index 143bda6116..590eca9d06 100644
--- a/paddle/fluid/operators/concat_op.cu.cc
+++ b/paddle/fluid/operators/concat_op.cu.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/concat_op.h b/paddle/fluid/operators/concat_op.h
index c8a4292932..eb0e43ad2d 100644
--- a/paddle/fluid/operators/concat_op.h
+++ b/paddle/fluid/operators/concat_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/cond_op.cc b/paddle/fluid/operators/cond_op.cc
index d63748a61c..15dce9e3e2 100644
--- a/paddle/fluid/operators/cond_op.cc
+++ b/paddle/fluid/operators/cond_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/cond_op.h b/paddle/fluid/operators/cond_op.h
index 0bb14bc8c2..a04fae2182 100644
--- a/paddle/fluid/operators/cond_op.h
+++ b/paddle/fluid/operators/cond_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/conditional_block_op.cc b/paddle/fluid/operators/conditional_block_op.cc
index 228b099836..337b34e8f0 100644
--- a/paddle/fluid/operators/conditional_block_op.cc
+++ b/paddle/fluid/operators/conditional_block_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/conv_cudnn_op.cu.cc b/paddle/fluid/operators/conv_cudnn_op.cu.cc
index a729d376ac..ff0fbf21f8 100644
--- a/paddle/fluid/operators/conv_cudnn_op.cu.cc
+++ b/paddle/fluid/operators/conv_cudnn_op.cu.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc
index a047e57916..6b378ec1bc 100644
--- a/paddle/fluid/operators/conv_op.cc
+++ b/paddle/fluid/operators/conv_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/conv_op.cu.cc b/paddle/fluid/operators/conv_op.cu.cc
index b2129d3b46..d07593f5c0 100644
--- a/paddle/fluid/operators/conv_op.cu.cc
+++ b/paddle/fluid/operators/conv_op.cu.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/conv_op.h b/paddle/fluid/operators/conv_op.h
index 1156e6c8fe..ecbe3d505a 100644
--- a/paddle/fluid/operators/conv_op.h
+++ b/paddle/fluid/operators/conv_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/conv_shift_op.cc b/paddle/fluid/operators/conv_shift_op.cc
index a96aac63e0..a1a0b00208 100644
--- a/paddle/fluid/operators/conv_shift_op.cc
+++ b/paddle/fluid/operators/conv_shift_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/conv_shift_op.cu b/paddle/fluid/operators/conv_shift_op.cu
index 9818707ce3..344bbade70 100644
--- a/paddle/fluid/operators/conv_shift_op.cu
+++ b/paddle/fluid/operators/conv_shift_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/conv_shift_op.h b/paddle/fluid/operators/conv_shift_op.h
index 987a690895..6d8ddd7937 100644
--- a/paddle/fluid/operators/conv_shift_op.h
+++ b/paddle/fluid/operators/conv_shift_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc b/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc
index 0aed4ebeff..901682edbb 100644
--- a/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc
+++ b/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/conv_transpose_op.cc b/paddle/fluid/operators/conv_transpose_op.cc
index 974cffad92..b2a3cfc89f 100644
--- a/paddle/fluid/operators/conv_transpose_op.cc
+++ b/paddle/fluid/operators/conv_transpose_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/conv_transpose_op.cu.cc b/paddle/fluid/operators/conv_transpose_op.cu.cc
index ed90c6ec62..640fa7d14a 100644
--- a/paddle/fluid/operators/conv_transpose_op.cu.cc
+++ b/paddle/fluid/operators/conv_transpose_op.cu.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/conv_transpose_op.h b/paddle/fluid/operators/conv_transpose_op.h
index f512575468..d4e4b641ec 100644
--- a/paddle/fluid/operators/conv_transpose_op.h
+++ b/paddle/fluid/operators/conv_transpose_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/cos_sim_op.cc b/paddle/fluid/operators/cos_sim_op.cc
index 57c5a6025a..4c8af408f6 100644
--- a/paddle/fluid/operators/cos_sim_op.cc
+++ b/paddle/fluid/operators/cos_sim_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/cos_sim_op.cu b/paddle/fluid/operators/cos_sim_op.cu
index c8cf363cdc..82205e9c75 100644
--- a/paddle/fluid/operators/cos_sim_op.cu
+++ b/paddle/fluid/operators/cos_sim_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/cos_sim_op.h b/paddle/fluid/operators/cos_sim_op.h
index 9cd8b196da..76cfc68051 100644
--- a/paddle/fluid/operators/cos_sim_op.h
+++ b/paddle/fluid/operators/cos_sim_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/crf_decoding_op.cc b/paddle/fluid/operators/crf_decoding_op.cc
index e3c1fc95a3..a83013c428 100644
--- a/paddle/fluid/operators/crf_decoding_op.cc
+++ b/paddle/fluid/operators/crf_decoding_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/crf_decoding_op.h b/paddle/fluid/operators/crf_decoding_op.h
index c3c161eec5..2b2a733fb9 100644
--- a/paddle/fluid/operators/crf_decoding_op.h
+++ b/paddle/fluid/operators/crf_decoding_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/crop_op.cc b/paddle/fluid/operators/crop_op.cc
index 8e80f77e49..fd7ea70c64 100644
--- a/paddle/fluid/operators/crop_op.cc
+++ b/paddle/fluid/operators/crop_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/crop_op.cu b/paddle/fluid/operators/crop_op.cu
index f3610675aa..1a39186046 100644
--- a/paddle/fluid/operators/crop_op.cu
+++ b/paddle/fluid/operators/crop_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/crop_op.h b/paddle/fluid/operators/crop_op.h
index 9c7c0446d4..c5ac684978 100644
--- a/paddle/fluid/operators/crop_op.h
+++ b/paddle/fluid/operators/crop_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/cross_entropy_op.cc b/paddle/fluid/operators/cross_entropy_op.cc
index 5e34b248b6..55810371c8 100644
--- a/paddle/fluid/operators/cross_entropy_op.cc
+++ b/paddle/fluid/operators/cross_entropy_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/cross_entropy_op.cu b/paddle/fluid/operators/cross_entropy_op.cu
index de0976c69f..6449149d4b 100644
--- a/paddle/fluid/operators/cross_entropy_op.cu
+++ b/paddle/fluid/operators/cross_entropy_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/cross_entropy_op.h b/paddle/fluid/operators/cross_entropy_op.h
index 4a5b20ecb7..ec315695a6 100644
--- a/paddle/fluid/operators/cross_entropy_op.h
+++ b/paddle/fluid/operators/cross_entropy_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/ctc_align_op.cc b/paddle/fluid/operators/ctc_align_op.cc
index 3c7db78813..19e7649660 100644
--- a/paddle/fluid/operators/ctc_align_op.cc
+++ b/paddle/fluid/operators/ctc_align_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/ctc_align_op.cu b/paddle/fluid/operators/ctc_align_op.cu
index f629e0a9f1..54e0b1d9ad 100644
--- a/paddle/fluid/operators/ctc_align_op.cu
+++ b/paddle/fluid/operators/ctc_align_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/ctc_align_op.h b/paddle/fluid/operators/ctc_align_op.h
index 1ef034c2f5..70698d9958 100644
--- a/paddle/fluid/operators/ctc_align_op.h
+++ b/paddle/fluid/operators/ctc_align_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/cum_op.h b/paddle/fluid/operators/cum_op.h
index 3b22491478..999fdcff90 100644
--- a/paddle/fluid/operators/cum_op.h
+++ b/paddle/fluid/operators/cum_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/cumsum_op.cc b/paddle/fluid/operators/cumsum_op.cc
index d15d4e3db3..0da6f18852 100644
--- a/paddle/fluid/operators/cumsum_op.cc
+++ b/paddle/fluid/operators/cumsum_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/cumsum_op.cu b/paddle/fluid/operators/cumsum_op.cu
index e063cc0f65..70e2a1de5e 100644
--- a/paddle/fluid/operators/cumsum_op.cu
+++ b/paddle/fluid/operators/cumsum_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/decayed_adagrad_op.cc b/paddle/fluid/operators/decayed_adagrad_op.cc
index d827155919..5eeb3dee09 100644
--- a/paddle/fluid/operators/decayed_adagrad_op.cc
+++ b/paddle/fluid/operators/decayed_adagrad_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/decayed_adagrad_op.cu b/paddle/fluid/operators/decayed_adagrad_op.cu
index 215d6dbc7d..7da16acf05 100644
--- a/paddle/fluid/operators/decayed_adagrad_op.cu
+++ b/paddle/fluid/operators/decayed_adagrad_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/decayed_adagrad_op.h b/paddle/fluid/operators/decayed_adagrad_op.h
index 52b67586ea..a46af078e0 100644
--- a/paddle/fluid/operators/decayed_adagrad_op.h
+++ b/paddle/fluid/operators/decayed_adagrad_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/detail/grpc_client.cc b/paddle/fluid/operators/detail/grpc_client.cc
index 0d395d347b..ee9044b1f5 100644
--- a/paddle/fluid/operators/detail/grpc_client.cc
+++ b/paddle/fluid/operators/detail/grpc_client.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/detail/grpc_client.h b/paddle/fluid/operators/detail/grpc_client.h
index 314fe8168f..669838810d 100644
--- a/paddle/fluid/operators/detail/grpc_client.h
+++ b/paddle/fluid/operators/detail/grpc_client.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/detail/grpc_server.cc b/paddle/fluid/operators/detail/grpc_server.cc
index 96f4ea797b..2a56751661 100644
--- a/paddle/fluid/operators/detail/grpc_server.cc
+++ b/paddle/fluid/operators/detail/grpc_server.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/detail/grpc_server.h b/paddle/fluid/operators/detail/grpc_server.h
index 1382d17318..e9402ff6aa 100644
--- a/paddle/fluid/operators/detail/grpc_server.h
+++ b/paddle/fluid/operators/detail/grpc_server.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/detail/safe_ref.h b/paddle/fluid/operators/detail/safe_ref.h
index ff2a156f3d..9cb5851deb 100644
--- a/paddle/fluid/operators/detail/safe_ref.h
+++ b/paddle/fluid/operators/detail/safe_ref.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/detail/sendrecvop_utils.cc b/paddle/fluid/operators/detail/sendrecvop_utils.cc
index ba3ae6add6..5403dbc2a0 100644
--- a/paddle/fluid/operators/detail/sendrecvop_utils.cc
+++ b/paddle/fluid/operators/detail/sendrecvop_utils.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/detail/sendrecvop_utils.h b/paddle/fluid/operators/detail/sendrecvop_utils.h
index fed887c027..670d0e1624 100644
--- a/paddle/fluid/operators/detail/sendrecvop_utils.h
+++ b/paddle/fluid/operators/detail/sendrecvop_utils.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/detail/simple_block_queue.h b/paddle/fluid/operators/detail/simple_block_queue.h
index c7f5ff4b5f..36b58b0c67 100644
--- a/paddle/fluid/operators/detail/simple_block_queue.h
+++ b/paddle/fluid/operators/detail/simple_block_queue.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/detail/strided_memcpy.h b/paddle/fluid/operators/detail/strided_memcpy.h
index d7a7eed50b..bac5cdc99c 100644
--- a/paddle/fluid/operators/detail/strided_memcpy.h
+++ b/paddle/fluid/operators/detail/strided_memcpy.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/detection_map_op.cc b/paddle/fluid/operators/detection_map_op.cc
index 48308a11b4..0af3ba621a 100644
--- a/paddle/fluid/operators/detection_map_op.cc
+++ b/paddle/fluid/operators/detection_map_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/detection_map_op.h b/paddle/fluid/operators/detection_map_op.h
index 0f5f588e9c..39d17a7cb3 100644
--- a/paddle/fluid/operators/detection_map_op.h
+++ b/paddle/fluid/operators/detection_map_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/detection_output_op.cc b/paddle/fluid/operators/detection_output_op.cc
index 6dee522295..f752047591 100644
--- a/paddle/fluid/operators/detection_output_op.cc
+++ b/paddle/fluid/operators/detection_output_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/detection_output_op.cu.cc b/paddle/fluid/operators/detection_output_op.cu.cc
index 309e03a25b..0f48765c9c 100644
--- a/paddle/fluid/operators/detection_output_op.cu.cc
+++ b/paddle/fluid/operators/detection_output_op.cu.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/detection_output_op.h b/paddle/fluid/operators/detection_output_op.h
index 05e5b72bd3..0aa5fc010d 100644
--- a/paddle/fluid/operators/detection_output_op.h
+++ b/paddle/fluid/operators/detection_output_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/dropout_op.cc b/paddle/fluid/operators/dropout_op.cc
index e1dc900512..1074ed6acc 100644
--- a/paddle/fluid/operators/dropout_op.cc
+++ b/paddle/fluid/operators/dropout_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/dropout_op.cu b/paddle/fluid/operators/dropout_op.cu
index a4a96d48f9..d6f9c04359 100644
--- a/paddle/fluid/operators/dropout_op.cu
+++ b/paddle/fluid/operators/dropout_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/dropout_op.h b/paddle/fluid/operators/dropout_op.h
index 9dd1f33669..209e4dec17 100644
--- a/paddle/fluid/operators/dropout_op.h
+++ b/paddle/fluid/operators/dropout_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/edit_distance_op.cc b/paddle/fluid/operators/edit_distance_op.cc
index ae82408da7..dbcbfec971 100644
--- a/paddle/fluid/operators/edit_distance_op.cc
+++ b/paddle/fluid/operators/edit_distance_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/edit_distance_op.cu b/paddle/fluid/operators/edit_distance_op.cu
index bdfead75e7..3b89ad5d49 100644
--- a/paddle/fluid/operators/edit_distance_op.cu
+++ b/paddle/fluid/operators/edit_distance_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/edit_distance_op.h b/paddle/fluid/operators/edit_distance_op.h
index 205e16e6bf..73d0af490b 100644
--- a/paddle/fluid/operators/edit_distance_op.h
+++ b/paddle/fluid/operators/edit_distance_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/elementwise_add_op.cc b/paddle/fluid/operators/elementwise_add_op.cc
index 5b9947b8c9..e9068fcd50 100644
--- a/paddle/fluid/operators/elementwise_add_op.cc
+++ b/paddle/fluid/operators/elementwise_add_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/elementwise_add_op.cu b/paddle/fluid/operators/elementwise_add_op.cu
index 2ac3a998ec..19dc4a5215 100644
--- a/paddle/fluid/operators/elementwise_add_op.cu
+++ b/paddle/fluid/operators/elementwise_add_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/elementwise_add_op.h b/paddle/fluid/operators/elementwise_add_op.h
index 248e3b9d61..3c546bf3e4 100644
--- a/paddle/fluid/operators/elementwise_add_op.h
+++ b/paddle/fluid/operators/elementwise_add_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/elementwise_div_op.cc b/paddle/fluid/operators/elementwise_div_op.cc
index 818ae82f44..6f9a090c8e 100644
--- a/paddle/fluid/operators/elementwise_div_op.cc
+++ b/paddle/fluid/operators/elementwise_div_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/elementwise_div_op.cu b/paddle/fluid/operators/elementwise_div_op.cu
index d1bb7a474c..588d1f7420 100644
--- a/paddle/fluid/operators/elementwise_div_op.cu
+++ b/paddle/fluid/operators/elementwise_div_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/elementwise_div_op.h b/paddle/fluid/operators/elementwise_div_op.h
index 8e0726d946..6bcc577456 100644
--- a/paddle/fluid/operators/elementwise_div_op.h
+++ b/paddle/fluid/operators/elementwise_div_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/elementwise_max_op.cc b/paddle/fluid/operators/elementwise_max_op.cc
index 1331bcadc8..61da7c5944 100644
--- a/paddle/fluid/operators/elementwise_max_op.cc
+++ b/paddle/fluid/operators/elementwise_max_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/elementwise_max_op.cu b/paddle/fluid/operators/elementwise_max_op.cu
index 7f0259ad00..32c99835d6 100644
--- a/paddle/fluid/operators/elementwise_max_op.cu
+++ b/paddle/fluid/operators/elementwise_max_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/elementwise_max_op.h b/paddle/fluid/operators/elementwise_max_op.h
index e1db9bcc01..ab3a3d5827 100644
--- a/paddle/fluid/operators/elementwise_max_op.h
+++ b/paddle/fluid/operators/elementwise_max_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/elementwise_min_op.cc b/paddle/fluid/operators/elementwise_min_op.cc
index 1d69099c8e..c74ff36db1 100644
--- a/paddle/fluid/operators/elementwise_min_op.cc
+++ b/paddle/fluid/operators/elementwise_min_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/elementwise_min_op.cu b/paddle/fluid/operators/elementwise_min_op.cu
index ed53204735..a237c9c503 100644
--- a/paddle/fluid/operators/elementwise_min_op.cu
+++ b/paddle/fluid/operators/elementwise_min_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/elementwise_min_op.h b/paddle/fluid/operators/elementwise_min_op.h
index bfe213dd43..f0eec9d246 100644
--- a/paddle/fluid/operators/elementwise_min_op.h
+++ b/paddle/fluid/operators/elementwise_min_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/elementwise_mul_op.cc b/paddle/fluid/operators/elementwise_mul_op.cc
index 0cb96f21d1..5d7f2cdffd 100644
--- a/paddle/fluid/operators/elementwise_mul_op.cc
+++ b/paddle/fluid/operators/elementwise_mul_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/elementwise_mul_op.cu b/paddle/fluid/operators/elementwise_mul_op.cu
index d72b6250ee..2fb1b4bee6 100644
--- a/paddle/fluid/operators/elementwise_mul_op.cu
+++ b/paddle/fluid/operators/elementwise_mul_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/elementwise_mul_op.h b/paddle/fluid/operators/elementwise_mul_op.h
index dc292eb1e7..46d69ed87d 100644
--- a/paddle/fluid/operators/elementwise_mul_op.h
+++ b/paddle/fluid/operators/elementwise_mul_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/elementwise_op.h b/paddle/fluid/operators/elementwise_op.h
index 38f83d7ad3..06bcd0be64 100644
--- a/paddle/fluid/operators/elementwise_op.h
+++ b/paddle/fluid/operators/elementwise_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/elementwise_op_function.h b/paddle/fluid/operators/elementwise_op_function.h
index c1269382a4..0ee7291f04 100644
--- a/paddle/fluid/operators/elementwise_op_function.h
+++ b/paddle/fluid/operators/elementwise_op_function.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/elementwise_pow_op.cc b/paddle/fluid/operators/elementwise_pow_op.cc
index 911b5dbd25..60302c5e59 100644
--- a/paddle/fluid/operators/elementwise_pow_op.cc
+++ b/paddle/fluid/operators/elementwise_pow_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/elementwise_pow_op.cu b/paddle/fluid/operators/elementwise_pow_op.cu
index 2996600738..1f19ebd470 100644
--- a/paddle/fluid/operators/elementwise_pow_op.cu
+++ b/paddle/fluid/operators/elementwise_pow_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/paddle/fluid/operators/elementwise_pow_op.h b/paddle/fluid/operators/elementwise_pow_op.h
index b793c1eae0..8c1c5f9f98 100644
--- a/paddle/fluid/operators/elementwise_pow_op.h
+++ b/paddle/fluid/operators/elementwise_pow_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/elementwise_sub_op.cc b/paddle/fluid/operators/elementwise_sub_op.cc
index 46ce01c7cf..6f770820c8 100644
--- a/paddle/fluid/operators/elementwise_sub_op.cc
+++ b/paddle/fluid/operators/elementwise_sub_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/elementwise_sub_op.cu b/paddle/fluid/operators/elementwise_sub_op.cu
index eb09d6c5ed..8709f686f9 100644
--- a/paddle/fluid/operators/elementwise_sub_op.cu
+++ b/paddle/fluid/operators/elementwise_sub_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/elementwise_sub_op.h b/paddle/fluid/operators/elementwise_sub_op.h
index af2d497b9a..a8fc242ed7 100644
--- a/paddle/fluid/operators/elementwise_sub_op.h
+++ b/paddle/fluid/operators/elementwise_sub_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/expand_op.cc b/paddle/fluid/operators/expand_op.cc
index ccb9a94856..51a66bd832 100644
--- a/paddle/fluid/operators/expand_op.cc
+++ b/paddle/fluid/operators/expand_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/expand_op.cu b/paddle/fluid/operators/expand_op.cu
index 8a9f39708b..60363bfc86 100644
--- a/paddle/fluid/operators/expand_op.cu
+++ b/paddle/fluid/operators/expand_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/expand_op.h b/paddle/fluid/operators/expand_op.h
index 8df1cd34d7..953d75adae 100644
--- a/paddle/fluid/operators/expand_op.h
+++ b/paddle/fluid/operators/expand_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/feed_op.cc b/paddle/fluid/operators/feed_op.cc
index 41fa69a097..438d975429 100644
--- a/paddle/fluid/operators/feed_op.cc
+++ b/paddle/fluid/operators/feed_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/fetch_op.cc b/paddle/fluid/operators/fetch_op.cc
index 6cb5565013..2684e64634 100644
--- a/paddle/fluid/operators/fetch_op.cc
+++ b/paddle/fluid/operators/fetch_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/fill_constant_batch_size_like_op.cc b/paddle/fluid/operators/fill_constant_batch_size_like_op.cc
index e6992ba371..a36248531e 100644
--- a/paddle/fluid/operators/fill_constant_batch_size_like_op.cc
+++ b/paddle/fluid/operators/fill_constant_batch_size_like_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/fill_constant_batch_size_like_op.cu.cc b/paddle/fluid/operators/fill_constant_batch_size_like_op.cu.cc
index b4f4d2a503..2cbbd05bfb 100644
--- a/paddle/fluid/operators/fill_constant_batch_size_like_op.cu.cc
+++ b/paddle/fluid/operators/fill_constant_batch_size_like_op.cu.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/fill_constant_batch_size_like_op.h b/paddle/fluid/operators/fill_constant_batch_size_like_op.h
index da4a20d99a..2a7df149a9 100644
--- a/paddle/fluid/operators/fill_constant_batch_size_like_op.h
+++ b/paddle/fluid/operators/fill_constant_batch_size_like_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/fill_constant_op.cc b/paddle/fluid/operators/fill_constant_op.cc
index 6dd58d28db..0b65c83d3a 100644
--- a/paddle/fluid/operators/fill_constant_op.cc
+++ b/paddle/fluid/operators/fill_constant_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/fill_op.cc b/paddle/fluid/operators/fill_op.cc
index 0b97c9c282..c505c739d4 100644
--- a/paddle/fluid/operators/fill_op.cc
+++ b/paddle/fluid/operators/fill_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/fill_zeros_like_op.cc b/paddle/fluid/operators/fill_zeros_like_op.cc
index 958bfb1557..58c814ba64 100644
--- a/paddle/fluid/operators/fill_zeros_like_op.cc
+++ b/paddle/fluid/operators/fill_zeros_like_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/fill_zeros_like_op.cu.cc b/paddle/fluid/operators/fill_zeros_like_op.cu.cc
index 07078573d8..9538177460 100644
--- a/paddle/fluid/operators/fill_zeros_like_op.cu.cc
+++ b/paddle/fluid/operators/fill_zeros_like_op.cu.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/fill_zeros_like_op.h b/paddle/fluid/operators/fill_zeros_like_op.h
index 141c3809e9..4bbe0df6b6 100644
--- a/paddle/fluid/operators/fill_zeros_like_op.h
+++ b/paddle/fluid/operators/fill_zeros_like_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/ftrl_op.cc b/paddle/fluid/operators/ftrl_op.cc
index e72a173751..0a456f0981 100644
--- a/paddle/fluid/operators/ftrl_op.cc
+++ b/paddle/fluid/operators/ftrl_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/ftrl_op.cu b/paddle/fluid/operators/ftrl_op.cu
index dbdfcb927e..e7371c80da 100644
--- a/paddle/fluid/operators/ftrl_op.cu
+++ b/paddle/fluid/operators/ftrl_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 You may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/ftrl_op.h b/paddle/fluid/operators/ftrl_op.h
index 0a9405fcef..6f821e7e99 100644
--- a/paddle/fluid/operators/ftrl_op.h
+++ b/paddle/fluid/operators/ftrl_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/gather.cu.h b/paddle/fluid/operators/gather.cu.h
index af5898e29e..d74d4db925 100644
--- a/paddle/fluid/operators/gather.cu.h
+++ b/paddle/fluid/operators/gather.cu.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/gather.h b/paddle/fluid/operators/gather.h
index 287732eeb6..d15cb55647 100644
--- a/paddle/fluid/operators/gather.h
+++ b/paddle/fluid/operators/gather.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/gather_op.cc b/paddle/fluid/operators/gather_op.cc
index dceeb71ee3..6be06b8816 100644
--- a/paddle/fluid/operators/gather_op.cc
+++ b/paddle/fluid/operators/gather_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/gather_op.cu b/paddle/fluid/operators/gather_op.cu
index 484f423262..3819549c71 100644
--- a/paddle/fluid/operators/gather_op.cu
+++ b/paddle/fluid/operators/gather_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/gather_op.h b/paddle/fluid/operators/gather_op.h
index 7ba4a31c81..5a8b1ebbe3 100644
--- a/paddle/fluid/operators/gather_op.h
+++ b/paddle/fluid/operators/gather_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/gather_test.cc b/paddle/fluid/operators/gather_test.cc
index 4d86cf5ce3..7625bd45d9 100644
--- a/paddle/fluid/operators/gather_test.cc
+++ b/paddle/fluid/operators/gather_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/gaussian_random_op.cc b/paddle/fluid/operators/gaussian_random_op.cc
index b090f87597..cf3a528bdd 100644
--- a/paddle/fluid/operators/gaussian_random_op.cc
+++ b/paddle/fluid/operators/gaussian_random_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/gaussian_random_op.cu b/paddle/fluid/operators/gaussian_random_op.cu
index 70d655d4bb..7340590c3e 100644
--- a/paddle/fluid/operators/gaussian_random_op.cu
+++ b/paddle/fluid/operators/gaussian_random_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/get_places_op.cc b/paddle/fluid/operators/get_places_op.cc
index ef635048bd..8555b0778f 100644
--- a/paddle/fluid/operators/get_places_op.cc
+++ b/paddle/fluid/operators/get_places_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/gru_op.cc b/paddle/fluid/operators/gru_op.cc
index 1436e55b0e..2a91dcbcd4 100644
--- a/paddle/fluid/operators/gru_op.cc
+++ b/paddle/fluid/operators/gru_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/gru_op.cu.cc b/paddle/fluid/operators/gru_op.cu.cc
index e908d01d29..baf455a840 100644
--- a/paddle/fluid/operators/gru_op.cu.cc
+++ b/paddle/fluid/operators/gru_op.cu.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/gru_op.h b/paddle/fluid/operators/gru_op.h
index 37f3ae1a83..0886bebc41 100644
--- a/paddle/fluid/operators/gru_op.h
+++ b/paddle/fluid/operators/gru_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/gru_unit_op.cc b/paddle/fluid/operators/gru_unit_op.cc
index 21ad3aeb49..f4c766db0a 100644
--- a/paddle/fluid/operators/gru_unit_op.cc
+++ b/paddle/fluid/operators/gru_unit_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/gru_unit_op.cu b/paddle/fluid/operators/gru_unit_op.cu
index 88b707fd13..fc92b3d4a7 100644
--- a/paddle/fluid/operators/gru_unit_op.cu
+++ b/paddle/fluid/operators/gru_unit_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/gru_unit_op.h b/paddle/fluid/operators/gru_unit_op.h
index c4031a5a57..15d91ca305 100644
--- a/paddle/fluid/operators/gru_unit_op.h
+++ b/paddle/fluid/operators/gru_unit_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/hinge_loss_op.cc b/paddle/fluid/operators/hinge_loss_op.cc
index f644c22c9f..efe84f1409 100644
--- a/paddle/fluid/operators/hinge_loss_op.cc
+++ b/paddle/fluid/operators/hinge_loss_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/hinge_loss_op.cu b/paddle/fluid/operators/hinge_loss_op.cu
index cb53a9b7f4..9c0a85bee6 100644
--- a/paddle/fluid/operators/hinge_loss_op.cu
+++ b/paddle/fluid/operators/hinge_loss_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/hinge_loss_op.h b/paddle/fluid/operators/hinge_loss_op.h
index 1e924d236e..10c17a0982 100644
--- a/paddle/fluid/operators/hinge_loss_op.h
+++ b/paddle/fluid/operators/hinge_loss_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/huber_loss_op.cc b/paddle/fluid/operators/huber_loss_op.cc
index dc1f609dcf..134b23b461 100644
--- a/paddle/fluid/operators/huber_loss_op.cc
+++ b/paddle/fluid/operators/huber_loss_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/huber_loss_op.cu b/paddle/fluid/operators/huber_loss_op.cu
index ef5120c69d..659464df9d 100644
--- a/paddle/fluid/operators/huber_loss_op.cu
+++ b/paddle/fluid/operators/huber_loss_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/huber_loss_op.h b/paddle/fluid/operators/huber_loss_op.h
index caca89fcf6..9efda3dfc9 100644
--- a/paddle/fluid/operators/huber_loss_op.h
+++ b/paddle/fluid/operators/huber_loss_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/im2sequence_op.cc b/paddle/fluid/operators/im2sequence_op.cc
index 936e5fe49e..5bc28e0a52 100644
--- a/paddle/fluid/operators/im2sequence_op.cc
+++ b/paddle/fluid/operators/im2sequence_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/im2sequence_op.cu b/paddle/fluid/operators/im2sequence_op.cu
index 1e7bf46312..e0a5a90c1c 100644
--- a/paddle/fluid/operators/im2sequence_op.cu
+++ b/paddle/fluid/operators/im2sequence_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/im2sequence_op.h b/paddle/fluid/operators/im2sequence_op.h
index 59456f0ea2..4193819b78 100644
--- a/paddle/fluid/operators/im2sequence_op.h
+++ b/paddle/fluid/operators/im2sequence_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 You may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/increment_op.cc b/paddle/fluid/operators/increment_op.cc
index de4949584b..6b5c3db13c 100644
--- a/paddle/fluid/operators/increment_op.cc
+++ b/paddle/fluid/operators/increment_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/iou_similarity_op.cc b/paddle/fluid/operators/iou_similarity_op.cc
index c2e452cdfa..ffbd7c7814 100755
--- a/paddle/fluid/operators/iou_similarity_op.cc
+++ b/paddle/fluid/operators/iou_similarity_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/iou_similarity_op.cu b/paddle/fluid/operators/iou_similarity_op.cu index f8df1f4aa4..f40a388d62 100755 --- a/paddle/fluid/operators/iou_similarity_op.cu +++ b/paddle/fluid/operators/iou_similarity_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/iou_similarity_op.h b/paddle/fluid/operators/iou_similarity_op.h index 2fb1b5f707..c76448c736 100644 --- a/paddle/fluid/operators/iou_similarity_op.h +++ b/paddle/fluid/operators/iou_similarity_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/is_empty_op.cc b/paddle/fluid/operators/is_empty_op.cc index dac8505e3f..2a7be90dab 100644 --- a/paddle/fluid/operators/is_empty_op.cc +++ b/paddle/fluid/operators/is_empty_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/l1_norm_op.cc b/paddle/fluid/operators/l1_norm_op.cc index 974ee404f8..963b0587c3 100644 --- a/paddle/fluid/operators/l1_norm_op.cc +++ b/paddle/fluid/operators/l1_norm_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/l1_norm_op.cu b/paddle/fluid/operators/l1_norm_op.cu index 5e9e864a34..1b48571dd7 100644 --- a/paddle/fluid/operators/l1_norm_op.cu +++ b/paddle/fluid/operators/l1_norm_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/l1_norm_op.h b/paddle/fluid/operators/l1_norm_op.h index 7ddf2ac6a9..7c6503bb21 100644 --- a/paddle/fluid/operators/l1_norm_op.h +++ b/paddle/fluid/operators/l1_norm_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/label_smooth_op.cc b/paddle/fluid/operators/label_smooth_op.cc index c018965bee..eef25f8a06 100644 --- a/paddle/fluid/operators/label_smooth_op.cc +++ b/paddle/fluid/operators/label_smooth_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/paddle/fluid/operators/label_smooth_op.cu b/paddle/fluid/operators/label_smooth_op.cu index 4a40a4e9ec..ab259b48e3 100644 --- a/paddle/fluid/operators/label_smooth_op.cu +++ b/paddle/fluid/operators/label_smooth_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/label_smooth_op.h b/paddle/fluid/operators/label_smooth_op.h index 15752377f6..f56fd95e96 100644 --- a/paddle/fluid/operators/label_smooth_op.h +++ b/paddle/fluid/operators/label_smooth_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/layer_norm_op.cc b/paddle/fluid/operators/layer_norm_op.cc index 60e37ed01b..88b3b08af5 100644 --- a/paddle/fluid/operators/layer_norm_op.cc +++ b/paddle/fluid/operators/layer_norm_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/layer_norm_op.cu b/paddle/fluid/operators/layer_norm_op.cu index aa54fd5415..6840e1e08f 100644 --- a/paddle/fluid/operators/layer_norm_op.cu +++ b/paddle/fluid/operators/layer_norm_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/layer_norm_op.h b/paddle/fluid/operators/layer_norm_op.h index 60c0b07add..84f5a40aac 100644 --- a/paddle/fluid/operators/layer_norm_op.h +++ b/paddle/fluid/operators/layer_norm_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/linear_chain_crf_op.cc b/paddle/fluid/operators/linear_chain_crf_op.cc index 3e1dfa4948..ef568a578b 100644 --- a/paddle/fluid/operators/linear_chain_crf_op.cc +++ b/paddle/fluid/operators/linear_chain_crf_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/linear_chain_crf_op.cu b/paddle/fluid/operators/linear_chain_crf_op.cu index 6e04e76eeb..4f7738e8c3 100644 --- a/paddle/fluid/operators/linear_chain_crf_op.cu +++ b/paddle/fluid/operators/linear_chain_crf_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/paddle/fluid/operators/linear_chain_crf_op.h b/paddle/fluid/operators/linear_chain_crf_op.h index 15b64c09bf..800a1303e1 100644 --- a/paddle/fluid/operators/linear_chain_crf_op.h +++ b/paddle/fluid/operators/linear_chain_crf_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index 8e88a7dcf1..6c0292ecb2 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/load_combine_op.cc b/paddle/fluid/operators/load_combine_op.cc index d043702eba..ba8fc4a683 100644 --- a/paddle/fluid/operators/load_combine_op.cc +++ b/paddle/fluid/operators/load_combine_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/load_op.cc b/paddle/fluid/operators/load_op.cc index 9393cccfc6..d72b7a7eb9 100644 --- a/paddle/fluid/operators/load_op.cc +++ b/paddle/fluid/operators/load_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/lod_array_length_op.cc b/paddle/fluid/operators/lod_array_length_op.cc index daa57c2045..e621240577 100644 --- a/paddle/fluid/operators/lod_array_length_op.cc +++ b/paddle/fluid/operators/lod_array_length_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/lod_rank_table_op.cc b/paddle/fluid/operators/lod_rank_table_op.cc index 3264766d6b..2d01ed6737 100644 --- a/paddle/fluid/operators/lod_rank_table_op.cc +++ b/paddle/fluid/operators/lod_rank_table_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/lod_reset_op.cc b/paddle/fluid/operators/lod_reset_op.cc index 55ae71c181..6a66297cb8 100644 --- a/paddle/fluid/operators/lod_reset_op.cc +++ b/paddle/fluid/operators/lod_reset_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/paddle/fluid/operators/lod_reset_op.cu b/paddle/fluid/operators/lod_reset_op.cu index 8bfc8bd3bf..b0e87a851a 100644 --- a/paddle/fluid/operators/lod_reset_op.cu +++ b/paddle/fluid/operators/lod_reset_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/lod_reset_op.h b/paddle/fluid/operators/lod_reset_op.h index a10efee0bd..e612bc2d36 100644 --- a/paddle/fluid/operators/lod_reset_op.h +++ b/paddle/fluid/operators/lod_reset_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/lod_tensor_to_array_op.cc b/paddle/fluid/operators/lod_tensor_to_array_op.cc index d6e24dc976..be47fdfd04 100644 --- a/paddle/fluid/operators/lod_tensor_to_array_op.cc +++ b/paddle/fluid/operators/lod_tensor_to_array_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/log_loss_op.cc b/paddle/fluid/operators/log_loss_op.cc index 6c5cd29568..f44996d8ac 100644 --- a/paddle/fluid/operators/log_loss_op.cc +++ b/paddle/fluid/operators/log_loss_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/log_loss_op.cu b/paddle/fluid/operators/log_loss_op.cu index c164a6d040..e8bf7d8159 100644 --- a/paddle/fluid/operators/log_loss_op.cu +++ b/paddle/fluid/operators/log_loss_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/log_loss_op.h b/paddle/fluid/operators/log_loss_op.h index 67fac7cfe5..e62de17a98 100644 --- a/paddle/fluid/operators/log_loss_op.h +++ b/paddle/fluid/operators/log_loss_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/logical_op.cc b/paddle/fluid/operators/logical_op.cc index ff49895df1..6a7db31cf3 100644 --- a/paddle/fluid/operators/logical_op.cc +++ b/paddle/fluid/operators/logical_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/paddle/fluid/operators/logical_op.cu b/paddle/fluid/operators/logical_op.cu index 2b17444061..7ffe4dfc26 100644 --- a/paddle/fluid/operators/logical_op.cu +++ b/paddle/fluid/operators/logical_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/logical_op.h b/paddle/fluid/operators/logical_op.h index f6d5866c2c..4a83e0fda6 100644 --- a/paddle/fluid/operators/logical_op.h +++ b/paddle/fluid/operators/logical_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/lookup_table_op.cc b/paddle/fluid/operators/lookup_table_op.cc index 2c555f1a3f..d338553f7c 100644 --- a/paddle/fluid/operators/lookup_table_op.cc +++ b/paddle/fluid/operators/lookup_table_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/lookup_table_op.cu b/paddle/fluid/operators/lookup_table_op.cu index 801adba5a4..923340f461 100644 --- a/paddle/fluid/operators/lookup_table_op.cu +++ b/paddle/fluid/operators/lookup_table_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/lookup_table_op.h b/paddle/fluid/operators/lookup_table_op.h index d264496882..d88b034e91 100644 --- a/paddle/fluid/operators/lookup_table_op.h +++ b/paddle/fluid/operators/lookup_table_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/lrn_op.cc b/paddle/fluid/operators/lrn_op.cc index c84507f231..b0c213d637 100644 --- a/paddle/fluid/operators/lrn_op.cc +++ b/paddle/fluid/operators/lrn_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/lrn_op.cu b/paddle/fluid/operators/lrn_op.cu index 03112bf3e0..64f3fea6be 100644 --- a/paddle/fluid/operators/lrn_op.cu +++ b/paddle/fluid/operators/lrn_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/paddle/fluid/operators/lrn_op.h b/paddle/fluid/operators/lrn_op.h index b7b78b4591..95796f7eec 100644 --- a/paddle/fluid/operators/lrn_op.h +++ b/paddle/fluid/operators/lrn_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/lstm_op.cc b/paddle/fluid/operators/lstm_op.cc index d1f1b5f235..d75537741e 100644 --- a/paddle/fluid/operators/lstm_op.cc +++ b/paddle/fluid/operators/lstm_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/lstm_op.cu.cc b/paddle/fluid/operators/lstm_op.cu.cc index 679d02b1f9..c1cbfada41 100644 --- a/paddle/fluid/operators/lstm_op.cu.cc +++ b/paddle/fluid/operators/lstm_op.cu.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/lstm_op.h b/paddle/fluid/operators/lstm_op.h index 1c48495533..11f9f223b5 100644 --- a/paddle/fluid/operators/lstm_op.h +++ b/paddle/fluid/operators/lstm_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/lstm_unit_op.cc b/paddle/fluid/operators/lstm_unit_op.cc index 3d33d47e0c..b3c9d7c34d 100644 --- a/paddle/fluid/operators/lstm_unit_op.cc +++ b/paddle/fluid/operators/lstm_unit_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/lstm_unit_op.cu b/paddle/fluid/operators/lstm_unit_op.cu index 12ebffca37..76245a1b5a 100644 --- a/paddle/fluid/operators/lstm_unit_op.cu +++ b/paddle/fluid/operators/lstm_unit_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/lstm_unit_op.h b/paddle/fluid/operators/lstm_unit_op.h index 9f2370fe69..4ead9c2293 100644 --- a/paddle/fluid/operators/lstm_unit_op.h +++ b/paddle/fluid/operators/lstm_unit_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/paddle/fluid/operators/lstmp_op.cc b/paddle/fluid/operators/lstmp_op.cc index 2d30edf5c3..a881ef82ec 100644 --- a/paddle/fluid/operators/lstmp_op.cc +++ b/paddle/fluid/operators/lstmp_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/lstmp_op.cu b/paddle/fluid/operators/lstmp_op.cu index bcefb94c75..f601b897af 100644 --- a/paddle/fluid/operators/lstmp_op.cu +++ b/paddle/fluid/operators/lstmp_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/lstmp_op.h b/paddle/fluid/operators/lstmp_op.h index 22ef472186..dfa7f74d51 100644 --- a/paddle/fluid/operators/lstmp_op.h +++ b/paddle/fluid/operators/lstmp_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/margin_rank_loss_op.cc b/paddle/fluid/operators/margin_rank_loss_op.cc index fc31befb20..b146b50883 100644 --- a/paddle/fluid/operators/margin_rank_loss_op.cc +++ b/paddle/fluid/operators/margin_rank_loss_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/margin_rank_loss_op.cu b/paddle/fluid/operators/margin_rank_loss_op.cu index ca4593a48d..d7e77e9230 100644 --- a/paddle/fluid/operators/margin_rank_loss_op.cu +++ b/paddle/fluid/operators/margin_rank_loss_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/margin_rank_loss_op.h b/paddle/fluid/operators/margin_rank_loss_op.h index 934a5da0f8..c1bf445107 100644 --- a/paddle/fluid/operators/margin_rank_loss_op.h +++ b/paddle/fluid/operators/margin_rank_loss_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/math/context_project.cc b/paddle/fluid/operators/math/context_project.cc index b73d976d1b..537d0b4786 100644 --- a/paddle/fluid/operators/math/context_project.cc +++ b/paddle/fluid/operators/math/context_project.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/paddle/fluid/operators/math/context_project.cu b/paddle/fluid/operators/math/context_project.cu index bbd36a6e8f..16205c0e14 100644 --- a/paddle/fluid/operators/math/context_project.cu +++ b/paddle/fluid/operators/math/context_project.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/math/context_project.h b/paddle/fluid/operators/math/context_project.h index 2fe593ec3a..83f6ae45fc 100644 --- a/paddle/fluid/operators/math/context_project.h +++ b/paddle/fluid/operators/math/context_project.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/math/cos_sim_functor.cc b/paddle/fluid/operators/math/cos_sim_functor.cc index 701a9c23c0..cbe1699912 100644 --- a/paddle/fluid/operators/math/cos_sim_functor.cc +++ b/paddle/fluid/operators/math/cos_sim_functor.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/math/cos_sim_functor.cu b/paddle/fluid/operators/math/cos_sim_functor.cu index 0323680870..55c1e72633 100644 --- a/paddle/fluid/operators/math/cos_sim_functor.cu +++ b/paddle/fluid/operators/math/cos_sim_functor.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/math/cos_sim_functor.h b/paddle/fluid/operators/math/cos_sim_functor.h index 445d94f975..30ea5e60e8 100644 --- a/paddle/fluid/operators/math/cos_sim_functor.h +++ b/paddle/fluid/operators/math/cos_sim_functor.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/math/cross_entropy.cc b/paddle/fluid/operators/math/cross_entropy.cc index 76abd03ff8..fc0fca5ad3 100644 --- a/paddle/fluid/operators/math/cross_entropy.cc +++ b/paddle/fluid/operators/math/cross_entropy.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/math/cross_entropy.cu b/paddle/fluid/operators/math/cross_entropy.cu index 39222c484c..f4935c2813 100644 --- a/paddle/fluid/operators/math/cross_entropy.cu +++ b/paddle/fluid/operators/math/cross_entropy.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. 
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/cross_entropy.h b/paddle/fluid/operators/math/cross_entropy.h
index 2fe216a805..adc5b3fe47 100644
--- a/paddle/fluid/operators/math/cross_entropy.h
+++ b/paddle/fluid/operators/math/cross_entropy.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/depthwise_conv.cu b/paddle/fluid/operators/math/depthwise_conv.cu
index 7b75e59307..a5e6e4031b 100644
--- a/paddle/fluid/operators/math/depthwise_conv.cu
+++ b/paddle/fluid/operators/math/depthwise_conv.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/depthwise_conv.h b/paddle/fluid/operators/math/depthwise_conv.h
index c3081e7a0d..081bda891d 100644
--- a/paddle/fluid/operators/math/depthwise_conv.h
+++ b/paddle/fluid/operators/math/depthwise_conv.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/detail/activation_functions.h b/paddle/fluid/operators/math/detail/activation_functions.h
index 3af7ba790c..d205ebf210 100644
--- a/paddle/fluid/operators/math/detail/activation_functions.h
+++ b/paddle/fluid/operators/math/detail/activation_functions.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/detail/avx_functions.cc b/paddle/fluid/operators/math/detail/avx_functions.cc
index 838cd30e3d..b95109d3f7 100644
--- a/paddle/fluid/operators/math/detail/avx_functions.cc
+++ b/paddle/fluid/operators/math/detail/avx_functions.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/detail/gru_cpu_kernel.h b/paddle/fluid/operators/math/detail/gru_cpu_kernel.h
index 75c5c8eb29..1e5ff8ef46 100644
--- a/paddle/fluid/operators/math/detail/gru_cpu_kernel.h
+++ b/paddle/fluid/operators/math/detail/gru_cpu_kernel.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/detail/gru_gpu_kernel.h b/paddle/fluid/operators/math/detail/gru_gpu_kernel.h
index fbf69d4a85..6576525627 100644
--- a/paddle/fluid/operators/math/detail/gru_gpu_kernel.h
+++ b/paddle/fluid/operators/math/detail/gru_gpu_kernel.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/detail/gru_kernel.h b/paddle/fluid/operators/math/detail/gru_kernel.h
index 705787e2ff..991f2e758c 100644
--- a/paddle/fluid/operators/math/detail/gru_kernel.h
+++ b/paddle/fluid/operators/math/detail/gru_kernel.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/detail/lstm_cpu_kernel.h b/paddle/fluid/operators/math/detail/lstm_cpu_kernel.h
index bf26509ba1..6ad77830fd 100644
--- a/paddle/fluid/operators/math/detail/lstm_cpu_kernel.h
+++ b/paddle/fluid/operators/math/detail/lstm_cpu_kernel.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/detail/lstm_gpu_kernel.h b/paddle/fluid/operators/math/detail/lstm_gpu_kernel.h
index 7865d0c0ba..ee7b16da41 100644
--- a/paddle/fluid/operators/math/detail/lstm_gpu_kernel.h
+++ b/paddle/fluid/operators/math/detail/lstm_gpu_kernel.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/detail/lstm_kernel.h b/paddle/fluid/operators/math/detail/lstm_kernel.h
index 0679cc62ba..9080634f2b 100644
--- a/paddle/fluid/operators/math/detail/lstm_kernel.h
+++ b/paddle/fluid/operators/math/detail/lstm_kernel.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/detection_util.h b/paddle/fluid/operators/math/detection_util.h
index 13e5d406c1..c31764cfaf 100644
--- a/paddle/fluid/operators/math/detection_util.h
+++ b/paddle/fluid/operators/math/detection_util.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/gru_compute.cc b/paddle/fluid/operators/math/gru_compute.cc
index 1003180416..3f044b7751 100644
--- a/paddle/fluid/operators/math/gru_compute.cc
+++ b/paddle/fluid/operators/math/gru_compute.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/paddle/fluid/operators/math/gru_compute.cu b/paddle/fluid/operators/math/gru_compute.cu
index 0d5d5d7a74..27caf3383d 100644
--- a/paddle/fluid/operators/math/gru_compute.cu
+++ b/paddle/fluid/operators/math/gru_compute.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/paddle/fluid/operators/math/gru_compute.h b/paddle/fluid/operators/math/gru_compute.h
index 93e19cf557..c5816b16cd 100644
--- a/paddle/fluid/operators/math/gru_compute.h
+++ b/paddle/fluid/operators/math/gru_compute.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/paddle/fluid/operators/math/im2col.cc b/paddle/fluid/operators/math/im2col.cc
index c298b00bb4..123e10586f 100644
--- a/paddle/fluid/operators/math/im2col.cc
+++ b/paddle/fluid/operators/math/im2col.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/im2col.cu b/paddle/fluid/operators/math/im2col.cu
index c26343aacf..f41c78140f 100644
--- a/paddle/fluid/operators/math/im2col.cu
+++ b/paddle/fluid/operators/math/im2col.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/im2col.h b/paddle/fluid/operators/math/im2col.h
index 525c0f5dda..451ec9d534 100644
--- a/paddle/fluid/operators/math/im2col.h
+++ b/paddle/fluid/operators/math/im2col.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/im2col_test.cc b/paddle/fluid/operators/math/im2col_test.cc
index 59d6a84b89..3051925315 100644
--- a/paddle/fluid/operators/math/im2col_test.cc
+++ b/paddle/fluid/operators/math/im2col_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/lstm_compute.cc b/paddle/fluid/operators/math/lstm_compute.cc
index 09eb89ec58..b6882b4fd8 100644
--- a/paddle/fluid/operators/math/lstm_compute.cc
+++ b/paddle/fluid/operators/math/lstm_compute.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/lstm_compute.cu b/paddle/fluid/operators/math/lstm_compute.cu
index adedee28bd..1233000083 100644
--- a/paddle/fluid/operators/math/lstm_compute.cu
+++ b/paddle/fluid/operators/math/lstm_compute.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/lstm_compute.h b/paddle/fluid/operators/math/lstm_compute.h
index 8610e96cf1..ca2f78e6f3 100644
--- a/paddle/fluid/operators/math/lstm_compute.h
+++ b/paddle/fluid/operators/math/lstm_compute.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/math_function.cc b/paddle/fluid/operators/math/math_function.cc
index 2636dbddde..41eab3ade2 100644
--- a/paddle/fluid/operators/math/math_function.cc
+++ b/paddle/fluid/operators/math/math_function.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/math_function.cu b/paddle/fluid/operators/math/math_function.cu
index 5764da71c8..f8d0349ac5 100644
--- a/paddle/fluid/operators/math/math_function.cu
+++ b/paddle/fluid/operators/math/math_function.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/math_function.h b/paddle/fluid/operators/math/math_function.h
index 84916af1f8..47e2386d05 100644
--- a/paddle/fluid/operators/math/math_function.h
+++ b/paddle/fluid/operators/math/math_function.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/math_function_impl.h b/paddle/fluid/operators/math/math_function_impl.h
index a55ed6c58b..f9d4e45324 100644
--- a/paddle/fluid/operators/math/math_function_impl.h
+++ b/paddle/fluid/operators/math/math_function_impl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/math_function_test.cc b/paddle/fluid/operators/math/math_function_test.cc
index 6cd8e8b35a..25a9d0111e 100644
--- a/paddle/fluid/operators/math/math_function_test.cc
+++ b/paddle/fluid/operators/math/math_function_test.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/math_function_test.cu b/paddle/fluid/operators/math/math_function_test.cu
index 2ef53a8209..f333c6c98e 100644
--- a/paddle/fluid/operators/math/math_function_test.cu
+++ b/paddle/fluid/operators/math/math_function_test.cu
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/matmul.h b/paddle/fluid/operators/math/matmul.h
index 50f79979d9..6e2d35cd0f 100644
--- a/paddle/fluid/operators/math/matmul.h
+++ b/paddle/fluid/operators/math/matmul.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/maxouting.cc b/paddle/fluid/operators/math/maxouting.cc
index 746328cd45..730f71e96b 100644
--- a/paddle/fluid/operators/math/maxouting.cc
+++ b/paddle/fluid/operators/math/maxouting.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/maxouting.cu b/paddle/fluid/operators/math/maxouting.cu
index 68e5dfc3c5..1e1a6a221c 100644
--- a/paddle/fluid/operators/math/maxouting.cu
+++ b/paddle/fluid/operators/math/maxouting.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/maxouting.h b/paddle/fluid/operators/math/maxouting.h
index 0e81790f0a..4166fb5494 100644
--- a/paddle/fluid/operators/math/maxouting.h
+++ b/paddle/fluid/operators/math/maxouting.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/pooling.cc b/paddle/fluid/operators/math/pooling.cc
index 9adb142f14..97a2e81c84 100644
--- a/paddle/fluid/operators/math/pooling.cc
+++ b/paddle/fluid/operators/math/pooling.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/pooling.cu b/paddle/fluid/operators/math/pooling.cu
index c65632de90..274263c69c 100644
--- a/paddle/fluid/operators/math/pooling.cu
+++ b/paddle/fluid/operators/math/pooling.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/pooling.h b/paddle/fluid/operators/math/pooling.h
index 1195038f6a..74cb42f0d0 100644
--- a/paddle/fluid/operators/math/pooling.h
+++ b/paddle/fluid/operators/math/pooling.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/sampler.cc b/paddle/fluid/operators/math/sampler.cc
index 4f1cbfe31a..3ec6538d7f 100644
--- a/paddle/fluid/operators/math/sampler.cc
+++ b/paddle/fluid/operators/math/sampler.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/sampler.h b/paddle/fluid/operators/math/sampler.h
index 8f82089e7b..9d6a6c28c4 100644
--- a/paddle/fluid/operators/math/sampler.h
+++ b/paddle/fluid/operators/math/sampler.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/selected_rows_functor.cc b/paddle/fluid/operators/math/selected_rows_functor.cc
index 01aa37ab35..5da3d15277 100644
--- a/paddle/fluid/operators/math/selected_rows_functor.cc
+++ b/paddle/fluid/operators/math/selected_rows_functor.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/selected_rows_functor.cu b/paddle/fluid/operators/math/selected_rows_functor.cu
index ee3b5d5205..5d78fd9d21 100644
--- a/paddle/fluid/operators/math/selected_rows_functor.cu
+++ b/paddle/fluid/operators/math/selected_rows_functor.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/selected_rows_functor.h b/paddle/fluid/operators/math/selected_rows_functor.h
index 510a9ed8be..18304f83f8 100644
--- a/paddle/fluid/operators/math/selected_rows_functor.h
+++ b/paddle/fluid/operators/math/selected_rows_functor.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/selected_rows_functor_test.cc b/paddle/fluid/operators/math/selected_rows_functor_test.cc
index db6b41cd52..679b6568ad 100644
--- a/paddle/fluid/operators/math/selected_rows_functor_test.cc
+++ b/paddle/fluid/operators/math/selected_rows_functor_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/selected_rows_functor_test.cu b/paddle/fluid/operators/math/selected_rows_functor_test.cu
index b3c4bc9244..cefe239bd2 100644
--- a/paddle/fluid/operators/math/selected_rows_functor_test.cu
+++ b/paddle/fluid/operators/math/selected_rows_functor_test.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/sequence2batch.cc b/paddle/fluid/operators/math/sequence2batch.cc
index 0485070fd9..72bf2ab170 100644
--- a/paddle/fluid/operators/math/sequence2batch.cc
+++ b/paddle/fluid/operators/math/sequence2batch.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/sequence2batch.cu b/paddle/fluid/operators/math/sequence2batch.cu
index 450be80ea2..3185f10d41 100644
--- a/paddle/fluid/operators/math/sequence2batch.cu
+++ b/paddle/fluid/operators/math/sequence2batch.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/sequence2batch.h b/paddle/fluid/operators/math/sequence2batch.h
index 00bd25ab61..e78aafd37d 100644
--- a/paddle/fluid/operators/math/sequence2batch.h
+++ b/paddle/fluid/operators/math/sequence2batch.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/sequence_padding.cc b/paddle/fluid/operators/math/sequence_padding.cc
index ad8cd82567..38bd3b9975 100644
--- a/paddle/fluid/operators/math/sequence_padding.cc
+++ b/paddle/fluid/operators/math/sequence_padding.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/sequence_padding.cu b/paddle/fluid/operators/math/sequence_padding.cu
index c1a3905778..9eb52f6fd9 100644
--- a/paddle/fluid/operators/math/sequence_padding.cu
+++ b/paddle/fluid/operators/math/sequence_padding.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/sequence_padding.h b/paddle/fluid/operators/math/sequence_padding.h
index 0d84f9dcb3..17f044b9d6 100644
--- a/paddle/fluid/operators/math/sequence_padding.h
+++ b/paddle/fluid/operators/math/sequence_padding.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/sequence_padding_test.cc b/paddle/fluid/operators/math/sequence_padding_test.cc
index 147cb37da2..e1177fb0d7 100644
--- a/paddle/fluid/operators/math/sequence_padding_test.cc
+++ b/paddle/fluid/operators/math/sequence_padding_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/sequence_pooling.cc b/paddle/fluid/operators/math/sequence_pooling.cc
index b3b87ec93e..f7a6f2bdf4 100644
--- a/paddle/fluid/operators/math/sequence_pooling.cc
+++ b/paddle/fluid/operators/math/sequence_pooling.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/sequence_pooling.cu b/paddle/fluid/operators/math/sequence_pooling.cu
index c4267e992a..d61407c020 100644
--- a/paddle/fluid/operators/math/sequence_pooling.cu
+++ b/paddle/fluid/operators/math/sequence_pooling.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/sequence_pooling.h b/paddle/fluid/operators/math/sequence_pooling.h
index 9ba9cad74b..ecb76884f6 100644
--- a/paddle/fluid/operators/math/sequence_pooling.h
+++ b/paddle/fluid/operators/math/sequence_pooling.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/sequence_scale.cc b/paddle/fluid/operators/math/sequence_scale.cc
index 427689b971..2c46d4183b 100644
--- a/paddle/fluid/operators/math/sequence_scale.cc
+++ b/paddle/fluid/operators/math/sequence_scale.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/sequence_scale.cu b/paddle/fluid/operators/math/sequence_scale.cu
index 7c081ed7f4..74085153c6 100644
--- a/paddle/fluid/operators/math/sequence_scale.cu
+++ b/paddle/fluid/operators/math/sequence_scale.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/sequence_scale.h b/paddle/fluid/operators/math/sequence_scale.h
index e8e07fd315..6cdcbe21cb 100644
--- a/paddle/fluid/operators/math/sequence_scale.h
+++ b/paddle/fluid/operators/math/sequence_scale.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/softmax.cc b/paddle/fluid/operators/math/softmax.cc
index eab31ec567..78c65af24a 100644
--- a/paddle/fluid/operators/math/softmax.cc
+++ b/paddle/fluid/operators/math/softmax.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/softmax.cu b/paddle/fluid/operators/math/softmax.cu
index 733d7eeee6..38e93fdf15 100644
--- a/paddle/fluid/operators/math/softmax.cu
+++ b/paddle/fluid/operators/math/softmax.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/softmax.h b/paddle/fluid/operators/math/softmax.h
index b7d67d5f12..14b2690c2a 100644
--- a/paddle/fluid/operators/math/softmax.h
+++ b/paddle/fluid/operators/math/softmax.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/softmax_impl.h b/paddle/fluid/operators/math/softmax_impl.h
index f7c61cb647..3e123f7bf5 100644
--- a/paddle/fluid/operators/math/softmax_impl.h
+++ b/paddle/fluid/operators/math/softmax_impl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/unpooling.cc b/paddle/fluid/operators/math/unpooling.cc
index e02bc02e00..13f0845bb8 100644
--- a/paddle/fluid/operators/math/unpooling.cc
+++ b/paddle/fluid/operators/math/unpooling.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/unpooling.cu b/paddle/fluid/operators/math/unpooling.cu
index 2e74270fdf..367f343d51 100644
--- a/paddle/fluid/operators/math/unpooling.cu
+++ b/paddle/fluid/operators/math/unpooling.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/unpooling.h b/paddle/fluid/operators/math/unpooling.h
index f245ba7ba8..74ca39d114 100644
--- a/paddle/fluid/operators/math/unpooling.h
+++ b/paddle/fluid/operators/math/unpooling.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/vol2col.cc b/paddle/fluid/operators/math/vol2col.cc
index ded0bbc744..09e9f85cca 100644
--- a/paddle/fluid/operators/math/vol2col.cc
+++ b/paddle/fluid/operators/math/vol2col.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/vol2col.cu b/paddle/fluid/operators/math/vol2col.cu
index 35ef24c7f5..619730d394 100644
--- a/paddle/fluid/operators/math/vol2col.cu
+++ b/paddle/fluid/operators/math/vol2col.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/vol2col.h b/paddle/fluid/operators/math/vol2col.h
index 3ce38b2d11..dbc2ed7a69 100644
--- a/paddle/fluid/operators/math/vol2col.h
+++ b/paddle/fluid/operators/math/vol2col.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/math/vol2col_test.cc b/paddle/fluid/operators/math/vol2col_test.cc
index af0a900f80..751d3ef19a 100644
--- a/paddle/fluid/operators/math/vol2col_test.cc
+++ b/paddle/fluid/operators/math/vol2col_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/matmul_op.cc b/paddle/fluid/operators/matmul_op.cc index 267b0057bf..8585592852 100644 --- a/paddle/fluid/operators/matmul_op.cc +++ b/paddle/fluid/operators/matmul_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/matmul_op.cu.cc b/paddle/fluid/operators/matmul_op.cu.cc index 988787f0fe..e021bbe645 100644 --- a/paddle/fluid/operators/matmul_op.cu.cc +++ b/paddle/fluid/operators/matmul_op.cu.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/matmul_op.h b/paddle/fluid/operators/matmul_op.h index f4cae3c91c..1cd8fe55dc 100644 --- a/paddle/fluid/operators/matmul_op.h +++ b/paddle/fluid/operators/matmul_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/max_sequence_len_op.cc b/paddle/fluid/operators/max_sequence_len_op.cc index cef0dc307d..4cd7c89b48 100644 --- a/paddle/fluid/operators/max_sequence_len_op.cc +++ b/paddle/fluid/operators/max_sequence_len_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/maxout_op.cc b/paddle/fluid/operators/maxout_op.cc index 8ce12cd4c4..efaae7d5f2 100644 --- a/paddle/fluid/operators/maxout_op.cc +++ b/paddle/fluid/operators/maxout_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/maxout_op.cu.cc b/paddle/fluid/operators/maxout_op.cu.cc index f3f45c90cd..be1e81bb86 100644 --- a/paddle/fluid/operators/maxout_op.cu.cc +++ b/paddle/fluid/operators/maxout_op.cu.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/maxout_op.h b/paddle/fluid/operators/maxout_op.h index e5de3e3760..5b9e003cb0 100644 --- a/paddle/fluid/operators/maxout_op.h +++ b/paddle/fluid/operators/maxout_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/mean_op.cc b/paddle/fluid/operators/mean_op.cc index 1043820345..a134796bfc 100644 --- a/paddle/fluid/operators/mean_op.cc +++ b/paddle/fluid/operators/mean_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/mean_op.cu b/paddle/fluid/operators/mean_op.cu index ccf2248760..91e0ab28ef 100644 --- a/paddle/fluid/operators/mean_op.cu +++ b/paddle/fluid/operators/mean_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/mean_op.h b/paddle/fluid/operators/mean_op.h index ae162287da..362e9f9ae8 100644 --- a/paddle/fluid/operators/mean_op.h +++ b/paddle/fluid/operators/mean_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/merge_lod_tensor_op.cc b/paddle/fluid/operators/merge_lod_tensor_op.cc index 88e67b6b86..42ebc8e471 100644 --- a/paddle/fluid/operators/merge_lod_tensor_op.cc +++ b/paddle/fluid/operators/merge_lod_tensor_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/mine_hard_examples_op.cc b/paddle/fluid/operators/mine_hard_examples_op.cc index 540cf86741..2128979fae 100644 --- a/paddle/fluid/operators/mine_hard_examples_op.cc +++ b/paddle/fluid/operators/mine_hard_examples_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/minus_op.cc b/paddle/fluid/operators/minus_op.cc index 8a35d668cc..7de9d94979 100644 --- a/paddle/fluid/operators/minus_op.cc +++ b/paddle/fluid/operators/minus_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/minus_op.cu b/paddle/fluid/operators/minus_op.cu index ce0b1fdc04..956d935da9 100644 --- a/paddle/fluid/operators/minus_op.cu +++ b/paddle/fluid/operators/minus_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/minus_op.h b/paddle/fluid/operators/minus_op.h index dc94cbbeca..7791b1456a 100644 --- a/paddle/fluid/operators/minus_op.h +++ b/paddle/fluid/operators/minus_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/modified_huber_loss_op.cc b/paddle/fluid/operators/modified_huber_loss_op.cc index f2d1653165..a8fbd48c4d 100644 --- a/paddle/fluid/operators/modified_huber_loss_op.cc +++ b/paddle/fluid/operators/modified_huber_loss_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/modified_huber_loss_op.cu b/paddle/fluid/operators/modified_huber_loss_op.cu index 69ac2b1ed5..71bfacb928 100644 --- a/paddle/fluid/operators/modified_huber_loss_op.cu +++ b/paddle/fluid/operators/modified_huber_loss_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/modified_huber_loss_op.h b/paddle/fluid/operators/modified_huber_loss_op.h index a470a45e13..d2b6d0c4ba 100644 --- a/paddle/fluid/operators/modified_huber_loss_op.h +++ b/paddle/fluid/operators/modified_huber_loss_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/momentum_op.cc b/paddle/fluid/operators/momentum_op.cc index a3950ac99d..6c70970e15 100644 --- a/paddle/fluid/operators/momentum_op.cc +++ b/paddle/fluid/operators/momentum_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/momentum_op.cu b/paddle/fluid/operators/momentum_op.cu index 28a14cd4b2..da4a6af298 100644 --- a/paddle/fluid/operators/momentum_op.cu +++ b/paddle/fluid/operators/momentum_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/momentum_op.h b/paddle/fluid/operators/momentum_op.h index fdab86b24e..04a1929b84 100644 --- a/paddle/fluid/operators/momentum_op.h +++ b/paddle/fluid/operators/momentum_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/mul_op.cc b/paddle/fluid/operators/mul_op.cc index c9375d8ea1..e7bed2c397 100644 --- a/paddle/fluid/operators/mul_op.cc +++ b/paddle/fluid/operators/mul_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/mul_op.cu.cc b/paddle/fluid/operators/mul_op.cu.cc index 6f605fd84f..0667530e94 100644 --- a/paddle/fluid/operators/mul_op.cu.cc +++ b/paddle/fluid/operators/mul_op.cu.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/mul_op.h b/paddle/fluid/operators/mul_op.h index 745989f07f..38311cf872 100644 --- a/paddle/fluid/operators/mul_op.h +++ b/paddle/fluid/operators/mul_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/multiclass_nms_op.cc b/paddle/fluid/operators/multiclass_nms_op.cc index 168e6f85d6..2565e7e9ef 100644 --- a/paddle/fluid/operators/multiclass_nms_op.cc +++ b/paddle/fluid/operators/multiclass_nms_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/multiplex_op.cc b/paddle/fluid/operators/multiplex_op.cc index f89b00376b..b698c1bf8a 100644 --- a/paddle/fluid/operators/multiplex_op.cc +++ b/paddle/fluid/operators/multiplex_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/multiplex_op.cu b/paddle/fluid/operators/multiplex_op.cu index 3ef7ef1dfc..cb89eeecfb 100644 --- a/paddle/fluid/operators/multiplex_op.cu +++ b/paddle/fluid/operators/multiplex_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/multiplex_op.h b/paddle/fluid/operators/multiplex_op.h index 682117cb1b..87de000971 100644 --- a/paddle/fluid/operators/multiplex_op.h +++ b/paddle/fluid/operators/multiplex_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/paddle/fluid/operators/nccl/nccl_gpu_common.cc b/paddle/fluid/operators/nccl/nccl_gpu_common.cc index 2a8ce932ec..fa6aafceb0 100644 --- a/paddle/fluid/operators/nccl/nccl_gpu_common.cc +++ b/paddle/fluid/operators/nccl/nccl_gpu_common.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/nccl/nccl_gpu_common.h b/paddle/fluid/operators/nccl/nccl_gpu_common.h index 6e78613239..be8c8a8f2c 100644 --- a/paddle/fluid/operators/nccl/nccl_gpu_common.h +++ b/paddle/fluid/operators/nccl/nccl_gpu_common.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/nccl_op.cc b/paddle/fluid/operators/nccl_op.cc index 703e8dd00f..7f1278f3a5 100644 --- a/paddle/fluid/operators/nccl_op.cc +++ b/paddle/fluid/operators/nccl_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/nccl_op.cu.cc b/paddle/fluid/operators/nccl_op.cu.cc index 333aed2903..fc83aa2ac2 100644 --- a/paddle/fluid/operators/nccl_op.cu.cc +++ b/paddle/fluid/operators/nccl_op.cu.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/paddle/fluid/operators/nccl_op_test.cu.cc b/paddle/fluid/operators/nccl_op_test.cu.cc index 212ed2f9b6..24e30f54a1 100644 --- a/paddle/fluid/operators/nccl_op_test.cu.cc +++ b/paddle/fluid/operators/nccl_op_test.cu.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/nce_op.cc b/paddle/fluid/operators/nce_op.cc index 0841313a10..99f38529bb 100644 --- a/paddle/fluid/operators/nce_op.cc +++ b/paddle/fluid/operators/nce_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/nce_op.h b/paddle/fluid/operators/nce_op.h index 624c2d9bbd..9420763847 100644 --- a/paddle/fluid/operators/nce_op.h +++ b/paddle/fluid/operators/nce_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/paddle/fluid/operators/net_op.cc b/paddle/fluid/operators/net_op.cc index c0ca5873ad..0c2da74417 100644 --- a/paddle/fluid/operators/net_op.cc +++ b/paddle/fluid/operators/net_op.cc @@ -1,4 +1,4 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/net_op.h b/paddle/fluid/operators/net_op.h index 479ba386a7..cbf8820cf4 100644 --- a/paddle/fluid/operators/net_op.h +++ b/paddle/fluid/operators/net_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/net_op_test.cc b/paddle/fluid/operators/net_op_test.cc index 265f15e82e..3b5f575485 100644 --- a/paddle/fluid/operators/net_op_test.cc +++ b/paddle/fluid/operators/net_op_test.cc @@ -1,4 +1,4 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/norm_op.cc b/paddle/fluid/operators/norm_op.cc index ee85b1a90a..5345c5bdb0 100644 --- a/paddle/fluid/operators/norm_op.cc +++ b/paddle/fluid/operators/norm_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/norm_op.cu b/paddle/fluid/operators/norm_op.cu index 438bb3b86e..d1d9be5074 100644 --- a/paddle/fluid/operators/norm_op.cu +++ b/paddle/fluid/operators/norm_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/norm_op.h b/paddle/fluid/operators/norm_op.h index db74c9b02a..0ad29e8a03 100644 --- a/paddle/fluid/operators/norm_op.h +++ b/paddle/fluid/operators/norm_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/one_hot_op.cc b/paddle/fluid/operators/one_hot_op.cc index 2c3a60da72..21d3405b70 100644 --- a/paddle/fluid/operators/one_hot_op.cc +++ b/paddle/fluid/operators/one_hot_op.cc @@ -1,4 +1,4 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/paddle/fluid/operators/one_hot_op.cu b/paddle/fluid/operators/one_hot_op.cu index 6a8061edaa..87c285df4e 100644 --- a/paddle/fluid/operators/one_hot_op.cu +++ b/paddle/fluid/operators/one_hot_op.cu @@ -1,4 +1,4 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/one_hot_op.h b/paddle/fluid/operators/one_hot_op.h index ddac6edd0e..1409f8af62 100644 --- a/paddle/fluid/operators/one_hot_op.h +++ b/paddle/fluid/operators/one_hot_op.h @@ -1,4 +1,4 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/pad_op.cc b/paddle/fluid/operators/pad_op.cc index 4b021fde7c..d2a0106f80 100644 --- a/paddle/fluid/operators/pad_op.cc +++ b/paddle/fluid/operators/pad_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/pad_op.cu b/paddle/fluid/operators/pad_op.cu index 203c314403..9cddef9cf1 100644 --- a/paddle/fluid/operators/pad_op.cu +++ b/paddle/fluid/operators/pad_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/pad_op.h b/paddle/fluid/operators/pad_op.h index 244d8f9b6c..a36abe3789 100644 --- a/paddle/fluid/operators/pad_op.h +++ b/paddle/fluid/operators/pad_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/parallel_do_op.cc b/paddle/fluid/operators/parallel_do_op.cc index d791d11172..88c83ee213 100644 --- a/paddle/fluid/operators/parallel_do_op.cc +++ b/paddle/fluid/operators/parallel_do_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/pool_cudnn_op.cu.cc b/paddle/fluid/operators/pool_cudnn_op.cu.cc index 75984b7721..781d96981e 100644 --- a/paddle/fluid/operators/pool_cudnn_op.cu.cc +++ b/paddle/fluid/operators/pool_cudnn_op.cu.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/paddle/fluid/operators/pool_op.cc b/paddle/fluid/operators/pool_op.cc index 9dd33eefc5..a80b23b8ed 100644 --- a/paddle/fluid/operators/pool_op.cc +++ b/paddle/fluid/operators/pool_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/pool_op.cu.cc b/paddle/fluid/operators/pool_op.cu.cc index 14486c0740..37bc14e2cb 100644 --- a/paddle/fluid/operators/pool_op.cu.cc +++ b/paddle/fluid/operators/pool_op.cu.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/pool_op.h b/paddle/fluid/operators/pool_op.h index 4cabd634d6..2fec50ef25 100644 --- a/paddle/fluid/operators/pool_op.h +++ b/paddle/fluid/operators/pool_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/pool_with_index_op.cc b/paddle/fluid/operators/pool_with_index_op.cc index ef6d5d867b..3a59365d17 100644 --- a/paddle/fluid/operators/pool_with_index_op.cc +++ b/paddle/fluid/operators/pool_with_index_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/pool_with_index_op.cu.cc b/paddle/fluid/operators/pool_with_index_op.cu.cc index 722a4d1e2a..5fc418b6fd 100644 --- a/paddle/fluid/operators/pool_with_index_op.cu.cc +++ b/paddle/fluid/operators/pool_with_index_op.cu.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/pool_with_index_op.h b/paddle/fluid/operators/pool_with_index_op.h index da7ef9df73..83e7bd138a 100644 --- a/paddle/fluid/operators/pool_with_index_op.h +++ b/paddle/fluid/operators/pool_with_index_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/precision_recall_op.cc b/paddle/fluid/operators/precision_recall_op.cc index 30d594719c..c34b0d072b 100644 --- a/paddle/fluid/operators/precision_recall_op.cc +++ b/paddle/fluid/operators/precision_recall_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/paddle/fluid/operators/precision_recall_op.h b/paddle/fluid/operators/precision_recall_op.h index 7dae86b76f..d6d4a5adc3 100644 --- a/paddle/fluid/operators/precision_recall_op.h +++ b/paddle/fluid/operators/precision_recall_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/prelu_op.cc b/paddle/fluid/operators/prelu_op.cc index 22b970d971..447b854544 100644 --- a/paddle/fluid/operators/prelu_op.cc +++ b/paddle/fluid/operators/prelu_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/prelu_op.cu b/paddle/fluid/operators/prelu_op.cu index 038b09a493..37d934a290 100644 --- a/paddle/fluid/operators/prelu_op.cu +++ b/paddle/fluid/operators/prelu_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/prelu_op.h b/paddle/fluid/operators/prelu_op.h index 85ad75d479..a6197d3548 100644 --- a/paddle/fluid/operators/prelu_op.h +++ b/paddle/fluid/operators/prelu_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/print_op.cc b/paddle/fluid/operators/print_op.cc index 4d12fdbb6b..a76ba796fe 100644 --- a/paddle/fluid/operators/print_op.cc +++ b/paddle/fluid/operators/print_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/prior_box_op.cc b/paddle/fluid/operators/prior_box_op.cc index 1385a6cdce..922b2bd237 100644 --- a/paddle/fluid/operators/prior_box_op.cc +++ b/paddle/fluid/operators/prior_box_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/prior_box_op.h b/paddle/fluid/operators/prior_box_op.h index e2c9514ed0..0113d2f09a 100644 --- a/paddle/fluid/operators/prior_box_op.h +++ b/paddle/fluid/operators/prior_box_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/paddle/fluid/operators/proximal_adagrad_op.cc b/paddle/fluid/operators/proximal_adagrad_op.cc index d9e3894c57..38cd97c17b 100644 --- a/paddle/fluid/operators/proximal_adagrad_op.cc +++ b/paddle/fluid/operators/proximal_adagrad_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/proximal_adagrad_op.cu b/paddle/fluid/operators/proximal_adagrad_op.cu index 54c75b3abb..7e0226c62b 100644 --- a/paddle/fluid/operators/proximal_adagrad_op.cu +++ b/paddle/fluid/operators/proximal_adagrad_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); You may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/proximal_adagrad_op.h b/paddle/fluid/operators/proximal_adagrad_op.h index 70205a8d11..91416450a6 100644 --- a/paddle/fluid/operators/proximal_adagrad_op.h +++ b/paddle/fluid/operators/proximal_adagrad_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/proximal_gd_op.cc b/paddle/fluid/operators/proximal_gd_op.cc index de7c6843c8..efb4e1ac20 100644 --- a/paddle/fluid/operators/proximal_gd_op.cc +++ b/paddle/fluid/operators/proximal_gd_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/proximal_gd_op.cu b/paddle/fluid/operators/proximal_gd_op.cu index 97b672e872..32ee9ab74c 100644 --- a/paddle/fluid/operators/proximal_gd_op.cu +++ b/paddle/fluid/operators/proximal_gd_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); You may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/proximal_gd_op.h b/paddle/fluid/operators/proximal_gd_op.h index 8372380f25..d49badf16d 100644 --- a/paddle/fluid/operators/proximal_gd_op.h +++ b/paddle/fluid/operators/proximal_gd_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/rank_loss_op.cc b/paddle/fluid/operators/rank_loss_op.cc index 222ca73d2a..767eef5686 100644 --- a/paddle/fluid/operators/rank_loss_op.cc +++ b/paddle/fluid/operators/rank_loss_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/rank_loss_op.cu b/paddle/fluid/operators/rank_loss_op.cu index 1b182ced70..ed80527989 100644 --- a/paddle/fluid/operators/rank_loss_op.cu +++ b/paddle/fluid/operators/rank_loss_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/rank_loss_op.h b/paddle/fluid/operators/rank_loss_op.h index 08bb2c2821..28626c0e2e 100644 --- a/paddle/fluid/operators/rank_loss_op.h +++ b/paddle/fluid/operators/rank_loss_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/recurrent_op.cc b/paddle/fluid/operators/recurrent_op.cc index 33a744a5b7..8435d6bcf0 100644 --- a/paddle/fluid/operators/recurrent_op.cc +++ b/paddle/fluid/operators/recurrent_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/recv_op.cc b/paddle/fluid/operators/recv_op.cc index 17b57b5d45..083c1fae5e 100644 --- a/paddle/fluid/operators/recv_op.cc +++ b/paddle/fluid/operators/recv_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/reduce_op.cc b/paddle/fluid/operators/reduce_op.cc index f4d9d4cc07..69e8f8081e 100644 --- a/paddle/fluid/operators/reduce_op.cc +++ b/paddle/fluid/operators/reduce_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/reduce_op.cu b/paddle/fluid/operators/reduce_op.cu index 1ca107ebfe..ae29587f55 100644 --- a/paddle/fluid/operators/reduce_op.cu +++ b/paddle/fluid/operators/reduce_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/reduce_op.h b/paddle/fluid/operators/reduce_op.h index a153cf272b..ec23325e57 100644 --- a/paddle/fluid/operators/reduce_op.h +++ b/paddle/fluid/operators/reduce_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc b/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc index 79ba9e543b..b0df932f43 100644 --- a/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc +++ b/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc index b4f80cc06a..a90ffb4ff3 100644 --- a/paddle/fluid/operators/reshape_op.cc +++ b/paddle/fluid/operators/reshape_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/reshape_op.cu b/paddle/fluid/operators/reshape_op.cu index f9ae6da29e..d5ceaf784c 100644 --- a/paddle/fluid/operators/reshape_op.cu +++ b/paddle/fluid/operators/reshape_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/reshape_op.h b/paddle/fluid/operators/reshape_op.h index a17ba7c619..c01100ef4d 100644 --- a/paddle/fluid/operators/reshape_op.h +++ b/paddle/fluid/operators/reshape_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/rmsprop_op.cc b/paddle/fluid/operators/rmsprop_op.cc index 06d3ccafef..a8855b3ccd 100644 --- a/paddle/fluid/operators/rmsprop_op.cc +++ b/paddle/fluid/operators/rmsprop_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/rmsprop_op.cu b/paddle/fluid/operators/rmsprop_op.cu index a909c94279..cdc4737695 100644 --- a/paddle/fluid/operators/rmsprop_op.cu +++ b/paddle/fluid/operators/rmsprop_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/rmsprop_op.h b/paddle/fluid/operators/rmsprop_op.h index 469c102a47..12836f43bd 100644 --- a/paddle/fluid/operators/rmsprop_op.h +++ b/paddle/fluid/operators/rmsprop_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/paddle/fluid/operators/rnn_memory_helper_op.cc b/paddle/fluid/operators/rnn_memory_helper_op.cc index e9329a0e7e..8ab9f010a2 100644 --- a/paddle/fluid/operators/rnn_memory_helper_op.cc +++ b/paddle/fluid/operators/rnn_memory_helper_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/roi_pool_op.cc b/paddle/fluid/operators/roi_pool_op.cc index 09238f89a7..6d4861f042 100644 --- a/paddle/fluid/operators/roi_pool_op.cc +++ b/paddle/fluid/operators/roi_pool_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/roi_pool_op.cu b/paddle/fluid/operators/roi_pool_op.cu index 0e8fc9ec7a..1931629d13 100644 --- a/paddle/fluid/operators/roi_pool_op.cu +++ b/paddle/fluid/operators/roi_pool_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/roi_pool_op.h b/paddle/fluid/operators/roi_pool_op.h index 15f3b36fcd..f38c5a3c0c 100644 --- a/paddle/fluid/operators/roi_pool_op.h +++ b/paddle/fluid/operators/roi_pool_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/row_conv_op.cc b/paddle/fluid/operators/row_conv_op.cc index 92661ea971..d34beeb650 100644 --- a/paddle/fluid/operators/row_conv_op.cc +++ b/paddle/fluid/operators/row_conv_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/row_conv_op.cu b/paddle/fluid/operators/row_conv_op.cu index 832072edf8..67083455a7 100644 --- a/paddle/fluid/operators/row_conv_op.cu +++ b/paddle/fluid/operators/row_conv_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/row_conv_op.h b/paddle/fluid/operators/row_conv_op.h index 59164b5215..fb999568f8 100644 --- a/paddle/fluid/operators/row_conv_op.h +++ b/paddle/fluid/operators/row_conv_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/paddle/fluid/operators/save_combine_op.cc b/paddle/fluid/operators/save_combine_op.cc index e3953e4b08..94703393bf 100644 --- a/paddle/fluid/operators/save_combine_op.cc +++ b/paddle/fluid/operators/save_combine_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/save_load_combine_op_test.cc b/paddle/fluid/operators/save_load_combine_op_test.cc index f8325bac6b..286f75df4c 100644 --- a/paddle/fluid/operators/save_load_combine_op_test.cc +++ b/paddle/fluid/operators/save_load_combine_op_test.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/save_load_op_test.cc b/paddle/fluid/operators/save_load_op_test.cc index da4573a8ed..a7ba1e0ae1 100644 --- a/paddle/fluid/operators/save_load_op_test.cc +++ b/paddle/fluid/operators/save_load_op_test.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/save_op.cc b/paddle/fluid/operators/save_op.cc index 85ba8e0118..4a715c4baa 100644 --- a/paddle/fluid/operators/save_op.cc +++ b/paddle/fluid/operators/save_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/scale_op.cc b/paddle/fluid/operators/scale_op.cc index 017fc2c00e..b16d06df8d 100644 --- a/paddle/fluid/operators/scale_op.cc +++ b/paddle/fluid/operators/scale_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/scale_op.cu b/paddle/fluid/operators/scale_op.cu index a9b46077aa..04c802da12 100644 --- a/paddle/fluid/operators/scale_op.cu +++ b/paddle/fluid/operators/scale_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/scale_op.h b/paddle/fluid/operators/scale_op.h index b1c2964ca6..c6a59b76ad 100644 --- a/paddle/fluid/operators/scale_op.h +++ b/paddle/fluid/operators/scale_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/paddle/fluid/operators/scatter.cu.h b/paddle/fluid/operators/scatter.cu.h index 0f1b9426a7..ac7d69bfb5 100644 --- a/paddle/fluid/operators/scatter.cu.h +++ b/paddle/fluid/operators/scatter.cu.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/scatter.h b/paddle/fluid/operators/scatter.h index 70cae1286c..39af717615 100644 --- a/paddle/fluid/operators/scatter.h +++ b/paddle/fluid/operators/scatter.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/scatter_op.cc b/paddle/fluid/operators/scatter_op.cc index e35930af53..3fb8b56d26 100644 --- a/paddle/fluid/operators/scatter_op.cc +++ b/paddle/fluid/operators/scatter_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/scatter_op.cu b/paddle/fluid/operators/scatter_op.cu index f9eaae33a8..bdabb29fa6 100644 --- a/paddle/fluid/operators/scatter_op.cu +++ b/paddle/fluid/operators/scatter_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/scatter_op.h b/paddle/fluid/operators/scatter_op.h index 65d1054632..3c6e7ece32 100644 --- a/paddle/fluid/operators/scatter_op.h +++ b/paddle/fluid/operators/scatter_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/scatter_test.cc b/paddle/fluid/operators/scatter_test.cc index 8fb5ef96af..b67af3c371 100644 --- a/paddle/fluid/operators/scatter_test.cc +++ b/paddle/fluid/operators/scatter_test.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/fluid/operators/send_op.cc b/paddle/fluid/operators/send_op.cc index 39b6c0e8c5..58850bf566 100644 --- a/paddle/fluid/operators/send_op.cc +++ b/paddle/fluid/operators/send_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/paddle/fluid/operators/send_recv_op_test.cc b/paddle/fluid/operators/send_recv_op_test.cc
index 37a3d246d7..008c012a32 100644
--- a/paddle/fluid/operators/send_recv_op_test.cc
+++ b/paddle/fluid/operators/send_recv_op_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sequence_concat_op.cc b/paddle/fluid/operators/sequence_concat_op.cc
index 4ddf800d85..126753edd0 100644
--- a/paddle/fluid/operators/sequence_concat_op.cc
+++ b/paddle/fluid/operators/sequence_concat_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sequence_concat_op.cu.cc b/paddle/fluid/operators/sequence_concat_op.cu.cc
index c5a280ef9e..43860b7c51 100644
--- a/paddle/fluid/operators/sequence_concat_op.cu.cc
+++ b/paddle/fluid/operators/sequence_concat_op.cu.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sequence_concat_op.h b/paddle/fluid/operators/sequence_concat_op.h
index 9121196369..9f04c41991 100644
--- a/paddle/fluid/operators/sequence_concat_op.h
+++ b/paddle/fluid/operators/sequence_concat_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sequence_conv_op.cc b/paddle/fluid/operators/sequence_conv_op.cc
index af9938b180..ec1f3a5da8 100644
--- a/paddle/fluid/operators/sequence_conv_op.cc
+++ b/paddle/fluid/operators/sequence_conv_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sequence_conv_op.cu.cc b/paddle/fluid/operators/sequence_conv_op.cu.cc
index 36f9e8da95..de482b7f10 100644
--- a/paddle/fluid/operators/sequence_conv_op.cu.cc
+++ b/paddle/fluid/operators/sequence_conv_op.cu.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sequence_conv_op.h b/paddle/fluid/operators/sequence_conv_op.h
index 1c81067fea..ee48339c52 100644
--- a/paddle/fluid/operators/sequence_conv_op.h
+++ b/paddle/fluid/operators/sequence_conv_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sequence_erase_op.cc b/paddle/fluid/operators/sequence_erase_op.cc
index 2e0adf8b19..32b9d7f7c1 100644
--- a/paddle/fluid/operators/sequence_erase_op.cc
+++ b/paddle/fluid/operators/sequence_erase_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sequence_erase_op.cu b/paddle/fluid/operators/sequence_erase_op.cu
index 43fc352fe7..fc9b91c351 100644
--- a/paddle/fluid/operators/sequence_erase_op.cu
+++ b/paddle/fluid/operators/sequence_erase_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sequence_erase_op.h b/paddle/fluid/operators/sequence_erase_op.h
index e151279c7f..b490c34f54 100644
--- a/paddle/fluid/operators/sequence_erase_op.h
+++ b/paddle/fluid/operators/sequence_erase_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sequence_expand_op.cc b/paddle/fluid/operators/sequence_expand_op.cc
index 28645e01b8..a5d84d629b 100644
--- a/paddle/fluid/operators/sequence_expand_op.cc
+++ b/paddle/fluid/operators/sequence_expand_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sequence_expand_op.cu b/paddle/fluid/operators/sequence_expand_op.cu
index 5ac76d83da..26622d23af 100644
--- a/paddle/fluid/operators/sequence_expand_op.cu
+++ b/paddle/fluid/operators/sequence_expand_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sequence_expand_op.h b/paddle/fluid/operators/sequence_expand_op.h
index 8010627ff6..76dde976db 100644
--- a/paddle/fluid/operators/sequence_expand_op.h
+++ b/paddle/fluid/operators/sequence_expand_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sequence_pool_op.cc b/paddle/fluid/operators/sequence_pool_op.cc
index 2cfb336b2e..3d4d54a3a3 100644
--- a/paddle/fluid/operators/sequence_pool_op.cc
+++ b/paddle/fluid/operators/sequence_pool_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sequence_pool_op.cu b/paddle/fluid/operators/sequence_pool_op.cu
index 364769c39b..2bf0697af3 100644
--- a/paddle/fluid/operators/sequence_pool_op.cu
+++ b/paddle/fluid/operators/sequence_pool_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sequence_pool_op.h b/paddle/fluid/operators/sequence_pool_op.h
index 7b67e6201e..8706ff14aa 100644
--- a/paddle/fluid/operators/sequence_pool_op.h
+++ b/paddle/fluid/operators/sequence_pool_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sequence_reshape_op.cc b/paddle/fluid/operators/sequence_reshape_op.cc
index c4e42d3eeb..a2999650b8 100644
--- a/paddle/fluid/operators/sequence_reshape_op.cc
+++ b/paddle/fluid/operators/sequence_reshape_op.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sequence_reshape_op.cu b/paddle/fluid/operators/sequence_reshape_op.cu
index 5ca3497396..232e031c0b 100644
--- a/paddle/fluid/operators/sequence_reshape_op.cu
+++ b/paddle/fluid/operators/sequence_reshape_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sequence_reshape_op.h b/paddle/fluid/operators/sequence_reshape_op.h
index 7a5d1261da..f0b5be0218 100644
--- a/paddle/fluid/operators/sequence_reshape_op.h
+++ b/paddle/fluid/operators/sequence_reshape_op.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sequence_slice_op.cc b/paddle/fluid/operators/sequence_slice_op.cc
index 87b8eff646..d09e5bca56 100644
--- a/paddle/fluid/operators/sequence_slice_op.cc
+++ b/paddle/fluid/operators/sequence_slice_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sequence_slice_op.cu b/paddle/fluid/operators/sequence_slice_op.cu
index 041fabdf9a..059e802df0 100755
--- a/paddle/fluid/operators/sequence_slice_op.cu
+++ b/paddle/fluid/operators/sequence_slice_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sequence_slice_op.h b/paddle/fluid/operators/sequence_slice_op.h
index 65c36a32aa..4f6d70483e 100644
--- a/paddle/fluid/operators/sequence_slice_op.h
+++ b/paddle/fluid/operators/sequence_slice_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sequence_softmax_op.cc b/paddle/fluid/operators/sequence_softmax_op.cc
index f966b71620..7e685eb3dc 100644
--- a/paddle/fluid/operators/sequence_softmax_op.cc
+++ b/paddle/fluid/operators/sequence_softmax_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sequence_softmax_op.cu.cc b/paddle/fluid/operators/sequence_softmax_op.cu.cc
index c42dfd7540..295c68c5b9 100644
--- a/paddle/fluid/operators/sequence_softmax_op.cu.cc
+++ b/paddle/fluid/operators/sequence_softmax_op.cu.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sequence_softmax_op.h b/paddle/fluid/operators/sequence_softmax_op.h
index e6c21c67b3..cb93a02b83 100644
--- a/paddle/fluid/operators/sequence_softmax_op.h
+++ b/paddle/fluid/operators/sequence_softmax_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sgd_op.cc b/paddle/fluid/operators/sgd_op.cc
index f1e23a62f4..7cc73de878 100644
--- a/paddle/fluid/operators/sgd_op.cc
+++ b/paddle/fluid/operators/sgd_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sgd_op.cu b/paddle/fluid/operators/sgd_op.cu
index 09374e2049..9d211541c0 100644
--- a/paddle/fluid/operators/sgd_op.cu
+++ b/paddle/fluid/operators/sgd_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sgd_op.h b/paddle/fluid/operators/sgd_op.h
index f1eaaecdb1..2fec84815a 100644
--- a/paddle/fluid/operators/sgd_op.h
+++ b/paddle/fluid/operators/sgd_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/shrink_rnn_memory_op.cc b/paddle/fluid/operators/shrink_rnn_memory_op.cc
index 7fe0526381..183982f90f 100644
--- a/paddle/fluid/operators/shrink_rnn_memory_op.cc
+++ b/paddle/fluid/operators/shrink_rnn_memory_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc
index 3188415a2b..7b93f19bb2 100644
--- a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc
+++ b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cu b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cu
index daa9d3e4fa..9aadac1a41 100644
--- a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cu
+++ b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.h b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.h
index 977849f762..faef72866e 100644
--- a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.h
+++ b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sign_op.cc b/paddle/fluid/operators/sign_op.cc
index 54b962538b..8f8b7abd03 100644
--- a/paddle/fluid/operators/sign_op.cc
+++ b/paddle/fluid/operators/sign_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sign_op.cu b/paddle/fluid/operators/sign_op.cu
index 93cdb311eb..e0d7a87e64 100644
--- a/paddle/fluid/operators/sign_op.cu
+++ b/paddle/fluid/operators/sign_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sign_op.h b/paddle/fluid/operators/sign_op.h
index 1c2ebebee4..b99934daee 100644
--- a/paddle/fluid/operators/sign_op.h
+++ b/paddle/fluid/operators/sign_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/smooth_l1_loss_op.cc b/paddle/fluid/operators/smooth_l1_loss_op.cc
index e6eede23ee..658eb01952 100644
--- a/paddle/fluid/operators/smooth_l1_loss_op.cc
+++ b/paddle/fluid/operators/smooth_l1_loss_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/smooth_l1_loss_op.cu b/paddle/fluid/operators/smooth_l1_loss_op.cu
index 94c0d6cd29..dfbb5c9058 100644
--- a/paddle/fluid/operators/smooth_l1_loss_op.cu
+++ b/paddle/fluid/operators/smooth_l1_loss_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/smooth_l1_loss_op.h b/paddle/fluid/operators/smooth_l1_loss_op.h
index 325ad824e1..efe3afba18 100644
--- a/paddle/fluid/operators/smooth_l1_loss_op.h
+++ b/paddle/fluid/operators/smooth_l1_loss_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/softmax_op.cc b/paddle/fluid/operators/softmax_op.cc
index 1d9462d08b..09275ef290 100644
--- a/paddle/fluid/operators/softmax_op.cc
+++ b/paddle/fluid/operators/softmax_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/softmax_op.cu.cc b/paddle/fluid/operators/softmax_op.cu.cc
index c53d8a2bc8..dbd13fd38a 100644
--- a/paddle/fluid/operators/softmax_op.cu.cc
+++ b/paddle/fluid/operators/softmax_op.cu.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/softmax_op.h b/paddle/fluid/operators/softmax_op.h
index 9287f02310..600da45a0b 100644
--- a/paddle/fluid/operators/softmax_op.h
+++ b/paddle/fluid/operators/softmax_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.cc b/paddle/fluid/operators/softmax_with_cross_entropy_op.cc
index 79d56cb97d..857e573357 100644
--- a/paddle/fluid/operators/softmax_with_cross_entropy_op.cc
+++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.cu b/paddle/fluid/operators/softmax_with_cross_entropy_op.cu
index 410d9e8887..39b246a5be 100644
--- a/paddle/fluid/operators/softmax_with_cross_entropy_op.cu
+++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.h b/paddle/fluid/operators/softmax_with_cross_entropy_op.h
index 0927efd42c..dd6f6aca5a 100644
--- a/paddle/fluid/operators/softmax_with_cross_entropy_op.h
+++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/split_lod_tensor_op.cc b/paddle/fluid/operators/split_lod_tensor_op.cc
index f9600d99a3..1c5d647600 100644
--- a/paddle/fluid/operators/split_lod_tensor_op.cc
+++ b/paddle/fluid/operators/split_lod_tensor_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/split_op.cc b/paddle/fluid/operators/split_op.cc
index f8bc22fe1d..dffac772f1 100644
--- a/paddle/fluid/operators/split_op.cc
+++ b/paddle/fluid/operators/split_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/split_op.cu.cc b/paddle/fluid/operators/split_op.cu.cc
index 279691c759..efa378af85 100644
--- a/paddle/fluid/operators/split_op.cu.cc
+++ b/paddle/fluid/operators/split_op.cu.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/split_op.h b/paddle/fluid/operators/split_op.h
index 54420e1bf6..ae8562c0c5 100644
--- a/paddle/fluid/operators/split_op.h
+++ b/paddle/fluid/operators/split_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/split_selected_rows_op.cc b/paddle/fluid/operators/split_selected_rows_op.cc
index c30280f654..b0e21e01ec 100644
--- a/paddle/fluid/operators/split_selected_rows_op.cc
+++ b/paddle/fluid/operators/split_selected_rows_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/split_selected_rows_op.cu b/paddle/fluid/operators/split_selected_rows_op.cu
index 0bbf1ecfae..7250917036 100644
--- a/paddle/fluid/operators/split_selected_rows_op.cu
+++ b/paddle/fluid/operators/split_selected_rows_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/split_selected_rows_op.h b/paddle/fluid/operators/split_selected_rows_op.h
index af44b09b70..23baf8e72e 100644
--- a/paddle/fluid/operators/split_selected_rows_op.h
+++ b/paddle/fluid/operators/split_selected_rows_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/spp_op.cc b/paddle/fluid/operators/spp_op.cc
index e6755b1200..f1c4415f27 100644
--- a/paddle/fluid/operators/spp_op.cc
+++ b/paddle/fluid/operators/spp_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/spp_op.cu.cc b/paddle/fluid/operators/spp_op.cu.cc
index cad2ca5ef8..7fe63d17c0 100644
--- a/paddle/fluid/operators/spp_op.cu.cc
+++ b/paddle/fluid/operators/spp_op.cu.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/spp_op.h b/paddle/fluid/operators/spp_op.h
index 1da1f80580..3d2f226325 100644
--- a/paddle/fluid/operators/spp_op.h
+++ b/paddle/fluid/operators/spp_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/squared_l2_distance_op.cc b/paddle/fluid/operators/squared_l2_distance_op.cc
index c1d0c2c7f3..1c5e87040a 100644
--- a/paddle/fluid/operators/squared_l2_distance_op.cc
+++ b/paddle/fluid/operators/squared_l2_distance_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/squared_l2_distance_op.cu b/paddle/fluid/operators/squared_l2_distance_op.cu
index 959e7afac9..3e80ae8dd2 100644
--- a/paddle/fluid/operators/squared_l2_distance_op.cu
+++ b/paddle/fluid/operators/squared_l2_distance_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/squared_l2_distance_op.h b/paddle/fluid/operators/squared_l2_distance_op.h
index aab241247e..e0133d33e6 100644
--- a/paddle/fluid/operators/squared_l2_distance_op.h
+++ b/paddle/fluid/operators/squared_l2_distance_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/squared_l2_norm_op.cc b/paddle/fluid/operators/squared_l2_norm_op.cc
index a43cc22994..b64df2a218 100644
--- a/paddle/fluid/operators/squared_l2_norm_op.cc
+++ b/paddle/fluid/operators/squared_l2_norm_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/squared_l2_norm_op.cu b/paddle/fluid/operators/squared_l2_norm_op.cu
index 52f4ab79b2..87830413da 100644
--- a/paddle/fluid/operators/squared_l2_norm_op.cu
+++ b/paddle/fluid/operators/squared_l2_norm_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/squared_l2_norm_op.h b/paddle/fluid/operators/squared_l2_norm_op.h
index 56524636b8..b32db4569e 100644
--- a/paddle/fluid/operators/squared_l2_norm_op.h
+++ b/paddle/fluid/operators/squared_l2_norm_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/strided_memcpy.h b/paddle/fluid/operators/strided_memcpy.h
index 4c7b90693a..22c1db82e9 100644
--- a/paddle/fluid/operators/strided_memcpy.h
+++ b/paddle/fluid/operators/strided_memcpy.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/strided_memcpy_test.cc b/paddle/fluid/operators/strided_memcpy_test.cc
index a369941a99..a6ca82d16f 100644
--- a/paddle/fluid/operators/strided_memcpy_test.cc
+++ b/paddle/fluid/operators/strided_memcpy_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/sum_op.cc b/paddle/fluid/operators/sum_op.cc
index 96f851720a..bfc5709c4b 100644
--- a/paddle/fluid/operators/sum_op.cc
+++ b/paddle/fluid/operators/sum_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/paddle/fluid/operators/sum_op.cu b/paddle/fluid/operators/sum_op.cu
index 8d8f90d751..89bcd1bbc8 100644
--- a/paddle/fluid/operators/sum_op.cu
+++ b/paddle/fluid/operators/sum_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/paddle/fluid/operators/sum_op.h b/paddle/fluid/operators/sum_op.h
index 08218b6836..c9f22237d9 100644
--- a/paddle/fluid/operators/sum_op.h
+++ b/paddle/fluid/operators/sum_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/paddle/fluid/operators/target_assign_op.cc b/paddle/fluid/operators/target_assign_op.cc
index bafb830df9..a894b12fa3 100644
--- a/paddle/fluid/operators/target_assign_op.cc
+++ b/paddle/fluid/operators/target_assign_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/target_assign_op.cu b/paddle/fluid/operators/target_assign_op.cu
index fa02b8aac9..24664f99b2 100644
--- a/paddle/fluid/operators/target_assign_op.cu
+++ b/paddle/fluid/operators/target_assign_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/target_assign_op.h b/paddle/fluid/operators/target_assign_op.h
index a1b2fe6f35..3d52973741 100644
--- a/paddle/fluid/operators/target_assign_op.h
+++ b/paddle/fluid/operators/target_assign_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/tensor_array_read_write_op.cc b/paddle/fluid/operators/tensor_array_read_write_op.cc
index 704ee964c9..278b348117 100644
--- a/paddle/fluid/operators/tensor_array_read_write_op.cc
+++ b/paddle/fluid/operators/tensor_array_read_write_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/top_k_op.cc b/paddle/fluid/operators/top_k_op.cc
index c81ea860d0..2e4e8caed5 100644
--- a/paddle/fluid/operators/top_k_op.cc
+++ b/paddle/fluid/operators/top_k_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/top_k_op.cu b/paddle/fluid/operators/top_k_op.cu
index 5390cb5063..bfd26c2f22 100644
--- a/paddle/fluid/operators/top_k_op.cu
+++ b/paddle/fluid/operators/top_k_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/top_k_op.h b/paddle/fluid/operators/top_k_op.h
index e32b351500..42828b7e65 100644
--- a/paddle/fluid/operators/top_k_op.h
+++ b/paddle/fluid/operators/top_k_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/transpose_op.cc b/paddle/fluid/operators/transpose_op.cc
index a3d8acffc2..87b1f530e0 100644
--- a/paddle/fluid/operators/transpose_op.cc
+++ b/paddle/fluid/operators/transpose_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/transpose_op.cu.cc b/paddle/fluid/operators/transpose_op.cu.cc
index f8667ab369..bcd1fb6313 100644
--- a/paddle/fluid/operators/transpose_op.cu.cc
+++ b/paddle/fluid/operators/transpose_op.cu.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/transpose_op.h b/paddle/fluid/operators/transpose_op.h
index 1fb419474a..90f16499a6 100644
--- a/paddle/fluid/operators/transpose_op.h
+++ b/paddle/fluid/operators/transpose_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/uniform_random_op.cc b/paddle/fluid/operators/uniform_random_op.cc
index b6fea1d448..6c0167deab 100644
--- a/paddle/fluid/operators/uniform_random_op.cc
+++ b/paddle/fluid/operators/uniform_random_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/uniform_random_op.cu b/paddle/fluid/operators/uniform_random_op.cu
index 9afca68e59..877d81d5c4 100644
--- a/paddle/fluid/operators/uniform_random_op.cu
+++ b/paddle/fluid/operators/uniform_random_op.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/unpool_op.cc b/paddle/fluid/operators/unpool_op.cc
index 2e0b271fed..d3bd7fda09 100644
--- a/paddle/fluid/operators/unpool_op.cc
+++ b/paddle/fluid/operators/unpool_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/unpool_op.cu.cc b/paddle/fluid/operators/unpool_op.cu.cc
index 15d81eb296..7c59a0feaa 100644
--- a/paddle/fluid/operators/unpool_op.cu.cc
+++ b/paddle/fluid/operators/unpool_op.cu.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/unpool_op.h b/paddle/fluid/operators/unpool_op.h
index ceed550739..a442104575 100644
--- a/paddle/fluid/operators/unpool_op.h
+++ b/paddle/fluid/operators/unpool_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/warpctc_op.cc b/paddle/fluid/operators/warpctc_op.cc
index 1c05fed0b4..940bf4fe7b 100644
--- a/paddle/fluid/operators/warpctc_op.cc
+++ b/paddle/fluid/operators/warpctc_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/warpctc_op.cu.cc b/paddle/fluid/operators/warpctc_op.cu.cc
index 9ee7f970a9..6f8559f542 100644
--- a/paddle/fluid/operators/warpctc_op.cu.cc
+++ b/paddle/fluid/operators/warpctc_op.cu.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/warpctc_op.h b/paddle/fluid/operators/warpctc_op.h
index a1de71627e..aefb58bdcd 100644
--- a/paddle/fluid/operators/warpctc_op.h
+++ b/paddle/fluid/operators/warpctc_op.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/operators/while_op.cc b/paddle/fluid/operators/while_op.cc
index a7a05cc5f7..94a11eaf78 100644
--- a/paddle/fluid/operators/while_op.cc
+++ b/paddle/fluid/operators/while_op.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/assert.h b/paddle/fluid/platform/assert.h
index 1f5a8f6a19..123d3598f4 100644
--- a/paddle/fluid/platform/assert.h
+++ b/paddle/fluid/platform/assert.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/call_once.h b/paddle/fluid/platform/call_once.h
index 44a4d38f67..fa34972c38 100644
--- a/paddle/fluid/platform/call_once.h
+++ b/paddle/fluid/platform/call_once.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/cpu_info.cc b/paddle/fluid/platform/cpu_info.cc
index 47473aead0..8db08edba8 100644
--- a/paddle/fluid/platform/cpu_info.cc
+++ b/paddle/fluid/platform/cpu_info.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/cpu_info.h b/paddle/fluid/platform/cpu_info.h
index 8df7c7b4bc..a930151bd1 100644
--- a/paddle/fluid/platform/cpu_info.h
+++ b/paddle/fluid/platform/cpu_info.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/cpu_info_test.cc b/paddle/fluid/platform/cpu_info_test.cc
index 046758c594..78332f90cd 100644
--- a/paddle/fluid/platform/cpu_info_test.cc
+++ b/paddle/fluid/platform/cpu_info_test.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/cuda_helper.h b/paddle/fluid/platform/cuda_helper.h
index 376bb0e688..881d611d4a 100644
--- a/paddle/fluid/platform/cuda_helper.h
+++ b/paddle/fluid/platform/cuda_helper.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/cuda_profiler.h b/paddle/fluid/platform/cuda_profiler.h
index 67d5f626d4..ebd6aebd76 100644
--- a/paddle/fluid/platform/cuda_profiler.h
+++ b/paddle/fluid/platform/cuda_profiler.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/cudnn_helper.h b/paddle/fluid/platform/cudnn_helper.h
index f2daa4f4fc..48c967de11 100644
--- a/paddle/fluid/platform/cudnn_helper.h
+++ b/paddle/fluid/platform/cudnn_helper.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/cudnn_helper_test.cc b/paddle/fluid/platform/cudnn_helper_test.cc
index cd0bd3fe3e..517df68634 100644
--- a/paddle/fluid/platform/cudnn_helper_test.cc
+++ b/paddle/fluid/platform/cudnn_helper_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/details/device_ptr_cast.h b/paddle/fluid/platform/details/device_ptr_cast.h
index 4015491fcd..1c502a19c0 100644
--- a/paddle/fluid/platform/details/device_ptr_cast.h
+++ b/paddle/fluid/platform/details/device_ptr_cast.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/device_context.cc b/paddle/fluid/platform/device_context.cc
index c4da846bb1..7da6e04d0a 100644
--- a/paddle/fluid/platform/device_context.cc
+++ b/paddle/fluid/platform/device_context.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/paddle/fluid/platform/device_context.h b/paddle/fluid/platform/device_context.h
index 10b581f41a..a294ba5101 100644
--- a/paddle/fluid/platform/device_context.h
+++ b/paddle/fluid/platform/device_context.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/paddle/fluid/platform/device_context_test.cu b/paddle/fluid/platform/device_context_test.cu
index f4dae6e90a..9d8d07362c 100644
--- a/paddle/fluid/platform/device_context_test.cu
+++ b/paddle/fluid/platform/device_context_test.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/dynload/cublas.cc b/paddle/fluid/platform/dynload/cublas.cc
index c599712554..e90e3105f0 100644
--- a/paddle/fluid/platform/dynload/cublas.cc
+++ b/paddle/fluid/platform/dynload/cublas.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/dynload/cublas.h b/paddle/fluid/platform/dynload/cublas.h
index 05f69e5065..580ed9bb57 100644
--- a/paddle/fluid/platform/dynload/cublas.h
+++ b/paddle/fluid/platform/dynload/cublas.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/dynload/cudnn.cc b/paddle/fluid/platform/dynload/cudnn.cc
index 0b1c4c4f96..c65b060ab4 100644
--- a/paddle/fluid/platform/dynload/cudnn.cc
+++ b/paddle/fluid/platform/dynload/cudnn.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/dynload/cudnn.h b/paddle/fluid/platform/dynload/cudnn.h
index 00dfbc8387..81acc445bd 100644
--- a/paddle/fluid/platform/dynload/cudnn.h
+++ b/paddle/fluid/platform/dynload/cudnn.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/dynload/curand.cc b/paddle/fluid/platform/dynload/curand.cc
index eac690b145..ce83ebc84f 100644
--- a/paddle/fluid/platform/dynload/curand.cc
+++ b/paddle/fluid/platform/dynload/curand.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/dynload/curand.h b/paddle/fluid/platform/dynload/curand.h
index ce3115b3ce..1b3ff962d6 100644
--- a/paddle/fluid/platform/dynload/curand.h
+++ b/paddle/fluid/platform/dynload/curand.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/dynload/dynamic_loader.cc b/paddle/fluid/platform/dynload/dynamic_loader.cc
index eb00f93b7c..db1eb41f28 100644
--- a/paddle/fluid/platform/dynload/dynamic_loader.cc
+++ b/paddle/fluid/platform/dynload/dynamic_loader.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/dynload/dynamic_loader.h b/paddle/fluid/platform/dynload/dynamic_loader.h
index 7b0c8c16d7..4ffc335332 100644
--- a/paddle/fluid/platform/dynload/dynamic_loader.h
+++ b/paddle/fluid/platform/dynload/dynamic_loader.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/dynload/nccl.cc b/paddle/fluid/platform/dynload/nccl.cc
index 1dc3e96f04..3edc70c46d 100644
--- a/paddle/fluid/platform/dynload/nccl.cc
+++ b/paddle/fluid/platform/dynload/nccl.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/dynload/nccl.h b/paddle/fluid/platform/dynload/nccl.h
index 349a4d0ba3..dc78bcb44d 100644
--- a/paddle/fluid/platform/dynload/nccl.h
+++ b/paddle/fluid/platform/dynload/nccl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/dynload/warpctc.cc b/paddle/fluid/platform/dynload/warpctc.cc
index 84de2cae94..4a15004895 100644
--- a/paddle/fluid/platform/dynload/warpctc.cc
+++ b/paddle/fluid/platform/dynload/warpctc.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/dynload/warpctc.h b/paddle/fluid/platform/dynload/warpctc.h
index f1955818de..f5ded0eb6b 100644
--- a/paddle/fluid/platform/dynload/warpctc.h
+++ b/paddle/fluid/platform/dynload/warpctc.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/enforce.cc b/paddle/fluid/platform/enforce.cc
index 55cd80943c..6d0c656781 100644
--- a/paddle/fluid/platform/enforce.cc
+++ b/paddle/fluid/platform/enforce.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/enforce.h b/paddle/fluid/platform/enforce.h
index 86e1792801..d303fd6d63 100644
--- a/paddle/fluid/platform/enforce.h
+++ b/paddle/fluid/platform/enforce.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/enforce_test.cc b/paddle/fluid/platform/enforce_test.cc
index baa34a5c7b..bb9a3543ff 100644
--- a/paddle/fluid/platform/enforce_test.cc
+++ b/paddle/fluid/platform/enforce_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/paddle/fluid/platform/for_range.h b/paddle/fluid/platform/for_range.h
index 0e695328c3..c153e80fe4 100644
--- a/paddle/fluid/platform/for_range.h
+++ b/paddle/fluid/platform/for_range.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/gpu_info.cc b/paddle/fluid/platform/gpu_info.cc
index 1797f59a9c..05e1eae853 100644
--- a/paddle/fluid/platform/gpu_info.cc
+++ b/paddle/fluid/platform/gpu_info.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/gpu_info.h b/paddle/fluid/platform/gpu_info.h
index d05131fa41..3d4883d807 100644
--- a/paddle/fluid/platform/gpu_info.h
+++ b/paddle/fluid/platform/gpu_info.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/hostdevice.h b/paddle/fluid/platform/hostdevice.h
index fa4659ed29..c0dc92a521 100644
--- a/paddle/fluid/platform/hostdevice.h
+++ b/paddle/fluid/platform/hostdevice.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/macros.h b/paddle/fluid/platform/macros.h
index feae7bdd77..02a2f53b49 100644
--- a/paddle/fluid/platform/macros.h
+++ b/paddle/fluid/platform/macros.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/mkldnn_helper.h b/paddle/fluid/platform/mkldnn_helper.h
index cd52a8b4c4..6d71f352c6 100644
--- a/paddle/fluid/platform/mkldnn_helper.h
+++ b/paddle/fluid/platform/mkldnn_helper.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/nccl_test.cu b/paddle/fluid/platform/nccl_test.cu
index 75b95aff1a..7123035363 100644
--- a/paddle/fluid/platform/nccl_test.cu
+++ b/paddle/fluid/platform/nccl_test.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/place.cc b/paddle/fluid/platform/place.cc
index e99b75d761..de8f958eb0 100644
--- a/paddle/fluid/platform/place.cc
+++ b/paddle/fluid/platform/place.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/place.h b/paddle/fluid/platform/place.h
index 2977a41036..501bddfc6e 100644
--- a/paddle/fluid/platform/place.h
+++ b/paddle/fluid/platform/place.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/place_test.cc b/paddle/fluid/platform/place_test.cc
index f248902d91..6a919c5625 100644
--- a/paddle/fluid/platform/place_test.cc
+++ b/paddle/fluid/platform/place_test.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc
index 28d2675f79..4804df7966 100644
--- a/paddle/fluid/platform/profiler.cc
+++ b/paddle/fluid/platform/profiler.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/profiler.h b/paddle/fluid/platform/profiler.h
index 0bc5e666cb..a3d22df700 100644
--- a/paddle/fluid/platform/profiler.h
+++ b/paddle/fluid/platform/profiler.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/profiler_test.cc b/paddle/fluid/platform/profiler_test.cc
index d2525c38b6..dae4d2206e 100644
--- a/paddle/fluid/platform/profiler_test.cc
+++ b/paddle/fluid/platform/profiler_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/transform.h b/paddle/fluid/platform/transform.h
index 879daed191..917c48b47f 100644
--- a/paddle/fluid/platform/transform.h
+++ b/paddle/fluid/platform/transform.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/transform_test.cu b/paddle/fluid/platform/transform_test.cu
index 0e4b9edc2f..7b5cfd8f43 100644
--- a/paddle/fluid/platform/transform_test.cu
+++ b/paddle/fluid/platform/transform_test.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/variant.h b/paddle/fluid/platform/variant.h
index ea6ef8fddf..05ca33137d 100644
--- a/paddle/fluid/platform/variant.h
+++ b/paddle/fluid/platform/variant.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/pybind/const_value.cc b/paddle/fluid/pybind/const_value.cc
index 098252a83d..6657b25ed2 100644
--- a/paddle/fluid/pybind/const_value.cc
+++ b/paddle/fluid/pybind/const_value.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/pybind/const_value.h b/paddle/fluid/pybind/const_value.h
index 67d14ac9ff..79e71e039d 100644
--- a/paddle/fluid/pybind/const_value.h
+++ b/paddle/fluid/pybind/const_value.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/pybind/exception.cc b/paddle/fluid/pybind/exception.cc
index 7398a88541..4bd3ecf728 100644
--- a/paddle/fluid/pybind/exception.cc
+++ b/paddle/fluid/pybind/exception.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/pybind/exception.h b/paddle/fluid/pybind/exception.h
index 43e91a7063..bc6b0c0679 100644
--- a/paddle/fluid/pybind/exception.h
+++ b/paddle/fluid/pybind/exception.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/pybind/protobuf.cc b/paddle/fluid/pybind/protobuf.cc
index 4aefcf1a1c..3341edb370 100644
--- a/paddle/fluid/pybind/protobuf.cc
+++ b/paddle/fluid/pybind/protobuf.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/pybind/protobuf.h b/paddle/fluid/pybind/protobuf.h
index c828e4583d..d0dc8936b3 100644
--- a/paddle/fluid/pybind/protobuf.h
+++ b/paddle/fluid/pybind/protobuf.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index 85a6700a61..56c1a935d9 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/pybind/tensor_py.h b/paddle/fluid/pybind/tensor_py.h
index 0261709f1e..7e7fb554ac 100644
--- a/paddle/fluid/pybind/tensor_py.h
+++ b/paddle/fluid/pybind/tensor_py.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/string/piece.cc b/paddle/fluid/string/piece.cc
index 560413dff1..454f5d8d38 100644
--- a/paddle/fluid/string/piece.cc
+++ b/paddle/fluid/string/piece.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/string/piece.h b/paddle/fluid/string/piece.h
index f2bb6b2c76..8dda484eaa 100644
--- a/paddle/fluid/string/piece.h
+++ b/paddle/fluid/string/piece.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/string/piece_test.cc b/paddle/fluid/string/piece_test.cc
index fc17d315b9..80b712b08c 100644
--- a/paddle/fluid/string/piece_test.cc
+++ b/paddle/fluid/string/piece_test.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/string/printf.h b/paddle/fluid/string/printf.h
index b55ae21b87..693cf9d6df 100644
--- a/paddle/fluid/string/printf.h
+++ b/paddle/fluid/string/printf.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/string/printf_test.cc b/paddle/fluid/string/printf_test.cc
index 6ca59bdefd..b6a60c8d6b 100644
--- a/paddle/fluid/string/printf_test.cc
+++ b/paddle/fluid/string/printf_test.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/string/tinyformat/tinyformat.h b/paddle/fluid/string/tinyformat/tinyformat.h
index d1a2c47f1a..a5c1798e10 100644
--- a/paddle/fluid/string/tinyformat/tinyformat.h
+++ b/paddle/fluid/string/tinyformat/tinyformat.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/string/to_string.h b/paddle/fluid/string/to_string.h
index 178edc1895..8caf149420 100644
--- a/paddle/fluid/string/to_string.h
+++ b/paddle/fluid/string/to_string.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/string/to_string_test.cc b/paddle/fluid/string/to_string_test.cc
index 1e890f572e..8fc293af0e 100644
--- a/paddle/fluid/string/to_string_test.cc
+++ b/paddle/fluid/string/to_string_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/BlockExpandOp.cpp b/paddle/function/BlockExpandOp.cpp
index bd0fe119ce..aa53853e08 100644
--- a/paddle/function/BlockExpandOp.cpp
+++ b/paddle/function/BlockExpandOp.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/BlockExpandOpTest.cpp b/paddle/function/BlockExpandOpTest.cpp
index 59193a3ec3..8fca4f6fdc 100644
--- a/paddle/function/BlockExpandOpTest.cpp
+++ b/paddle/function/BlockExpandOpTest.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/BufferArg.cpp b/paddle/function/BufferArg.cpp
index 2b70036e3f..2dc931c5d7 100644
--- a/paddle/function/BufferArg.cpp
+++ b/paddle/function/BufferArg.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/BufferArg.h b/paddle/function/BufferArg.h
index 0dc7792f64..89ee09837d 100644
--- a/paddle/function/BufferArg.h
+++ b/paddle/function/BufferArg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/BufferArgTest.cpp b/paddle/function/BufferArgTest.cpp
index 6b8e1e2da9..1a6e0110af 100644
--- a/paddle/function/BufferArgTest.cpp
+++ b/paddle/function/BufferArgTest.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/ContextProjectionOp.cpp b/paddle/function/ContextProjectionOp.cpp
index 23916c0f4b..904b0958e6 100644
--- a/paddle/function/ContextProjectionOp.cpp
+++ b/paddle/function/ContextProjectionOp.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/ContextProjectionOp.h b/paddle/function/ContextProjectionOp.h
index 6f7d936379..822734a78e 100644
--- a/paddle/function/ContextProjectionOp.h
+++ b/paddle/function/ContextProjectionOp.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/ContextProjectionOpGpu.cu b/paddle/function/ContextProjectionOpGpu.cu
index 4492dea5d8..0a4d865e2c 100644
--- a/paddle/function/ContextProjectionOpGpu.cu
+++ b/paddle/function/ContextProjectionOpGpu.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/ContextProjectionOpTest.cpp b/paddle/function/ContextProjectionOpTest.cpp
index 9e9dd20e6f..d805c3ae92 100644
--- a/paddle/function/ContextProjectionOpTest.cpp
+++ b/paddle/function/ContextProjectionOpTest.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/ConvOp.h b/paddle/function/ConvOp.h
index 062ea25a11..7d23d0079c 100644
--- a/paddle/function/ConvOp.h
+++ b/paddle/function/ConvOp.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/ConvOpTest.h b/paddle/function/ConvOpTest.h
index d8d3c792df..5eac608978 100644
--- a/paddle/function/ConvOpTest.h
+++ b/paddle/function/ConvOpTest.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/CosSimOp.cpp b/paddle/function/CosSimOp.cpp
index 2e5c281f37..81bccc1a9c 100644
--- a/paddle/function/CosSimOp.cpp
+++ b/paddle/function/CosSimOp.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/CosSimOp.h b/paddle/function/CosSimOp.h
index be73064e63..2d377eb3be 100644
--- a/paddle/function/CosSimOp.h
+++ b/paddle/function/CosSimOp.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/CosSimOpGpu.cu b/paddle/function/CosSimOpGpu.cu
index a1f88f479b..9fe50529ac 100644
--- a/paddle/function/CosSimOpGpu.cu
+++ b/paddle/function/CosSimOpGpu.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/CosSimOpTest.cpp b/paddle/function/CosSimOpTest.cpp
index f6c0041101..42b02da0cb 100644
--- a/paddle/function/CosSimOpTest.cpp
+++ b/paddle/function/CosSimOpTest.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/CropOp.cpp b/paddle/function/CropOp.cpp
index 46f98f12c1..7aa527d216 100644
--- a/paddle/function/CropOp.cpp
+++ b/paddle/function/CropOp.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/CropOp.h b/paddle/function/CropOp.h
index 87986fbdc7..05d4b163b3 100644
--- a/paddle/function/CropOp.h
+++ b/paddle/function/CropOp.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/CropOpGpu.cu b/paddle/function/CropOpGpu.cu
index 241356a9ca..5615062433 100644
--- a/paddle/function/CropOpGpu.cu
+++ b/paddle/function/CropOpGpu.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/CropOpTest.cpp b/paddle/function/CropOpTest.cpp
index 6f11abfdf6..10c83a0321 100644
--- a/paddle/function/CropOpTest.cpp
+++ b/paddle/function/CropOpTest.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/CrossMapNormalOp.cpp b/paddle/function/CrossMapNormalOp.cpp
index 9e88669d37..75c0fc2a3d 100644
--- a/paddle/function/CrossMapNormalOp.cpp
+++ b/paddle/function/CrossMapNormalOp.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/CrossMapNormalOp.h b/paddle/function/CrossMapNormalOp.h
index b1e401ad0a..bb9cdf2021 100644
--- a/paddle/function/CrossMapNormalOp.h
+++ b/paddle/function/CrossMapNormalOp.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/CrossMapNormalOpGpu.cu b/paddle/function/CrossMapNormalOpGpu.cu
index 88b991ff6a..938827610a 100644
--- a/paddle/function/CrossMapNormalOpGpu.cu
+++ b/paddle/function/CrossMapNormalOpGpu.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/CrossMapNormalOpTest.cpp b/paddle/function/CrossMapNormalOpTest.cpp
index 3b390db77f..dec52adde2 100644
--- a/paddle/function/CrossMapNormalOpTest.cpp
+++ b/paddle/function/CrossMapNormalOpTest.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/DepthwiseConvOp.cpp b/paddle/function/DepthwiseConvOp.cpp
index 9863e3ae1d..46651345b4 100644
--- a/paddle/function/DepthwiseConvOp.cpp
+++ b/paddle/function/DepthwiseConvOp.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/DepthwiseConvOp.h b/paddle/function/DepthwiseConvOp.h
index 1bf70e52f3..6700747314 100644
--- a/paddle/function/DepthwiseConvOp.h
+++ b/paddle/function/DepthwiseConvOp.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/DepthwiseConvOpGpu.cu b/paddle/function/DepthwiseConvOpGpu.cu
index 2d722dfcfc..cd1d55a416 100644
--- a/paddle/function/DepthwiseConvOpGpu.cu
+++ b/paddle/function/DepthwiseConvOpGpu.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/DepthwiseConvOpTest.cpp b/paddle/function/DepthwiseConvOpTest.cpp
index b1a90da7db..caf8f3597f 100644
--- a/paddle/function/DepthwiseConvOpTest.cpp
+++ b/paddle/function/DepthwiseConvOpTest.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/EigenGemm.cpp b/paddle/function/EigenGemm.cpp
index 644098a9e7..bac4659e62 100644
--- a/paddle/function/EigenGemm.cpp
+++ b/paddle/function/EigenGemm.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/Function.cpp b/paddle/function/Function.cpp
index f71c0f681b..344358fd3d 100644
--- a/paddle/function/Function.cpp
+++ b/paddle/function/Function.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/Function.h b/paddle/function/Function.h
index 15eb35b7f7..01288ef92e 100644
--- a/paddle/function/Function.h
+++ b/paddle/function/Function.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/FunctionTest.cpp b/paddle/function/FunctionTest.cpp
index 7b0b1c6adb..f5e6ca3f51 100644
--- a/paddle/function/FunctionTest.cpp
+++ b/paddle/function/FunctionTest.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/FunctionTest.h b/paddle/function/FunctionTest.h
index 370940532e..56c3537b6a 100644
--- a/paddle/function/FunctionTest.h
+++ b/paddle/function/FunctionTest.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/GemmConvOp.cpp b/paddle/function/GemmConvOp.cpp
index a9876cec2a..2b7c6f9eab 100644
--- a/paddle/function/GemmConvOp.cpp
+++ b/paddle/function/GemmConvOp.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/GemmConvOpTest.cpp b/paddle/function/GemmConvOpTest.cpp
index b5b5e1f35b..a30b7c90bb 100644
--- a/paddle/function/GemmConvOpTest.cpp
+++ b/paddle/function/GemmConvOpTest.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/GemmFunctor.cpp b/paddle/function/GemmFunctor.cpp
index 9e25ee58a1..0b1fe1b67d 100644
--- a/paddle/function/GemmFunctor.cpp
+++ b/paddle/function/GemmFunctor.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/GemmFunctor.h b/paddle/function/GemmFunctor.h
index 0809953b4e..df63fc64f8 100644
--- a/paddle/function/GemmFunctor.h
+++ b/paddle/function/GemmFunctor.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/GruFunctor.h b/paddle/function/GruFunctor.h
index 9f6392198e..d5a30c3327 100644
--- a/paddle/function/GruFunctor.h
+++ b/paddle/function/GruFunctor.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/Im2Col.h b/paddle/function/Im2Col.h
index 915119e291..6a07787000 100644
--- a/paddle/function/Im2Col.h
+++ b/paddle/function/Im2Col.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/Im2ColOp.cpp b/paddle/function/Im2ColOp.cpp
index f864d42f80..ad2aed8f3c 100644
--- a/paddle/function/Im2ColOp.cpp
+++ b/paddle/function/Im2ColOp.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/Im2ColOpGpu.cu b/paddle/function/Im2ColOpGpu.cu
index 71da11b955..a944a0ee68 100644
--- a/paddle/function/Im2ColOpGpu.cu
+++ b/paddle/function/Im2ColOpGpu.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/Im2ColTest.cpp b/paddle/function/Im2ColTest.cpp
index fe44a8bf79..967c5b9153 100644
--- a/paddle/function/Im2ColTest.cpp
+++ b/paddle/function/Im2ColTest.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/MulOp.cpp b/paddle/function/MulOp.cpp
index 704a8c4132..90cd4a2b6d 100644
--- a/paddle/function/MulOp.cpp
+++ b/paddle/function/MulOp.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/MulOp.h b/paddle/function/MulOp.h
index b6016a6ab6..e6057be4e5 100644
--- a/paddle/function/MulOp.h
+++ b/paddle/function/MulOp.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/MulOpGpu.cu b/paddle/function/MulOpGpu.cu
index 9449b89056..d63416a8e4 100644
--- a/paddle/function/MulOpGpu.cu
+++ b/paddle/function/MulOpGpu.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/MulOpTest.cpp b/paddle/function/MulOpTest.cpp
index d31eb0c74f..4e1ebd749c 100644
--- a/paddle/function/MulOpTest.cpp
+++ b/paddle/function/MulOpTest.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/NaiveConvOp.cpp b/paddle/function/NaiveConvOp.cpp
index e0692fa06d..22d3b33d0f 100644
--- a/paddle/function/NaiveConvOp.cpp
+++ b/paddle/function/NaiveConvOp.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/PadOp.cpp b/paddle/function/PadOp.cpp
index eed2f2e308..db6dd518ca 100644
--- a/paddle/function/PadOp.cpp
+++ b/paddle/function/PadOp.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/PadOp.h b/paddle/function/PadOp.h
index 0e226ec737..4b0aa4014b 100644
--- a/paddle/function/PadOp.h
+++ b/paddle/function/PadOp.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/PadOpGpu.cu b/paddle/function/PadOpGpu.cu
index 5b6f4e6832..01d9b5c3b2 100644
--- a/paddle/function/PadOpGpu.cu
+++ b/paddle/function/PadOpGpu.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/PadOpTest.cpp b/paddle/function/PadOpTest.cpp
index e286f4e5b8..a4474f8549 100644
--- a/paddle/function/PadOpTest.cpp
+++ b/paddle/function/PadOpTest.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/RowConvOp.cpp b/paddle/function/RowConvOp.cpp
index 7c802d6627..925860346e 100644
--- a/paddle/function/RowConvOp.cpp
+++ b/paddle/function/RowConvOp.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/RowConvOp.h b/paddle/function/RowConvOp.h
index 2c5de6151a..bfe775e014 100644
--- a/paddle/function/RowConvOp.h
+++ b/paddle/function/RowConvOp.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/RowConvOpGpu.cu b/paddle/function/RowConvOpGpu.cu
index b0cbd9fd1d..9d8a6d80bb 100644
--- a/paddle/function/RowConvOpGpu.cu
+++ b/paddle/function/RowConvOpGpu.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/RowConvOpTest.cpp b/paddle/function/RowConvOpTest.cpp
index f52d18b049..bbc29ad6a6 100644
--- a/paddle/function/RowConvOpTest.cpp
+++ b/paddle/function/RowConvOpTest.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/ScaleSubRegionOp.cpp b/paddle/function/ScaleSubRegionOp.cpp
index a080505d7d..6ed6eb2dba 100644
--- a/paddle/function/ScaleSubRegionOp.cpp
+++ b/paddle/function/ScaleSubRegionOp.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/ScaleSubRegionOp.h b/paddle/function/ScaleSubRegionOp.h
index 0480c8577f..ed7d6b8ad3 100644
--- a/paddle/function/ScaleSubRegionOp.h
+++ b/paddle/function/ScaleSubRegionOp.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/ScaleSubRegionOpGpu.cu b/paddle/function/ScaleSubRegionOpGpu.cu
index 8aae2e44c3..9784c51ae0 100644
--- a/paddle/function/ScaleSubRegionOpGpu.cu
+++ b/paddle/function/ScaleSubRegionOpGpu.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/ScaleSubRegionOpTest.cpp b/paddle/function/ScaleSubRegionOpTest.cpp
index 43331f258d..dd6ee67108 100644
--- a/paddle/function/ScaleSubRegionOpTest.cpp
+++ b/paddle/function/ScaleSubRegionOpTest.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/SwitchOp.cpp b/paddle/function/SwitchOp.cpp
index 597723a2dd..50e1d6c04c 100644
--- a/paddle/function/SwitchOp.cpp
+++ b/paddle/function/SwitchOp.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/SwitchOp.h b/paddle/function/SwitchOp.h
index e4c1c3ac92..b5eb0883cb 100644
--- a/paddle/function/SwitchOp.h
+++ b/paddle/function/SwitchOp.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/SwitchOpTest.cpp b/paddle/function/SwitchOpTest.cpp
index 03b0dd66dd..08e5a613c0 100644
--- a/paddle/function/SwitchOpTest.cpp
+++ b/paddle/function/SwitchOpTest.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/TensorShape.h b/paddle/function/TensorShape.h
index cda58f19df..02d38c32c0 100644
--- a/paddle/function/TensorShape.h
+++ b/paddle/function/TensorShape.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/TensorShapeTest.cpp b/paddle/function/TensorShapeTest.cpp
index e55d516d4a..4d692b9b97 100644
--- a/paddle/function/TensorShapeTest.cpp
+++ b/paddle/function/TensorShapeTest.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/TensorType.h b/paddle/function/TensorType.h
index 8308bbd8ad..b384591bd8 100644
--- a/paddle/function/TensorType.h
+++ b/paddle/function/TensorType.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/TensorTypeTest.cpp b/paddle/function/TensorTypeTest.cpp
index d1c559a91e..d0cd63147a 100644
--- a/paddle/function/TensorTypeTest.cpp
+++ b/paddle/function/TensorTypeTest.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/neon/NeonDepthwiseConv.cpp b/paddle/function/neon/NeonDepthwiseConv.cpp
index 38aa667061..d3298c7538 100644
--- a/paddle/function/neon/NeonDepthwiseConv.cpp
+++ b/paddle/function/neon/NeonDepthwiseConv.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/neon/NeonDepthwiseConv.h b/paddle/function/neon/NeonDepthwiseConv.h
index 98a86d278f..8b2cba263e 100644
--- a/paddle/function/neon/NeonDepthwiseConv.h
+++ b/paddle/function/neon/NeonDepthwiseConv.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/neon/NeonDepthwiseConvTranspose.cpp b/paddle/function/neon/NeonDepthwiseConvTranspose.cpp
index 49ca4bc8a0..d443d3fa49 100644
--- a/paddle/function/neon/NeonDepthwiseConvTranspose.cpp
+++ b/paddle/function/neon/NeonDepthwiseConvTranspose.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/neon/neon_util.h b/paddle/function/neon/neon_util.h
index e2db045067..95076b1387 100644
--- a/paddle/function/neon/neon_util.h
+++ b/paddle/function/neon/neon_util.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/nnpack/NNPACKConvOp.cpp b/paddle/function/nnpack/NNPACKConvOp.cpp
index 6ccc487cf1..3cdba4f2ed 100644
--- a/paddle/function/nnpack/NNPACKConvOp.cpp
+++ b/paddle/function/nnpack/NNPACKConvOp.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/function/nnpack/NNPACKConvOpTest.cpp b/paddle/function/nnpack/NNPACKConvOpTest.cpp
index 4dd3982487..c80ffb5d5d 100644
--- a/paddle/function/nnpack/NNPACKConvOpTest.cpp
+++ b/paddle/function/nnpack/NNPACKConvOpTest.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/activations/ActivationFunction.cpp b/paddle/gserver/activations/ActivationFunction.cpp
index 57c890e488..8d8f01234f 100644
--- a/paddle/gserver/activations/ActivationFunction.cpp
+++ b/paddle/gserver/activations/ActivationFunction.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/activations/ActivationFunction.h b/paddle/gserver/activations/ActivationFunction.h
index f208224e30..0f4b0fe0ab 100644
--- a/paddle/gserver/activations/ActivationFunction.h
+++ b/paddle/gserver/activations/ActivationFunction.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/activations/MKLDNNActivation.cpp b/paddle/gserver/activations/MKLDNNActivation.cpp
index f3ccd68160..56ffb83934 100644
--- a/paddle/gserver/activations/MKLDNNActivation.cpp
+++ b/paddle/gserver/activations/MKLDNNActivation.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/activations/MKLDNNActivation.h b/paddle/gserver/activations/MKLDNNActivation.h
index dd16421fd6..392b32c70d 100644
--- a/paddle/gserver/activations/MKLDNNActivation.h
+++ b/paddle/gserver/activations/MKLDNNActivation.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/dataproviders/DataProvider.cpp b/paddle/gserver/dataproviders/DataProvider.cpp
index 106cf5b622..580cf821c6 100644
--- a/paddle/gserver/dataproviders/DataProvider.cpp
+++ b/paddle/gserver/dataproviders/DataProvider.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/dataproviders/DataProvider.h b/paddle/gserver/dataproviders/DataProvider.h
index 265dbb5493..4851168aba 100644
--- a/paddle/gserver/dataproviders/DataProvider.h
+++ b/paddle/gserver/dataproviders/DataProvider.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/dataproviders/DataProviderGroup.h b/paddle/gserver/dataproviders/DataProviderGroup.h
index 69ac2590b9..768e54fe82 100644
--- a/paddle/gserver/dataproviders/DataProviderGroup.h
+++ b/paddle/gserver/dataproviders/DataProviderGroup.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/dataproviders/MultiDataProvider.cpp b/paddle/gserver/dataproviders/MultiDataProvider.cpp
index 46fe053768..f71947ef39 100644
--- a/paddle/gserver/dataproviders/MultiDataProvider.cpp
+++ b/paddle/gserver/dataproviders/MultiDataProvider.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/dataproviders/MultiDataProvider.h b/paddle/gserver/dataproviders/MultiDataProvider.h
index 4c8fb2cd0d..9a863c8967 100644
--- a/paddle/gserver/dataproviders/MultiDataProvider.h
+++ b/paddle/gserver/dataproviders/MultiDataProvider.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/dataproviders/ProtoReader.h b/paddle/gserver/dataproviders/ProtoReader.h
index 4e6f58a529..786703f4de 100644
--- a/paddle/gserver/dataproviders/ProtoReader.h
+++ b/paddle/gserver/dataproviders/ProtoReader.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/dataproviders/PyDataProvider.cpp b/paddle/gserver/dataproviders/PyDataProvider.cpp
index b53790e764..dadf1b4cf2 100644
--- a/paddle/gserver/dataproviders/PyDataProvider.cpp
+++ b/paddle/gserver/dataproviders/PyDataProvider.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/dataproviders/PyDataProvider.h b/paddle/gserver/dataproviders/PyDataProvider.h
index 1401c13a1e..e53354c9e4 100644
--- a/paddle/gserver/dataproviders/PyDataProvider.h
+++ b/paddle/gserver/dataproviders/PyDataProvider.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/dataproviders/PyDataProvider2.cpp b/paddle/gserver/dataproviders/PyDataProvider2.cpp
index b8079dc079..e3e4457f9b 100644
--- a/paddle/gserver/dataproviders/PyDataProvider2.cpp
+++ b/paddle/gserver/dataproviders/PyDataProvider2.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/evaluators/CTCErrorEvaluator.cpp b/paddle/gserver/evaluators/CTCErrorEvaluator.cpp
index 92087fa32b..0f680de776 100644
--- a/paddle/gserver/evaluators/CTCErrorEvaluator.cpp
+++ b/paddle/gserver/evaluators/CTCErrorEvaluator.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/evaluators/ChunkEvaluator.cpp b/paddle/gserver/evaluators/ChunkEvaluator.cpp
index a2ab15eede..755b91d05c 100644
--- a/paddle/gserver/evaluators/ChunkEvaluator.cpp
+++ b/paddle/gserver/evaluators/ChunkEvaluator.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/evaluators/DetectionMAPEvaluator.cpp b/paddle/gserver/evaluators/DetectionMAPEvaluator.cpp
index 9b825db574..f43ef5dd51 100644
--- a/paddle/gserver/evaluators/DetectionMAPEvaluator.cpp
+++ b/paddle/gserver/evaluators/DetectionMAPEvaluator.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/evaluators/Evaluator.cpp b/paddle/gserver/evaluators/Evaluator.cpp
index 8e66b1f0db..79478e7fac 100644
--- a/paddle/gserver/evaluators/Evaluator.cpp
+++ b/paddle/gserver/evaluators/Evaluator.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/evaluators/Evaluator.h b/paddle/gserver/evaluators/Evaluator.h
index 90203553e0..be2032992c 100644
--- a/paddle/gserver/evaluators/Evaluator.h
+++ b/paddle/gserver/evaluators/Evaluator.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/gradientmachines/GradientMachine.cpp b/paddle/gserver/gradientmachines/GradientMachine.cpp
index de5faf5e1e..654024e8a4 100644
--- a/paddle/gserver/gradientmachines/GradientMachine.cpp
+++ b/paddle/gserver/gradientmachines/GradientMachine.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/gradientmachines/GradientMachine.h b/paddle/gserver/gradientmachines/GradientMachine.h
index 4ab54a5022..60936c311d 100644
--- a/paddle/gserver/gradientmachines/GradientMachine.h
+++ b/paddle/gserver/gradientmachines/GradientMachine.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/gradientmachines/GradientMachineMode.cpp b/paddle/gserver/gradientmachines/GradientMachineMode.cpp
index 3583fb4de8..9a0b2643e0 100644
--- a/paddle/gserver/gradientmachines/GradientMachineMode.cpp
+++ b/paddle/gserver/gradientmachines/GradientMachineMode.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/gradientmachines/GradientMachineMode.h b/paddle/gserver/gradientmachines/GradientMachineMode.h
index 7bc885fe99..898b68fbbc 100644
--- a/paddle/gserver/gradientmachines/GradientMachineMode.h
+++ b/paddle/gserver/gradientmachines/GradientMachineMode.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp b/paddle/gserver/gradientmachines/MultiGradientMachine.cpp
index 018da6c76d..3f46cc98cd 100644
--- a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp
+++ b/paddle/gserver/gradientmachines/MultiGradientMachine.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/gradientmachines/MultiGradientMachine.h b/paddle/gserver/gradientmachines/MultiGradientMachine.h
index 5e7622f929..83d2651f34 100644
--- a/paddle/gserver/gradientmachines/MultiGradientMachine.h
+++ b/paddle/gserver/gradientmachines/MultiGradientMachine.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/gradientmachines/MultiNetwork.cpp b/paddle/gserver/gradientmachines/MultiNetwork.cpp
index 5f52a5f3d4..a1140402b8 100644
--- a/paddle/gserver/gradientmachines/MultiNetwork.cpp
+++ b/paddle/gserver/gradientmachines/MultiNetwork.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/gradientmachines/MultiNetwork.h b/paddle/gserver/gradientmachines/MultiNetwork.h
index 3ac2888c57..186a9ad0a3 100644
--- a/paddle/gserver/gradientmachines/MultiNetwork.h
+++ b/paddle/gserver/gradientmachines/MultiNetwork.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.cpp b/paddle/gserver/gradientmachines/NeuralNetwork.cpp
index 1f2aa61b6f..a3c13df3db 100644
--- a/paddle/gserver/gradientmachines/NeuralNetwork.cpp
+++ b/paddle/gserver/gradientmachines/NeuralNetwork.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.h b/paddle/gserver/gradientmachines/NeuralNetwork.h
index 968e198cf6..5b32f844f7 100644
--- a/paddle/gserver/gradientmachines/NeuralNetwork.h
+++ b/paddle/gserver/gradientmachines/NeuralNetwork.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/gradientmachines/ParallelNeuralNetwork.cpp b/paddle/gserver/gradientmachines/ParallelNeuralNetwork.cpp
index c6e3a3b321..85cfc59fbe 100644
--- a/paddle/gserver/gradientmachines/ParallelNeuralNetwork.cpp
+++ b/paddle/gserver/gradientmachines/ParallelNeuralNetwork.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/gradientmachines/ParallelNeuralNetwork.h b/paddle/gserver/gradientmachines/ParallelNeuralNetwork.h
index 39f5682a58..e3b6812123 100644
--- a/paddle/gserver/gradientmachines/ParallelNeuralNetwork.h
+++ b/paddle/gserver/gradientmachines/ParallelNeuralNetwork.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp b/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp
index 9f29b97466..2429b5d1a0 100644
--- a/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp
+++ b/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/gradientmachines/RecurrentGradientMachine.h b/paddle/gserver/gradientmachines/RecurrentGradientMachine.h
index c16fae6d17..0032b72cda 100644
--- a/paddle/gserver/gradientmachines/RecurrentGradientMachine.h
+++ b/paddle/gserver/gradientmachines/RecurrentGradientMachine.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/AddtoLayer.cpp b/paddle/gserver/layers/AddtoLayer.cpp
index 5338530113..75e17f52df 100644
--- a/paddle/gserver/layers/AddtoLayer.cpp
+++ b/paddle/gserver/layers/AddtoLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/AddtoLayer.h b/paddle/gserver/layers/AddtoLayer.h
index 4e98c174b4..1d00063056 100644
--- a/paddle/gserver/layers/AddtoLayer.h
+++ b/paddle/gserver/layers/AddtoLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/AgentLayer.cpp b/paddle/gserver/layers/AgentLayer.cpp
index bdae7e623a..e2f73f88f5 100644
--- a/paddle/gserver/layers/AgentLayer.cpp
+++ b/paddle/gserver/layers/AgentLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/AgentLayer.h b/paddle/gserver/layers/AgentLayer.h
index 29681b29c6..da0ac45308 100644
--- a/paddle/gserver/layers/AgentLayer.h
+++ b/paddle/gserver/layers/AgentLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/AverageLayer.cpp b/paddle/gserver/layers/AverageLayer.cpp
index 96cc4288c6..b3787b1448 100644
--- a/paddle/gserver/layers/AverageLayer.cpp
+++ b/paddle/gserver/layers/AverageLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/AverageLayer.h b/paddle/gserver/layers/AverageLayer.h
index db4a17bfb0..24602d2a9c 100644
--- a/paddle/gserver/layers/AverageLayer.h
+++ b/paddle/gserver/layers/AverageLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/BatchNormBaseLayer.cpp b/paddle/gserver/layers/BatchNormBaseLayer.cpp
index 925af31289..a3516f9423 100644
--- a/paddle/gserver/layers/BatchNormBaseLayer.cpp
+++ b/paddle/gserver/layers/BatchNormBaseLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/BatchNormBaseLayer.h b/paddle/gserver/layers/BatchNormBaseLayer.h
index 2ac3cd9d67..69d642af4f 100644
--- a/paddle/gserver/layers/BatchNormBaseLayer.h
+++ b/paddle/gserver/layers/BatchNormBaseLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/BatchNormalizationLayer.cpp b/paddle/gserver/layers/BatchNormalizationLayer.cpp
index 25ab5cd927..59831dd904 100644
--- a/paddle/gserver/layers/BatchNormalizationLayer.cpp
+++ b/paddle/gserver/layers/BatchNormalizationLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/BatchNormalizationLayer.h b/paddle/gserver/layers/BatchNormalizationLayer.h
index 1fdb5e2070..95add69215 100644
--- a/paddle/gserver/layers/BatchNormalizationLayer.h
+++ b/paddle/gserver/layers/BatchNormalizationLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/BilinearInterpLayer.cpp b/paddle/gserver/layers/BilinearInterpLayer.cpp
index 1976cb0017..9775914596 100644
--- a/paddle/gserver/layers/BilinearInterpLayer.cpp
+++ b/paddle/gserver/layers/BilinearInterpLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/BilinearInterpLayer.h b/paddle/gserver/layers/BilinearInterpLayer.h
index 27c269f278..acd320420f 100644
--- a/paddle/gserver/layers/BilinearInterpLayer.h
+++ b/paddle/gserver/layers/BilinearInterpLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/BlockExpandLayer.cpp b/paddle/gserver/layers/BlockExpandLayer.cpp
index 3b1f346359..793d24e884 100644
--- a/paddle/gserver/layers/BlockExpandLayer.cpp
+++ b/paddle/gserver/layers/BlockExpandLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/BlockExpandLayer.h b/paddle/gserver/layers/BlockExpandLayer.h
index 15ce73ab8b..1797b64036 100644
--- a/paddle/gserver/layers/BlockExpandLayer.h
+++ b/paddle/gserver/layers/BlockExpandLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/CRFDecodingLayer.cpp b/paddle/gserver/layers/CRFDecodingLayer.cpp
index 191176ce98..4afed7e295 100644
--- a/paddle/gserver/layers/CRFDecodingLayer.cpp
+++ b/paddle/gserver/layers/CRFDecodingLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/CRFDecodingLayer.h b/paddle/gserver/layers/CRFDecodingLayer.h
index 3cbcac6cf6..fba3cebac1 100644
--- a/paddle/gserver/layers/CRFDecodingLayer.h
+++ b/paddle/gserver/layers/CRFDecodingLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/CRFLayer.cpp b/paddle/gserver/layers/CRFLayer.cpp
index 867303b4fa..8b87a533a2 100644
--- a/paddle/gserver/layers/CRFLayer.cpp
+++ b/paddle/gserver/layers/CRFLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/CRFLayer.h b/paddle/gserver/layers/CRFLayer.h
index 00ec13cede..cb5bd05568 100644
--- a/paddle/gserver/layers/CRFLayer.h
+++ b/paddle/gserver/layers/CRFLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/CTCLayer.cpp b/paddle/gserver/layers/CTCLayer.cpp
index 14ec851551..64eb15cd0d 100644
--- a/paddle/gserver/layers/CTCLayer.cpp
+++ b/paddle/gserver/layers/CTCLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/CTCLayer.h b/paddle/gserver/layers/CTCLayer.h
index f7a515f312..fcbc42565e 100644
--- a/paddle/gserver/layers/CTCLayer.h
+++ b/paddle/gserver/layers/CTCLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ClipLayer.cpp b/paddle/gserver/layers/ClipLayer.cpp
index 13f16c9537..dbc3337499 100644
--- a/paddle/gserver/layers/ClipLayer.cpp
+++ b/paddle/gserver/layers/ClipLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ConcatenateLayer.cpp b/paddle/gserver/layers/ConcatenateLayer.cpp
index c5fc4cf4f8..f5ab29a509 100644
--- a/paddle/gserver/layers/ConcatenateLayer.cpp
+++ b/paddle/gserver/layers/ConcatenateLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ContextProjection.cpp b/paddle/gserver/layers/ContextProjection.cpp
index d7042af1c2..10c3cef0da 100644
--- a/paddle/gserver/layers/ContextProjection.cpp
+++ b/paddle/gserver/layers/ContextProjection.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ContextProjection.h b/paddle/gserver/layers/ContextProjection.h
index c87d6ed1d6..e30f98f58d 100644
--- a/paddle/gserver/layers/ContextProjection.h
+++ b/paddle/gserver/layers/ContextProjection.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/Conv3DLayer.cpp b/paddle/gserver/layers/Conv3DLayer.cpp
index 9deda2de98..b38de86b15 100644
--- a/paddle/gserver/layers/Conv3DLayer.cpp
+++ b/paddle/gserver/layers/Conv3DLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
+/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/Conv3DLayer.h b/paddle/gserver/layers/Conv3DLayer.h
index b622508d0c..5ab5ff3d4a 100644
--- a/paddle/gserver/layers/Conv3DLayer.h
+++ b/paddle/gserver/layers/Conv3DLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
+/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ConvBaseLayer.cpp b/paddle/gserver/layers/ConvBaseLayer.cpp
index b848ab6bdd..56bf4f9fcb 100644
--- a/paddle/gserver/layers/ConvBaseLayer.cpp
+++ b/paddle/gserver/layers/ConvBaseLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ConvBaseLayer.h b/paddle/gserver/layers/ConvBaseLayer.h
index ccd170d9d8..93869fe68d 100644
--- a/paddle/gserver/layers/ConvBaseLayer.h
+++ b/paddle/gserver/layers/ConvBaseLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ConvBaseOperator.cpp b/paddle/gserver/layers/ConvBaseOperator.cpp
index 5469c41c87..317e7d5c60 100644
--- a/paddle/gserver/layers/ConvBaseOperator.cpp
+++ b/paddle/gserver/layers/ConvBaseOperator.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ConvBaseOperator.h b/paddle/gserver/layers/ConvBaseOperator.h
index 2d42169cde..27fb0362d3 100644
--- a/paddle/gserver/layers/ConvBaseOperator.h
+++ b/paddle/gserver/layers/ConvBaseOperator.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ConvBaseProjection.cpp b/paddle/gserver/layers/ConvBaseProjection.cpp
index 19efed7b52..39f433b78f 100644
--- a/paddle/gserver/layers/ConvBaseProjection.cpp
+++ b/paddle/gserver/layers/ConvBaseProjection.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ConvBaseProjection.h b/paddle/gserver/layers/ConvBaseProjection.h
index bb7ffa627b..ba76d236d9 100644
--- a/paddle/gserver/layers/ConvBaseProjection.h
+++ b/paddle/gserver/layers/ConvBaseProjection.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ConvOperator.cpp b/paddle/gserver/layers/ConvOperator.cpp
index 80932c8c50..45498b92d3 100644
--- a/paddle/gserver/layers/ConvOperator.cpp
+++ b/paddle/gserver/layers/ConvOperator.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ConvOperator.h b/paddle/gserver/layers/ConvOperator.h
index 0f3546c67a..fbdb7bb1cd 100644
--- a/paddle/gserver/layers/ConvOperator.h
+++ b/paddle/gserver/layers/ConvOperator.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ConvProjection.cpp b/paddle/gserver/layers/ConvProjection.cpp
index 6f0106b713..f382e6cab1 100644
--- a/paddle/gserver/layers/ConvProjection.cpp
+++ b/paddle/gserver/layers/ConvProjection.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ConvProjection.h b/paddle/gserver/layers/ConvProjection.h
index b7d7cc9a27..e8ecb99431 100644
--- a/paddle/gserver/layers/ConvProjection.h
+++ b/paddle/gserver/layers/ConvProjection.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ConvShiftLayer.cpp b/paddle/gserver/layers/ConvShiftLayer.cpp
index 002be41569..fb87771019 100644
--- a/paddle/gserver/layers/ConvShiftLayer.cpp
+++ b/paddle/gserver/layers/ConvShiftLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ConvTransOperator.cpp b/paddle/gserver/layers/ConvTransOperator.cpp
index db026337a4..ac41d6f9a4 100644
--- a/paddle/gserver/layers/ConvTransOperator.cpp
+++ b/paddle/gserver/layers/ConvTransOperator.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ConvTransOperator.h b/paddle/gserver/layers/ConvTransOperator.h
index ca08dc9aa7..1bf58f2bfb 100644
--- a/paddle/gserver/layers/ConvTransOperator.h
+++ b/paddle/gserver/layers/ConvTransOperator.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ConvTransProjection.cpp b/paddle/gserver/layers/ConvTransProjection.cpp
index e7f081c023..242ce34a60 100644
--- a/paddle/gserver/layers/ConvTransProjection.cpp
+++ b/paddle/gserver/layers/ConvTransProjection.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ConvTransProjection.h b/paddle/gserver/layers/ConvTransProjection.h
index 6508d17b24..269b2694c8 100644
--- a/paddle/gserver/layers/ConvTransProjection.h
+++ b/paddle/gserver/layers/ConvTransProjection.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ConvexCombinationLayer.cpp b/paddle/gserver/layers/ConvexCombinationLayer.cpp
index 32eb3bf604..dce751940c 100644
--- a/paddle/gserver/layers/ConvexCombinationLayer.cpp
+++ b/paddle/gserver/layers/ConvexCombinationLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/CosSimLayer.cpp b/paddle/gserver/layers/CosSimLayer.cpp
index 57ba124e40..4e44a5e8df 100644
--- a/paddle/gserver/layers/CosSimLayer.cpp
+++ b/paddle/gserver/layers/CosSimLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/CosSimLayer.h b/paddle/gserver/layers/CosSimLayer.h
index 8afaee62c2..675cdb16b5 100644
--- a/paddle/gserver/layers/CosSimLayer.h
+++ b/paddle/gserver/layers/CosSimLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/CosSimVecMatLayer.cpp b/paddle/gserver/layers/CosSimVecMatLayer.cpp
index 0f887d8adf..685b4e8ef3 100644
--- a/paddle/gserver/layers/CosSimVecMatLayer.cpp
+++ b/paddle/gserver/layers/CosSimVecMatLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/CostLayer.cpp b/paddle/gserver/layers/CostLayer.cpp
index 0bb6f84c22..484f803a83 100644
--- a/paddle/gserver/layers/CostLayer.cpp
+++ b/paddle/gserver/layers/CostLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/CostLayer.h b/paddle/gserver/layers/CostLayer.h
index 0f655b48ee..306c067ed1 100644
--- a/paddle/gserver/layers/CostLayer.h
+++ b/paddle/gserver/layers/CostLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/CropLayer.cpp b/paddle/gserver/layers/CropLayer.cpp
index 69ad913420..bc97ca2f9e 100644
--- a/paddle/gserver/layers/CropLayer.cpp
+++ b/paddle/gserver/layers/CropLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/CropLayer.h b/paddle/gserver/layers/CropLayer.h
index 6b62026210..1a85911ef7 100644
--- a/paddle/gserver/layers/CropLayer.h
+++ b/paddle/gserver/layers/CropLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/CrossChannelNormLayer.cpp b/paddle/gserver/layers/CrossChannelNormLayer.cpp
index d72503217f..644450291e 100644
--- a/paddle/gserver/layers/CrossChannelNormLayer.cpp
+++ b/paddle/gserver/layers/CrossChannelNormLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/CrossEntropyOverBeam.cpp b/paddle/gserver/layers/CrossEntropyOverBeam.cpp
index 578bdbbe72..f3bf214858 100644
--- a/paddle/gserver/layers/CrossEntropyOverBeam.cpp
+++ b/paddle/gserver/layers/CrossEntropyOverBeam.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/CrossEntropyOverBeam.h b/paddle/gserver/layers/CrossEntropyOverBeam.h
index 5643556f43..b47a2933c2 100644
--- a/paddle/gserver/layers/CrossEntropyOverBeam.h
+++ b/paddle/gserver/layers/CrossEntropyOverBeam.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/CudnnBatchNormLayer.cpp b/paddle/gserver/layers/CudnnBatchNormLayer.cpp
index 8390b55026..9a29e6a55e 100644
--- a/paddle/gserver/layers/CudnnBatchNormLayer.cpp
+++ b/paddle/gserver/layers/CudnnBatchNormLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/CudnnBatchNormLayer.h b/paddle/gserver/layers/CudnnBatchNormLayer.h
index 1a3f0c0cbf..aa279f73d6 100644
--- a/paddle/gserver/layers/CudnnBatchNormLayer.h
+++ b/paddle/gserver/layers/CudnnBatchNormLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/CudnnConvBaseLayer.cpp b/paddle/gserver/layers/CudnnConvBaseLayer.cpp
index 9e954615cd..6d0a40a607 100644
--- a/paddle/gserver/layers/CudnnConvBaseLayer.cpp
+++ b/paddle/gserver/layers/CudnnConvBaseLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/CudnnConvBaseLayer.h b/paddle/gserver/layers/CudnnConvBaseLayer.h
index 93a05f94c7..698104e4fb 100644
--- a/paddle/gserver/layers/CudnnConvBaseLayer.h
+++ b/paddle/gserver/layers/CudnnConvBaseLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/CudnnPoolLayer.cpp b/paddle/gserver/layers/CudnnPoolLayer.cpp
index 810a1af2d0..ac6d2168f4 100644
--- a/paddle/gserver/layers/CudnnPoolLayer.cpp
+++ b/paddle/gserver/layers/CudnnPoolLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/CudnnPoolLayer.h b/paddle/gserver/layers/CudnnPoolLayer.h
index f0aa22fe3a..9eb4fc6138 100644
--- a/paddle/gserver/layers/CudnnPoolLayer.h
+++ b/paddle/gserver/layers/CudnnPoolLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/DataLayer.cpp b/paddle/gserver/layers/DataLayer.cpp
index 3551df4e17..4cadaa7663 100644
--- a/paddle/gserver/layers/DataLayer.cpp
+++ b/paddle/gserver/layers/DataLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/DataLayer.h b/paddle/gserver/layers/DataLayer.h
index a9cf1f943c..4b12afe0ef 100644
--- a/paddle/gserver/layers/DataLayer.h
+++ b/paddle/gserver/layers/DataLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/DataNormLayer.cpp b/paddle/gserver/layers/DataNormLayer.cpp
index afd532c949..86da4d6f95 100644
--- a/paddle/gserver/layers/DataNormLayer.cpp
+++ b/paddle/gserver/layers/DataNormLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/DataNormLayer.h b/paddle/gserver/layers/DataNormLayer.h
index f0fd044e5b..2a2a2a4aa7 100644
--- a/paddle/gserver/layers/DataNormLayer.h
+++ b/paddle/gserver/layers/DataNormLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/DeConv3DLayer.cpp b/paddle/gserver/layers/DeConv3DLayer.cpp
index 3eea638649..db6d6e073c 100644
--- a/paddle/gserver/layers/DeConv3DLayer.cpp
+++ b/paddle/gserver/layers/DeConv3DLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
+/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/DeConv3DLayer.h b/paddle/gserver/layers/DeConv3DLayer.h
index a2a3d3f827..57d51cdec6 100644
--- a/paddle/gserver/layers/DeConv3DLayer.h
+++ b/paddle/gserver/layers/DeConv3DLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
+/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/DetectionOutputLayer.cpp b/paddle/gserver/layers/DetectionOutputLayer.cpp
index f9040f7ae7..93fe046c6a 100644
--- a/paddle/gserver/layers/DetectionOutputLayer.cpp
+++ b/paddle/gserver/layers/DetectionOutputLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/DetectionOutputLayer.h b/paddle/gserver/layers/DetectionOutputLayer.h
index a232af0a69..174a6e5d9a 100644
--- a/paddle/gserver/layers/DetectionOutputLayer.h
+++ b/paddle/gserver/layers/DetectionOutputLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/DetectionUtil.cpp b/paddle/gserver/layers/DetectionUtil.cpp
index d83674f45a..0dc45e5a75 100644
--- a/paddle/gserver/layers/DetectionUtil.cpp
+++ b/paddle/gserver/layers/DetectionUtil.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/DetectionUtil.h b/paddle/gserver/layers/DetectionUtil.h
index 641ed873b4..d6502fcf8f 100644
--- a/paddle/gserver/layers/DetectionUtil.h
+++ b/paddle/gserver/layers/DetectionUtil.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/DotMulOperator.cpp b/paddle/gserver/layers/DotMulOperator.cpp
index 55dabd79d0..68db2929ad 100644
--- a/paddle/gserver/layers/DotMulOperator.cpp
+++ b/paddle/gserver/layers/DotMulOperator.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/DotMulProjection.cpp b/paddle/gserver/layers/DotMulProjection.cpp
index 0a1ede3618..86453aae84 100644
--- a/paddle/gserver/layers/DotMulProjection.cpp
+++ b/paddle/gserver/layers/DotMulProjection.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/DotProdLayer.cpp b/paddle/gserver/layers/DotProdLayer.cpp
index 9e2dbe3c3c..5148d93e27 100644
--- a/paddle/gserver/layers/DotProdLayer.cpp
+++ b/paddle/gserver/layers/DotProdLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/EosIdCheckLayer.cpp b/paddle/gserver/layers/EosIdCheckLayer.cpp
index 686f1fa054..470a5b8ea2 100644
--- a/paddle/gserver/layers/EosIdCheckLayer.cpp
+++ b/paddle/gserver/layers/EosIdCheckLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ExpandConvLayer.cpp b/paddle/gserver/layers/ExpandConvLayer.cpp
index 7ff0c73721..3a84786582 100644
--- a/paddle/gserver/layers/ExpandConvLayer.cpp
+++ b/paddle/gserver/layers/ExpandConvLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ExpandConvLayer.h b/paddle/gserver/layers/ExpandConvLayer.h
index a0873de192..be968155ef 100644
--- a/paddle/gserver/layers/ExpandConvLayer.h
+++ b/paddle/gserver/layers/ExpandConvLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ExpandLayer.cpp b/paddle/gserver/layers/ExpandLayer.cpp
index de5acfde05..6b57767540 100644
--- a/paddle/gserver/layers/ExpandLayer.cpp
+++ b/paddle/gserver/layers/ExpandLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ExpandLayer.h b/paddle/gserver/layers/ExpandLayer.h
index c21b3350e2..04bbfcbd04 100644
--- a/paddle/gserver/layers/ExpandLayer.h
+++ b/paddle/gserver/layers/ExpandLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/FactorizationMachineLayer.cpp b/paddle/gserver/layers/FactorizationMachineLayer.cpp
index be26b9ba88..1744faada2 100644
--- a/paddle/gserver/layers/FactorizationMachineLayer.cpp
+++ b/paddle/gserver/layers/FactorizationMachineLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/FactorizationMachineLayer.h b/paddle/gserver/layers/FactorizationMachineLayer.h
index df20a49934..684da4e65a 100644
--- a/paddle/gserver/layers/FactorizationMachineLayer.h
+++ b/paddle/gserver/layers/FactorizationMachineLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/FeatureMapExpandLayer.cpp b/paddle/gserver/layers/FeatureMapExpandLayer.cpp
index 8a2ae6b49f..81b98da45b 100644
--- a/paddle/gserver/layers/FeatureMapExpandLayer.cpp
+++ b/paddle/gserver/layers/FeatureMapExpandLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/FullMatrixProjection.cpp b/paddle/gserver/layers/FullMatrixProjection.cpp
index b8b6f403d6..b9f1bc99fa 100644
--- a/paddle/gserver/layers/FullMatrixProjection.cpp
+++ b/paddle/gserver/layers/FullMatrixProjection.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/FullMatrixProjection.h b/paddle/gserver/layers/FullMatrixProjection.h
index 58499f2e1e..7c4cd1a706 100644
--- a/paddle/gserver/layers/FullMatrixProjection.h
+++ b/paddle/gserver/layers/FullMatrixProjection.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/FullyConnectedLayer.cpp b/paddle/gserver/layers/FullyConnectedLayer.cpp
index d8a667ff8d..21ffa01d95 100644
--- a/paddle/gserver/layers/FullyConnectedLayer.cpp
+++ b/paddle/gserver/layers/FullyConnectedLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/FullyConnectedLayer.h b/paddle/gserver/layers/FullyConnectedLayer.h
index 64e7a05012..e66aeeb733 100644
--- a/paddle/gserver/layers/FullyConnectedLayer.h
+++ b/paddle/gserver/layers/FullyConnectedLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/GatedRecurrentLayer.cpp b/paddle/gserver/layers/GatedRecurrentLayer.cpp
index d3aeea9218..9d38849fdf 100644
--- a/paddle/gserver/layers/GatedRecurrentLayer.cpp
+++ b/paddle/gserver/layers/GatedRecurrentLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/GatedRecurrentLayer.h b/paddle/gserver/layers/GatedRecurrentLayer.h
index 58dd760eb8..f0a3a82301 100644
--- a/paddle/gserver/layers/GatedRecurrentLayer.h
+++ b/paddle/gserver/layers/GatedRecurrentLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/GetOutputLayer.cpp b/paddle/gserver/layers/GetOutputLayer.cpp
index 4e29efd461..f255681f3e 100644
--- a/paddle/gserver/layers/GetOutputLayer.cpp
+++ b/paddle/gserver/layers/GetOutputLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/GruCompute.cpp b/paddle/gserver/layers/GruCompute.cpp
index 148516391c..48ddbc413e 100644
--- a/paddle/gserver/layers/GruCompute.cpp
+++ b/paddle/gserver/layers/GruCompute.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/GruCompute.cu b/paddle/gserver/layers/GruCompute.cu
index b4f5c54b14..54be6b8047 100644
--- a/paddle/gserver/layers/GruCompute.cu
+++ b/paddle/gserver/layers/GruCompute.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/GruCompute.h b/paddle/gserver/layers/GruCompute.h
index 3340e38e62..fb6bc56422 100644
--- a/paddle/gserver/layers/GruCompute.h
+++ b/paddle/gserver/layers/GruCompute.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/GruStepLayer.cpp b/paddle/gserver/layers/GruStepLayer.cpp
index 5b5cb25f92..917c50250c 100644
--- a/paddle/gserver/layers/GruStepLayer.cpp
+++ b/paddle/gserver/layers/GruStepLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/HierarchicalSigmoidLayer.cpp b/paddle/gserver/layers/HierarchicalSigmoidLayer.cpp
index 236f8096bd..3e720f179e 100644
--- a/paddle/gserver/layers/HierarchicalSigmoidLayer.cpp
+++ b/paddle/gserver/layers/HierarchicalSigmoidLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/HierarchicalSigmoidLayer.h b/paddle/gserver/layers/HierarchicalSigmoidLayer.h
index 7f896e61ca..10e501f180 100644
--- a/paddle/gserver/layers/HierarchicalSigmoidLayer.h
+++ b/paddle/gserver/layers/HierarchicalSigmoidLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/IdentityProjection.cpp b/paddle/gserver/layers/IdentityProjection.cpp
index f1d41a33d4..6c70f77acc 100644
--- a/paddle/gserver/layers/IdentityProjection.cpp
+++ b/paddle/gserver/layers/IdentityProjection.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/InterpolationLayer.cpp b/paddle/gserver/layers/InterpolationLayer.cpp
index eac7428571..0ac92024bc 100644
--- a/paddle/gserver/layers/InterpolationLayer.cpp
+++ b/paddle/gserver/layers/InterpolationLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/KmaxSeqScoreLayer.cpp b/paddle/gserver/layers/KmaxSeqScoreLayer.cpp
index d5407555b2..0ea960902e 100644
--- a/paddle/gserver/layers/KmaxSeqScoreLayer.cpp
+++ b/paddle/gserver/layers/KmaxSeqScoreLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/L2DistanceLayer.cpp b/paddle/gserver/layers/L2DistanceLayer.cpp
index c71df1b92c..c8cca3762c 100644
--- a/paddle/gserver/layers/L2DistanceLayer.cpp
+++ b/paddle/gserver/layers/L2DistanceLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/L2DistanceLayer.h b/paddle/gserver/layers/L2DistanceLayer.h
index 9b12847a10..97f35daf78 100644
--- a/paddle/gserver/layers/L2DistanceLayer.h
+++ b/paddle/gserver/layers/L2DistanceLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/Layer.cpp b/paddle/gserver/layers/Layer.cpp
index b55b86221c..32e2f4c9dd 100644
--- a/paddle/gserver/layers/Layer.cpp
+++ b/paddle/gserver/layers/Layer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/Layer.h b/paddle/gserver/layers/Layer.h
index 9813a55607..8da342a00f 100644
--- a/paddle/gserver/layers/Layer.h
+++ b/paddle/gserver/layers/Layer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/LinearChainCRF.cpp b/paddle/gserver/layers/LinearChainCRF.cpp
index abaa1802b7..315fc25fab 100644
--- a/paddle/gserver/layers/LinearChainCRF.cpp
+++ b/paddle/gserver/layers/LinearChainCRF.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/LinearChainCRF.h b/paddle/gserver/layers/LinearChainCRF.h
index 8daf1e14a6..1ea4c7e105 100644
--- a/paddle/gserver/layers/LinearChainCRF.h
+++ b/paddle/gserver/layers/LinearChainCRF.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/LinearChainCTC.cpp b/paddle/gserver/layers/LinearChainCTC.cpp
index cb2b249110..1fad545b7a 100644
--- a/paddle/gserver/layers/LinearChainCTC.cpp
+++ b/paddle/gserver/layers/LinearChainCTC.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/LinearChainCTC.h b/paddle/gserver/layers/LinearChainCTC.h
index 737c9d5c31..0b774277dc 100644
--- a/paddle/gserver/layers/LinearChainCTC.h
+++ b/paddle/gserver/layers/LinearChainCTC.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/LstmCompute.cpp b/paddle/gserver/layers/LstmCompute.cpp
index 4c42970964..ea30f6d6b1 100644
--- a/paddle/gserver/layers/LstmCompute.cpp
+++ b/paddle/gserver/layers/LstmCompute.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/LstmCompute.cu b/paddle/gserver/layers/LstmCompute.cu
index d3f59b52a4..3f15edcaca 100644
--- a/paddle/gserver/layers/LstmCompute.cu
+++ b/paddle/gserver/layers/LstmCompute.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/LstmCompute.h b/paddle/gserver/layers/LstmCompute.h
index 2588fad279..b7d55eb1f9 100644
--- a/paddle/gserver/layers/LstmCompute.h
+++ b/paddle/gserver/layers/LstmCompute.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/LstmLayer.cpp b/paddle/gserver/layers/LstmLayer.cpp
index 01cc5fec8b..f65ae6a3e6 100644
--- a/paddle/gserver/layers/LstmLayer.cpp
+++ b/paddle/gserver/layers/LstmLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/LstmLayer.h b/paddle/gserver/layers/LstmLayer.h
index c45a52d2e9..4568b13ade 100644
--- a/paddle/gserver/layers/LstmLayer.h
+++ b/paddle/gserver/layers/LstmLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/LstmStepLayer.cpp b/paddle/gserver/layers/LstmStepLayer.cpp
index 568277a90c..8faaa1c4e1 100644
--- a/paddle/gserver/layers/LstmStepLayer.cpp
+++ b/paddle/gserver/layers/LstmStepLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MDLstmLayer.cpp b/paddle/gserver/layers/MDLstmLayer.cpp
index be0f2a07d4..7cfdb3ff25 100644
--- a/paddle/gserver/layers/MDLstmLayer.cpp
+++ b/paddle/gserver/layers/MDLstmLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MKLDNNAddtoLayer.cpp b/paddle/gserver/layers/MKLDNNAddtoLayer.cpp
index 39bffc26f7..544b4082fa 100644
--- a/paddle/gserver/layers/MKLDNNAddtoLayer.cpp
+++ b/paddle/gserver/layers/MKLDNNAddtoLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MKLDNNAddtoLayer.h b/paddle/gserver/layers/MKLDNNAddtoLayer.h
index 0ea3e208e5..e40e2f2251 100644
--- a/paddle/gserver/layers/MKLDNNAddtoLayer.h
+++ b/paddle/gserver/layers/MKLDNNAddtoLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MKLDNNBase.h b/paddle/gserver/layers/MKLDNNBase.h
index af02a37cad..d84e285940 100644
--- a/paddle/gserver/layers/MKLDNNBase.h
+++ b/paddle/gserver/layers/MKLDNNBase.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MKLDNNBatchNormLayer.cpp b/paddle/gserver/layers/MKLDNNBatchNormLayer.cpp
index 7faca0f8b7..dbdfaff32f 100644
--- a/paddle/gserver/layers/MKLDNNBatchNormLayer.cpp
+++ b/paddle/gserver/layers/MKLDNNBatchNormLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MKLDNNBatchNormLayer.h b/paddle/gserver/layers/MKLDNNBatchNormLayer.h
index 1cf33cb34f..93e182206a 100644
--- a/paddle/gserver/layers/MKLDNNBatchNormLayer.h
+++ b/paddle/gserver/layers/MKLDNNBatchNormLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MKLDNNConcatLayer.cpp b/paddle/gserver/layers/MKLDNNConcatLayer.cpp
index 520ccc1a99..beed6176e1 100644
--- a/paddle/gserver/layers/MKLDNNConcatLayer.cpp
+++ b/paddle/gserver/layers/MKLDNNConcatLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MKLDNNConcatLayer.h b/paddle/gserver/layers/MKLDNNConcatLayer.h
index 37f3a26c5e..f7abdabfb5 100644
--- a/paddle/gserver/layers/MKLDNNConcatLayer.h
+++ b/paddle/gserver/layers/MKLDNNConcatLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MKLDNNConvLayer.cpp b/paddle/gserver/layers/MKLDNNConvLayer.cpp
index ab1d0f7b04..a442a0a013 100644
--- a/paddle/gserver/layers/MKLDNNConvLayer.cpp
+++ b/paddle/gserver/layers/MKLDNNConvLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MKLDNNConvLayer.h b/paddle/gserver/layers/MKLDNNConvLayer.h
index 3e754a0e65..29c8735fbb 100644
--- a/paddle/gserver/layers/MKLDNNConvLayer.h
+++ b/paddle/gserver/layers/MKLDNNConvLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MKLDNNFcLayer.cpp b/paddle/gserver/layers/MKLDNNFcLayer.cpp
index c8778bdd07..0c7e6f16e2 100644
--- a/paddle/gserver/layers/MKLDNNFcLayer.cpp
+++ b/paddle/gserver/layers/MKLDNNFcLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MKLDNNFcLayer.h b/paddle/gserver/layers/MKLDNNFcLayer.h
index 283dc9b540..0d41a4379d 100644
--- a/paddle/gserver/layers/MKLDNNFcLayer.h
+++ b/paddle/gserver/layers/MKLDNNFcLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MKLDNNLRNLayer.cpp b/paddle/gserver/layers/MKLDNNLRNLayer.cpp
index ac217f1363..88513ab8bc 100644
--- a/paddle/gserver/layers/MKLDNNLRNLayer.cpp
+++ b/paddle/gserver/layers/MKLDNNLRNLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MKLDNNLRNLayer.h b/paddle/gserver/layers/MKLDNNLRNLayer.h
index cfe5621252..b503ee5594 100644
--- a/paddle/gserver/layers/MKLDNNLRNLayer.h
+++ b/paddle/gserver/layers/MKLDNNLRNLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MKLDNNLayer.cpp b/paddle/gserver/layers/MKLDNNLayer.cpp
index 2d0fff608c..f0acffe871 100644
--- a/paddle/gserver/layers/MKLDNNLayer.cpp
+++ b/paddle/gserver/layers/MKLDNNLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MKLDNNLayer.h b/paddle/gserver/layers/MKLDNNLayer.h
index 3ba39f18b6..4a7eb74ce3 100644
--- a/paddle/gserver/layers/MKLDNNLayer.h
+++ b/paddle/gserver/layers/MKLDNNLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MKLDNNPoolLayer.cpp b/paddle/gserver/layers/MKLDNNPoolLayer.cpp
index a8252593c8..3be848c749 100644
--- a/paddle/gserver/layers/MKLDNNPoolLayer.cpp
+++ b/paddle/gserver/layers/MKLDNNPoolLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MKLDNNPoolLayer.h b/paddle/gserver/layers/MKLDNNPoolLayer.h
index dad60156f0..12821cda73 100644
--- a/paddle/gserver/layers/MKLDNNPoolLayer.h
+++ b/paddle/gserver/layers/MKLDNNPoolLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MKLPackedRecurrentLayer.cpp b/paddle/gserver/layers/MKLPackedRecurrentLayer.cpp
index dd75555fae..d928ebc324 100644
--- a/paddle/gserver/layers/MKLPackedRecurrentLayer.cpp
+++ b/paddle/gserver/layers/MKLPackedRecurrentLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MKLPackedRecurrentLayer.h b/paddle/gserver/layers/MKLPackedRecurrentLayer.h
index bded523a8f..37eb362d45 100644
--- a/paddle/gserver/layers/MKLPackedRecurrentLayer.h
+++ b/paddle/gserver/layers/MKLPackedRecurrentLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MKLPackedWeight.h b/paddle/gserver/layers/MKLPackedWeight.h
index 15d5093beb..28b8a7db7c 100644
--- a/paddle/gserver/layers/MKLPackedWeight.h
+++ b/paddle/gserver/layers/MKLPackedWeight.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MaxIdLayer.cpp b/paddle/gserver/layers/MaxIdLayer.cpp
index 9e72b167cd..84e375d744 100644
--- a/paddle/gserver/layers/MaxIdLayer.cpp
+++ b/paddle/gserver/layers/MaxIdLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MaxLayer.cpp b/paddle/gserver/layers/MaxLayer.cpp
index 23629e1986..7ee2e0dd94 100644
--- a/paddle/gserver/layers/MaxLayer.cpp
+++ b/paddle/gserver/layers/MaxLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MaxLayer.h b/paddle/gserver/layers/MaxLayer.h
index fa536fce2b..9dbc672652 100644
--- a/paddle/gserver/layers/MaxLayer.h
+++ b/paddle/gserver/layers/MaxLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MaxOutLayer.cpp b/paddle/gserver/layers/MaxOutLayer.cpp
index 3a86a95321..919f62a45b 100644
--- a/paddle/gserver/layers/MaxOutLayer.cpp
+++ b/paddle/gserver/layers/MaxOutLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MaxOutLayer.h b/paddle/gserver/layers/MaxOutLayer.h
index 73fd8536be..1fb371836b 100644
--- a/paddle/gserver/layers/MaxOutLayer.h
+++ b/paddle/gserver/layers/MaxOutLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MaxPoolWithMaskLayer.cpp b/paddle/gserver/layers/MaxPoolWithMaskLayer.cpp
index d810a58d9a..e594e22b5e 100644
--- a/paddle/gserver/layers/MaxPoolWithMaskLayer.cpp
+++ b/paddle/gserver/layers/MaxPoolWithMaskLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MaxPoolWithMaskLayer.h b/paddle/gserver/layers/MaxPoolWithMaskLayer.h
index e0174add9d..74cc8acf35 100644
--- a/paddle/gserver/layers/MaxPoolWithMaskLayer.h
+++ b/paddle/gserver/layers/MaxPoolWithMaskLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MixedLayer.cpp b/paddle/gserver/layers/MixedLayer.cpp
index 2525b1984b..7dcb30b98d 100644
--- a/paddle/gserver/layers/MixedLayer.cpp
+++ b/paddle/gserver/layers/MixedLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MixedLayer.h b/paddle/gserver/layers/MixedLayer.h
index 755c9deb8b..a1a43c52e4 100644
--- a/paddle/gserver/layers/MixedLayer.h
+++ b/paddle/gserver/layers/MixedLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MultiBoxLossLayer.cpp b/paddle/gserver/layers/MultiBoxLossLayer.cpp
index bbf1166dce..335e9a6ac4 100644
--- a/paddle/gserver/layers/MultiBoxLossLayer.cpp
+++ b/paddle/gserver/layers/MultiBoxLossLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MultinomialSampler.cpp b/paddle/gserver/layers/MultinomialSampler.cpp
index 0b285ed20f..e74ed795a1 100644
--- a/paddle/gserver/layers/MultinomialSampler.cpp
+++ b/paddle/gserver/layers/MultinomialSampler.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MultinomialSampler.h b/paddle/gserver/layers/MultinomialSampler.h
index 546ef9c1f2..1f9e818ee5 100644
--- a/paddle/gserver/layers/MultinomialSampler.h
+++ b/paddle/gserver/layers/MultinomialSampler.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/MultiplexLayer.cpp b/paddle/gserver/layers/MultiplexLayer.cpp
index 297972b3cd..82857f8c3e 100644
--- a/paddle/gserver/layers/MultiplexLayer.cpp
+++ b/paddle/gserver/layers/MultiplexLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/NCELayer.cpp b/paddle/gserver/layers/NCELayer.cpp
index 0bc2ef1182..d3d7b1fd9a 100644
--- a/paddle/gserver/layers/NCELayer.cpp
+++ b/paddle/gserver/layers/NCELayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/NormLayer.cpp b/paddle/gserver/layers/NormLayer.cpp
index caef710092..4678f6fa9a 100644
--- a/paddle/gserver/layers/NormLayer.cpp
+++ b/paddle/gserver/layers/NormLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/NormLayer.h b/paddle/gserver/layers/NormLayer.h
index 7c238ac944..c89cbbfce9 100644
--- a/paddle/gserver/layers/NormLayer.h
+++ b/paddle/gserver/layers/NormLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/NormProjectionLayer.cpp b/paddle/gserver/layers/NormProjectionLayer.cpp
index 4331009de7..3013bbdbc7 100644
--- a/paddle/gserver/layers/NormProjectionLayer.cpp
+++ b/paddle/gserver/layers/NormProjectionLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/NormProjectionLayer.h b/paddle/gserver/layers/NormProjectionLayer.h
index 2997ae8848..898b5823a9 100644
--- a/paddle/gserver/layers/NormProjectionLayer.h
+++ b/paddle/gserver/layers/NormProjectionLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/Operator.cpp b/paddle/gserver/layers/Operator.cpp
index a638933914..5b9cf8d15d 100644
--- a/paddle/gserver/layers/Operator.cpp
+++ b/paddle/gserver/layers/Operator.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/Operator.h b/paddle/gserver/layers/Operator.h
index 6fd331382f..a620926ccc 100644
--- a/paddle/gserver/layers/Operator.h
+++ b/paddle/gserver/layers/Operator.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/OuterProdLayer.cpp b/paddle/gserver/layers/OuterProdLayer.cpp
index 283fdb003a..75f4abf93e 100644
--- a/paddle/gserver/layers/OuterProdLayer.cpp
+++ b/paddle/gserver/layers/OuterProdLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/PadLayer.cpp b/paddle/gserver/layers/PadLayer.cpp
index a5ed7e057a..b1910e108b 100644
--- a/paddle/gserver/layers/PadLayer.cpp
+++ b/paddle/gserver/layers/PadLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/PadLayer.h b/paddle/gserver/layers/PadLayer.h
index fe9388d8cc..7e09d7f8a0 100644
--- a/paddle/gserver/layers/PadLayer.h
+++ b/paddle/gserver/layers/PadLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ParameterReluLayer.cpp b/paddle/gserver/layers/ParameterReluLayer.cpp
index 836c1981ba..12d04fc1c3 100644
--- a/paddle/gserver/layers/ParameterReluLayer.cpp
+++ b/paddle/gserver/layers/ParameterReluLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ParameterReluLayer.h b/paddle/gserver/layers/ParameterReluLayer.h
index 9a11b81ebf..3725fa4a11 100644
--- a/paddle/gserver/layers/ParameterReluLayer.h
+++ b/paddle/gserver/layers/ParameterReluLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/Pool3DLayer.cpp b/paddle/gserver/layers/Pool3DLayer.cpp
index 199f21adb1..3ac9eb0d81 100644
--- a/paddle/gserver/layers/Pool3DLayer.cpp
+++ b/paddle/gserver/layers/Pool3DLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/Pool3DLayer.h b/paddle/gserver/layers/Pool3DLayer.h
index 8329a02f57..59ee73f7cb 100644
--- a/paddle/gserver/layers/Pool3DLayer.h
+++ b/paddle/gserver/layers/Pool3DLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/PoolLayer.cpp b/paddle/gserver/layers/PoolLayer.cpp
index fceb389d06..ee589e6be5 100644
--- a/paddle/gserver/layers/PoolLayer.cpp
+++ b/paddle/gserver/layers/PoolLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/PoolLayer.h b/paddle/gserver/layers/PoolLayer.h
index 9df672a935..58d5fb0a09 100644
--- a/paddle/gserver/layers/PoolLayer.h
+++ b/paddle/gserver/layers/PoolLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/PoolProjection.cpp b/paddle/gserver/layers/PoolProjection.cpp
index 6a9de394ce..73ce88adf2 100644
--- a/paddle/gserver/layers/PoolProjection.cpp
+++ b/paddle/gserver/layers/PoolProjection.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/PoolProjection.h b/paddle/gserver/layers/PoolProjection.h
index a0412714bc..c99287dbf0 100644
--- a/paddle/gserver/layers/PoolProjection.h
+++ b/paddle/gserver/layers/PoolProjection.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/PoolProjectionLayer.cpp b/paddle/gserver/layers/PoolProjectionLayer.cpp
index ed5011ab89..73d320e67e 100644
--- a/paddle/gserver/layers/PoolProjectionLayer.cpp
+++ b/paddle/gserver/layers/PoolProjectionLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/PoolProjectionLayer.h b/paddle/gserver/layers/PoolProjectionLayer.h
index e31116de8c..5a97a7769a 100644
--- a/paddle/gserver/layers/PoolProjectionLayer.h
+++ b/paddle/gserver/layers/PoolProjectionLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/PowerLayer.cpp b/paddle/gserver/layers/PowerLayer.cpp
index 31c34b43e2..18f650fcda 100644
--- a/paddle/gserver/layers/PowerLayer.cpp
+++ b/paddle/gserver/layers/PowerLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/PrintLayer.cpp b/paddle/gserver/layers/PrintLayer.cpp
index e83ae34bbe..5a527d598d 100644
--- a/paddle/gserver/layers/PrintLayer.cpp
+++ b/paddle/gserver/layers/PrintLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/PriorBox.cpp b/paddle/gserver/layers/PriorBox.cpp
index 8faf032f55..af2cc05a95 100644
--- a/paddle/gserver/layers/PriorBox.cpp
+++ b/paddle/gserver/layers/PriorBox.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/Projection.cpp b/paddle/gserver/layers/Projection.cpp
index 974b3cf059..96d61e7f67 100644
--- a/paddle/gserver/layers/Projection.cpp
+++ b/paddle/gserver/layers/Projection.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/Projection.h b/paddle/gserver/layers/Projection.h
index 778a7fe13d..1f0b96c79e 100644
--- a/paddle/gserver/layers/Projection.h
+++ b/paddle/gserver/layers/Projection.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ROIPoolLayer.cpp b/paddle/gserver/layers/ROIPoolLayer.cpp
index 7d7c30b4d8..b5cbc0c704 100644
--- a/paddle/gserver/layers/ROIPoolLayer.cpp
+++ b/paddle/gserver/layers/ROIPoolLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ROIPoolLayer.h b/paddle/gserver/layers/ROIPoolLayer.h
index 4f07e49d6f..b1735e9748 100644
--- a/paddle/gserver/layers/ROIPoolLayer.h
+++ b/paddle/gserver/layers/ROIPoolLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/RecurrentLayer.cpp b/paddle/gserver/layers/RecurrentLayer.cpp
index 6bd42c06ca..3fc5bd15ed 100644
--- a/paddle/gserver/layers/RecurrentLayer.cpp
+++ b/paddle/gserver/layers/RecurrentLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/RecurrentLayer.h b/paddle/gserver/layers/RecurrentLayer.h
index f40dbe150f..8fd4fe6b78 100644
--- a/paddle/gserver/layers/RecurrentLayer.h
+++ b/paddle/gserver/layers/RecurrentLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/RecurrentLayerGroup.cpp b/paddle/gserver/layers/RecurrentLayerGroup.cpp
index 78a74ff19a..27e8b5868e 100644
--- a/paddle/gserver/layers/RecurrentLayerGroup.cpp
+++ b/paddle/gserver/layers/RecurrentLayerGroup.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ResizeLayer.cpp b/paddle/gserver/layers/ResizeLayer.cpp
index eb3b63c106..831f4c3b7e 100644
--- a/paddle/gserver/layers/ResizeLayer.cpp
+++ b/paddle/gserver/layers/ResizeLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/RotateLayer.cpp b/paddle/gserver/layers/RotateLayer.cpp
index 7c71088d78..f205d1a919 100644
--- a/paddle/gserver/layers/RotateLayer.cpp
+++ b/paddle/gserver/layers/RotateLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
+/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/RotateLayer.h b/paddle/gserver/layers/RotateLayer.h
index d05c2065cb..3b619921ab 100644
--- a/paddle/gserver/layers/RotateLayer.h
+++ b/paddle/gserver/layers/RotateLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
+/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/RowConvLayer.cpp b/paddle/gserver/layers/RowConvLayer.cpp
index 54d77999ad..63b499e486 100644
--- a/paddle/gserver/layers/RowConvLayer.cpp
+++ b/paddle/gserver/layers/RowConvLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/RowConvLayer.h b/paddle/gserver/layers/RowConvLayer.h
index b3bdda2f35..ba0af1de68 100644
--- a/paddle/gserver/layers/RowConvLayer.h
+++ b/paddle/gserver/layers/RowConvLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/RowL2NormLayer.cpp b/paddle/gserver/layers/RowL2NormLayer.cpp
index 0d609be43b..7ff0c9bae9 100644
--- a/paddle/gserver/layers/RowL2NormLayer.cpp
+++ b/paddle/gserver/layers/RowL2NormLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/SamplingIdLayer.cpp b/paddle/gserver/layers/SamplingIdLayer.cpp
index 2538d99bb7..2edd915d22 100644
--- a/paddle/gserver/layers/SamplingIdLayer.cpp
+++ b/paddle/gserver/layers/SamplingIdLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ScaleShiftLayer.cpp b/paddle/gserver/layers/ScaleShiftLayer.cpp
index 35fd038ab4..799d1fe51a 100644
--- a/paddle/gserver/layers/ScaleShiftLayer.cpp
+++ b/paddle/gserver/layers/ScaleShiftLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ScaleSubRegionLayer.cpp b/paddle/gserver/layers/ScaleSubRegionLayer.cpp
index aa6778aef4..68a0ff7358 100644
--- a/paddle/gserver/layers/ScaleSubRegionLayer.cpp
+++ b/paddle/gserver/layers/ScaleSubRegionLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ScaleSubRegionLayer.h b/paddle/gserver/layers/ScaleSubRegionLayer.h
index a27c56de93..6e861be485 100644
--- a/paddle/gserver/layers/ScaleSubRegionLayer.h
+++ b/paddle/gserver/layers/ScaleSubRegionLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ScalingLayer.cpp b/paddle/gserver/layers/ScalingLayer.cpp
index a38ee0857a..1d98a7373d 100644
--- a/paddle/gserver/layers/ScalingLayer.cpp
+++ b/paddle/gserver/layers/ScalingLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ScalingProjection.cpp b/paddle/gserver/layers/ScalingProjection.cpp
index ddb8c87110..99b5b68f54 100644
--- a/paddle/gserver/layers/ScalingProjection.cpp
+++ b/paddle/gserver/layers/ScalingProjection.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/SelectiveFullyConnectedLayer.cpp b/paddle/gserver/layers/SelectiveFullyConnectedLayer.cpp
index d9a91de8a6..43c98993f3 100644
--- a/paddle/gserver/layers/SelectiveFullyConnectedLayer.cpp
+++ b/paddle/gserver/layers/SelectiveFullyConnectedLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/SelectiveFullyConnectedLayer.h b/paddle/gserver/layers/SelectiveFullyConnectedLayer.h
index 99126fdba5..8156407418 100644
--- a/paddle/gserver/layers/SelectiveFullyConnectedLayer.h
+++ b/paddle/gserver/layers/SelectiveFullyConnectedLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/SequenceConcatLayer.cpp b/paddle/gserver/layers/SequenceConcatLayer.cpp
index 4b24d8f0c8..cf573f3f33 100644
--- a/paddle/gserver/layers/SequenceConcatLayer.cpp
+++ b/paddle/gserver/layers/SequenceConcatLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/SequenceLastInstanceLayer.cpp b/paddle/gserver/layers/SequenceLastInstanceLayer.cpp
index 323cc47df1..6c4ae775c1 100644
--- a/paddle/gserver/layers/SequenceLastInstanceLayer.cpp
+++ b/paddle/gserver/layers/SequenceLastInstanceLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/SequencePoolLayer.cpp b/paddle/gserver/layers/SequencePoolLayer.cpp
index 2a693b110a..650ab425d1 100644
--- a/paddle/gserver/layers/SequencePoolLayer.cpp
+++ b/paddle/gserver/layers/SequencePoolLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/SequencePoolLayer.h b/paddle/gserver/layers/SequencePoolLayer.h
index e207afd1dc..254e4cc6b3 100644
--- a/paddle/gserver/layers/SequencePoolLayer.h
+++ b/paddle/gserver/layers/SequencePoolLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/SequenceReshapeLayer.cpp b/paddle/gserver/layers/SequenceReshapeLayer.cpp
index 8229744072..fb96669917 100644
--- a/paddle/gserver/layers/SequenceReshapeLayer.cpp
+++ b/paddle/gserver/layers/SequenceReshapeLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/SequenceSliceLayer.cpp b/paddle/gserver/layers/SequenceSliceLayer.cpp
index ce68ca4494..1b7c33477e 100644
--- a/paddle/gserver/layers/SequenceSliceLayer.cpp
+++ b/paddle/gserver/layers/SequenceSliceLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/SequenceToBatch.cpp b/paddle/gserver/layers/SequenceToBatch.cpp
index 6b769378d2..5d0d588e67 100644
--- a/paddle/gserver/layers/SequenceToBatch.cpp
+++ b/paddle/gserver/layers/SequenceToBatch.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/SequenceToBatch.h b/paddle/gserver/layers/SequenceToBatch.h
index 17e735a135..8743a5ef10 100644
--- a/paddle/gserver/layers/SequenceToBatch.h
+++ b/paddle/gserver/layers/SequenceToBatch.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/SliceProjection.cpp b/paddle/gserver/layers/SliceProjection.cpp
index 267dd6154b..5627ad1eb3 100644
--- a/paddle/gserver/layers/SliceProjection.cpp
+++ b/paddle/gserver/layers/SliceProjection.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/SlopeInterceptLayer.cpp b/paddle/gserver/layers/SlopeInterceptLayer.cpp
index faf98744a7..c94a07e5da 100644
--- a/paddle/gserver/layers/SlopeInterceptLayer.cpp
+++ b/paddle/gserver/layers/SlopeInterceptLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/SpatialPyramidPoolLayer.cpp b/paddle/gserver/layers/SpatialPyramidPoolLayer.cpp
index 14fe88ff8a..b445a399ef 100644
--- a/paddle/gserver/layers/SpatialPyramidPoolLayer.cpp
+++ b/paddle/gserver/layers/SpatialPyramidPoolLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/SpatialPyramidPoolLayer.h b/paddle/gserver/layers/SpatialPyramidPoolLayer.h
index 7d3cb80443..6cb5fdf83e 100644
--- a/paddle/gserver/layers/SpatialPyramidPoolLayer.h
+++ b/paddle/gserver/layers/SpatialPyramidPoolLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/SubNestedSequenceLayer.cpp b/paddle/gserver/layers/SubNestedSequenceLayer.cpp
index e9bee77212..db240ab0c9 100644
--- a/paddle/gserver/layers/SubNestedSequenceLayer.cpp
+++ b/paddle/gserver/layers/SubNestedSequenceLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/SubSequenceLayer.cpp b/paddle/gserver/layers/SubSequenceLayer.cpp
index 00d8ce017a..808627f092 100644
--- a/paddle/gserver/layers/SubSequenceLayer.cpp
+++ b/paddle/gserver/layers/SubSequenceLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/SumToOneNormLayer.cpp b/paddle/gserver/layers/SumToOneNormLayer.cpp
index 00f8519550..ffbe149253 100644
--- a/paddle/gserver/layers/SumToOneNormLayer.cpp
+++ b/paddle/gserver/layers/SumToOneNormLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/SwitchOrderLayer.cpp b/paddle/gserver/layers/SwitchOrderLayer.cpp
index e97809141a..704735de38 100644
--- a/paddle/gserver/layers/SwitchOrderLayer.cpp
+++ b/paddle/gserver/layers/SwitchOrderLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/SwitchOrderLayer.h b/paddle/gserver/layers/SwitchOrderLayer.h
index 47b1f7f73e..882437f443 100644
--- a/paddle/gserver/layers/SwitchOrderLayer.h
+++ b/paddle/gserver/layers/SwitchOrderLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/TableProjection.cpp b/paddle/gserver/layers/TableProjection.cpp
index 270acdd34b..326e241d07 100644
--- a/paddle/gserver/layers/TableProjection.cpp
+++ b/paddle/gserver/layers/TableProjection.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/TableProjection.h b/paddle/gserver/layers/TableProjection.h
index fb6c0e17c2..ffb05e68f0 100644
--- a/paddle/gserver/layers/TableProjection.h
+++ b/paddle/gserver/layers/TableProjection.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/TensorLayer.cpp b/paddle/gserver/layers/TensorLayer.cpp
index 5be88d7c05..b2271c63ef 100644
--- a/paddle/gserver/layers/TensorLayer.cpp
+++ b/paddle/gserver/layers/TensorLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/TensorLayer.h b/paddle/gserver/layers/TensorLayer.h
index 43992f692d..8a323aa15f 100644
--- a/paddle/gserver/layers/TensorLayer.h
+++ b/paddle/gserver/layers/TensorLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/TransLayer.cpp b/paddle/gserver/layers/TransLayer.cpp
index 4150f1727d..cf87ca53d1 100644
--- a/paddle/gserver/layers/TransLayer.cpp
+++ b/paddle/gserver/layers/TransLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/TransLayer.h b/paddle/gserver/layers/TransLayer.h
index be10bb74f6..03d0948624 100644
--- a/paddle/gserver/layers/TransLayer.h
+++ b/paddle/gserver/layers/TransLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/TransposedFullMatrixProjection.cpp b/paddle/gserver/layers/TransposedFullMatrixProjection.cpp
index 2a12499e5b..755389f707 100644
--- a/paddle/gserver/layers/TransposedFullMatrixProjection.cpp
+++ b/paddle/gserver/layers/TransposedFullMatrixProjection.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ValidationLayer.cpp b/paddle/gserver/layers/ValidationLayer.cpp
index 5127bcaba3..b626825a7b 100644
--- a/paddle/gserver/layers/ValidationLayer.cpp
+++ b/paddle/gserver/layers/ValidationLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/ValidationLayer.h b/paddle/gserver/layers/ValidationLayer.h
index c8b2634a13..f412d685c0 100644
--- a/paddle/gserver/layers/ValidationLayer.h
+++ b/paddle/gserver/layers/ValidationLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/WarpCTCLayer.cpp b/paddle/gserver/layers/WarpCTCLayer.cpp
index 94e926a8d8..6b1656a523 100644
--- a/paddle/gserver/layers/WarpCTCLayer.cpp
+++ b/paddle/gserver/layers/WarpCTCLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/layers/WarpCTCLayer.h b/paddle/gserver/layers/WarpCTCLayer.h
index 7e8d7379d2..6f6be359c0 100644
--- a/paddle/gserver/layers/WarpCTCLayer.h
+++ b/paddle/gserver/layers/WarpCTCLayer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/LayerGradUtil.cpp b/paddle/gserver/tests/LayerGradUtil.cpp
index cd957c7c0b..f08c1cd1d5 100644
--- a/paddle/gserver/tests/LayerGradUtil.cpp
+++ b/paddle/gserver/tests/LayerGradUtil.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/LayerGradUtil.h b/paddle/gserver/tests/LayerGradUtil.h
index e10a27eedf..1999b2204b 100644
--- a/paddle/gserver/tests/LayerGradUtil.h
+++ b/paddle/gserver/tests/LayerGradUtil.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/MKLDNNTester.cpp b/paddle/gserver/tests/MKLDNNTester.cpp
index afe1608eab..d2a9761a4e 100644
--- a/paddle/gserver/tests/MKLDNNTester.cpp
+++ b/paddle/gserver/tests/MKLDNNTester.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/MKLDNNTester.h b/paddle/gserver/tests/MKLDNNTester.h
index 9d61533c0b..c1faa6fd90 100644
--- a/paddle/gserver/tests/MKLDNNTester.h
+++ b/paddle/gserver/tests/MKLDNNTester.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/img_conv_cudnn.py b/paddle/gserver/tests/img_conv_cudnn.py
index 0ea6d6bae6..fd889ee1ce 100644
--- a/paddle/gserver/tests/img_conv_cudnn.py
+++ b/paddle/gserver/tests/img_conv_cudnn.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 #
 #Licensed under the Apache License, Version 2.0 (the "License");
 #you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/img_conv_exconv.py b/paddle/gserver/tests/img_conv_exconv.py
index c618cdab27..5aca6da5ac 100644
--- a/paddle/gserver/tests/img_conv_exconv.py
+++ b/paddle/gserver/tests/img_conv_exconv.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 #
 #Licensed under the Apache License, Version 2.0 (the "License");
 #you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/pyDataProvider.py b/paddle/gserver/tests/pyDataProvider.py
index d2ad5888b5..85ea90d6ee 100644
--- a/paddle/gserver/tests/pyDataProvider.py
+++ b/paddle/gserver/tests/pyDataProvider.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 #
 #Licensed under the Apache License, Version 2.0 (the "License");
 #you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/rnn_data_provider.py b/paddle/gserver/tests/rnn_data_provider.py
index 063a4127e5..18b2191f44 100644
--- a/paddle/gserver/tests/rnn_data_provider.py
+++ b/paddle/gserver/tests/rnn_data_provider.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 #
 #Licensed under the Apache License, Version 2.0 (the "License");
 #you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/sequenceGen.py b/paddle/gserver/tests/sequenceGen.py
index 04a1732d61..d5ec8ac23f 100644
--- a/paddle/gserver/tests/sequenceGen.py
+++ b/paddle/gserver/tests/sequenceGen.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 #
 #Licensed under the Apache License, Version 2.0 (the "License");
 #you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py b/paddle/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py
index aeaaa221f9..569d3c094b 100644
--- a/paddle/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py
+++ b/paddle/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 #
 #Licensed under the Apache License, Version 2.0 (the "License");
 #you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/sequence_recurrent.py b/paddle/gserver/tests/sequence_recurrent.py
index 8786a5465d..b88c09084e 100644
--- a/paddle/gserver/tests/sequence_recurrent.py
+++ b/paddle/gserver/tests/sequence_recurrent.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 #
 #Licensed under the Apache License, Version 2.0 (the "License");
 #you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/sequence_recurrent_group.py b/paddle/gserver/tests/sequence_recurrent_group.py
index 8b5a3d4983..0daf746700 100644
--- a/paddle/gserver/tests/sequence_recurrent_group.py
+++ b/paddle/gserver/tests/sequence_recurrent_group.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/sequence_rnn_matched_inputs.py b/paddle/gserver/tests/sequence_rnn_matched_inputs.py
index 0c55f2cf9d..41a581e0cc 100644
--- a/paddle/gserver/tests/sequence_rnn_matched_inputs.py
+++ b/paddle/gserver/tests/sequence_rnn_matched_inputs.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 #
 #Licensed under the Apache License, Version 2.0 (the "License");
 #you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/sequence_rnn_mixed_inputs.py b/paddle/gserver/tests/sequence_rnn_mixed_inputs.py
index 22b376b91a..ae89d8e2bb 100644
--- a/paddle/gserver/tests/sequence_rnn_mixed_inputs.py
+++ b/paddle/gserver/tests/sequence_rnn_mixed_inputs.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 #
 #Licensed under the Apache License, Version 2.0 (the "License");
 #you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/sequence_rnn_multi_unequalength_inputs.py b/paddle/gserver/tests/sequence_rnn_multi_unequalength_inputs.py
index 3ce87490bb..6473fb3f3e 100644
--- a/paddle/gserver/tests/sequence_rnn_multi_unequalength_inputs.py
+++ b/paddle/gserver/tests/sequence_rnn_multi_unequalength_inputs.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 #
 #Licensed under the Apache License, Version 2.0 (the "License");
 #you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_ActivationGrad.cpp b/paddle/gserver/tests/test_ActivationGrad.cpp
index f4c2a07c44..b5e4af26dc 100644
--- a/paddle/gserver/tests/test_ActivationGrad.cpp
+++ b/paddle/gserver/tests/test_ActivationGrad.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_BatchNorm.cpp b/paddle/gserver/tests/test_BatchNorm.cpp
index 41116f4809..a3ec66c758 100644
--- a/paddle/gserver/tests/test_BatchNorm.cpp
+++ b/paddle/gserver/tests/test_BatchNorm.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_CRFLayerGrad.cpp b/paddle/gserver/tests/test_CRFLayerGrad.cpp
index f010066ebc..9f3d293656 100644
--- a/paddle/gserver/tests/test_CRFLayerGrad.cpp
+++ b/paddle/gserver/tests/test_CRFLayerGrad.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
+/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_CompareSparse.cpp b/paddle/gserver/tests/test_CompareSparse.cpp
index 2495d8b60a..2fbc404125 100644
--- a/paddle/gserver/tests/test_CompareSparse.cpp
+++ b/paddle/gserver/tests/test_CompareSparse.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_CompareTwoNets.cpp b/paddle/gserver/tests/test_CompareTwoNets.cpp
index 801d960756..1c9b4002a3 100644
--- a/paddle/gserver/tests/test_CompareTwoNets.cpp
+++ b/paddle/gserver/tests/test_CompareTwoNets.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_ConvTrans.cpp b/paddle/gserver/tests/test_ConvTrans.cpp
index 5f2f966547..2e394a74b7 100644
--- a/paddle/gserver/tests/test_ConvTrans.cpp
+++ b/paddle/gserver/tests/test_ConvTrans.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_ConvUnify.cpp b/paddle/gserver/tests/test_ConvUnify.cpp
index 8634355b52..ba820d9a2a 100644
--- a/paddle/gserver/tests/test_ConvUnify.cpp
+++ b/paddle/gserver/tests/test_ConvUnify.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp b/paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp
index 477638426f..0041ed3093 100644
--- a/paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp
+++ b/paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
+/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_DetectionOutput.cpp b/paddle/gserver/tests/test_DetectionOutput.cpp
index dc39c97a87..4865214265 100644
--- a/paddle/gserver/tests/test_DetectionOutput.cpp
+++ b/paddle/gserver/tests/test_DetectionOutput.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_Evaluator.cpp b/paddle/gserver/tests/test_Evaluator.cpp
index 62a131171f..4a8843f3af 100644
--- a/paddle/gserver/tests/test_Evaluator.cpp
+++ b/paddle/gserver/tests/test_Evaluator.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_Expand.cpp b/paddle/gserver/tests/test_Expand.cpp
index d32bf0152f..fa1c86d13f 100644
--- a/paddle/gserver/tests/test_Expand.cpp
+++ b/paddle/gserver/tests/test_Expand.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_KmaxSeqScore.cpp b/paddle/gserver/tests/test_KmaxSeqScore.cpp
index ffe5cfb8db..168ffbdac8 100644
--- a/paddle/gserver/tests/test_KmaxSeqScore.cpp
+++ b/paddle/gserver/tests/test_KmaxSeqScore.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp
index aab02f1684..1254d58050 100644
--- a/paddle/gserver/tests/test_LayerGrad.cpp
+++ b/paddle/gserver/tests/test_LayerGrad.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_LinearChainCRF.cpp b/paddle/gserver/tests/test_LinearChainCRF.cpp
index b37277054c..423c31e27d 100644
--- a/paddle/gserver/tests/test_LinearChainCRF.cpp
+++ b/paddle/gserver/tests/test_LinearChainCRF.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_MKLDNN.cpp b/paddle/gserver/tests/test_MKLDNN.cpp
index ad1dbc3ee2..a34a3f6206 100644
--- a/paddle/gserver/tests/test_MKLDNN.cpp
+++ b/paddle/gserver/tests/test_MKLDNN.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_MaxPoolingWithMaskOutput.cpp b/paddle/gserver/tests/test_MaxPoolingWithMaskOutput.cpp
index 16438886df..5188d2abed 100644
--- a/paddle/gserver/tests/test_MaxPoolingWithMaskOutput.cpp
+++ b/paddle/gserver/tests/test_MaxPoolingWithMaskOutput.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_MultinomialSampler.cpp b/paddle/gserver/tests/test_MultinomialSampler.cpp
index eadf40ade0..4a295ea9d5 100644
--- a/paddle/gserver/tests/test_MultinomialSampler.cpp
+++ b/paddle/gserver/tests/test_MultinomialSampler.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_NetworkCompare.cpp b/paddle/gserver/tests/test_NetworkCompare.cpp
index 2b92211936..fda3f2f793 100644
--- a/paddle/gserver/tests/test_NetworkCompare.cpp
+++ b/paddle/gserver/tests/test_NetworkCompare.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_PriorBox.cpp b/paddle/gserver/tests/test_PriorBox.cpp
index 8dc5568784..10d512ec45 100644
--- a/paddle/gserver/tests/test_PriorBox.cpp
+++ b/paddle/gserver/tests/test_PriorBox.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_PyDataProvider.cpp b/paddle/gserver/tests/test_PyDataProvider.cpp
index fe54799259..a1dee97950 100644
--- a/paddle/gserver/tests/test_PyDataProvider.cpp
+++ b/paddle/gserver/tests/test_PyDataProvider.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_PyDataProvider2.cpp b/paddle/gserver/tests/test_PyDataProvider2.cpp
index 7e193eb31a..b39fb35345 100644
--- a/paddle/gserver/tests/test_PyDataProvider2.cpp
+++ b/paddle/gserver/tests/test_PyDataProvider2.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_PyDataProvider2.py b/paddle/gserver/tests/test_PyDataProvider2.py
index 044aede98e..461d80b9e6 100644
--- a/paddle/gserver/tests/test_PyDataProvider2.py
+++ b/paddle/gserver/tests/test_PyDataProvider2.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 #
 #Licensed under the Apache License, Version 2.0 (the "License");
 #you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_RecurrentGradientMachine.cpp b/paddle/gserver/tests/test_RecurrentGradientMachine.cpp
index 6b19eb0ce5..72324fcf29 100644
--- a/paddle/gserver/tests/test_RecurrentGradientMachine.cpp
+++ b/paddle/gserver/tests/test_RecurrentGradientMachine.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_RecurrentLayer.cpp b/paddle/gserver/tests/test_RecurrentLayer.cpp
index 0e13084333..e5ce922f15 100644
--- a/paddle/gserver/tests/test_RecurrentLayer.cpp
+++ b/paddle/gserver/tests/test_RecurrentLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_SelectiveFCLayer.cpp b/paddle/gserver/tests/test_SelectiveFCLayer.cpp
index d164e382c4..583e3bc545 100644
--- a/paddle/gserver/tests/test_SelectiveFCLayer.cpp
+++ b/paddle/gserver/tests/test_SelectiveFCLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp b/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp
index 3dbffc5634..406ca63b6e 100644
--- a/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp
+++ b/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
+/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/gserver/tests/test_WarpCTCLayer.cpp b/paddle/gserver/tests/test_WarpCTCLayer.cpp
index da82946006..f2299d7da2 100644
--- a/paddle/gserver/tests/test_WarpCTCLayer.cpp
+++ b/paddle/gserver/tests/test_WarpCTCLayer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/Allocator.h b/paddle/math/Allocator.h
index 17563bf5e1..ae60f6fe5f 100644
--- a/paddle/math/Allocator.h
+++ b/paddle/math/Allocator.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/BaseMatrix.cu b/paddle/math/BaseMatrix.cu
index e3eff59dc5..7b57419e5a 100644
--- a/paddle/math/BaseMatrix.cu
+++ b/paddle/math/BaseMatrix.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/BaseMatrix.h b/paddle/math/BaseMatrix.h
index 12ad2d45a0..00ce5a1949 100644
--- a/paddle/math/BaseMatrix.h
+++ b/paddle/math/BaseMatrix.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/CpuSparseMatrix.cpp b/paddle/math/CpuSparseMatrix.cpp
index dc6979cf5a..023450ffb7 100644
--- a/paddle/math/CpuSparseMatrix.cpp
+++ b/paddle/math/CpuSparseMatrix.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/CpuSparseMatrix.h b/paddle/math/CpuSparseMatrix.h
index 522b436a2a..22b6b71688 100644
--- a/paddle/math/CpuSparseMatrix.h
+++ b/paddle/math/CpuSparseMatrix.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/ExecViaCpu.h b/paddle/math/ExecViaCpu.h
index 1e03cc5f45..9b2a3c2b8a 100644
--- a/paddle/math/ExecViaCpu.h
+++ b/paddle/math/ExecViaCpu.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/MKLDNNMatrix.cpp b/paddle/math/MKLDNNMatrix.cpp
index a710479bab..52036c5f80 100644
--- a/paddle/math/MKLDNNMatrix.cpp
+++ b/paddle/math/MKLDNNMatrix.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/MKLDNNMatrix.h b/paddle/math/MKLDNNMatrix.h
index 39d40a1f61..e1fb81679a 100644
--- a/paddle/math/MKLDNNMatrix.h
+++ b/paddle/math/MKLDNNMatrix.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/MathFunctions.cpp b/paddle/math/MathFunctions.cpp
index 28ab54b450..b2ff4bc323 100644
--- a/paddle/math/MathFunctions.cpp
+++ b/paddle/math/MathFunctions.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/MathFunctions.h b/paddle/math/MathFunctions.h
index 29fe36e3a4..f4cf6bd6c2 100644
--- a/paddle/math/MathFunctions.h
+++ b/paddle/math/MathFunctions.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/MathUtils.cpp b/paddle/math/MathUtils.cpp
index 980b6e1388..b2afdbcd51 100644
--- a/paddle/math/MathUtils.cpp
+++ b/paddle/math/MathUtils.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/MathUtils.h b/paddle/math/MathUtils.h
index f2b2980138..597485d9c5 100644
--- a/paddle/math/MathUtils.h
+++ b/paddle/math/MathUtils.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp
index cc86b12be0..35359d4b5a 100644
--- a/paddle/math/Matrix.cpp
+++ b/paddle/math/Matrix.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/Matrix.h b/paddle/math/Matrix.h
index c8e690e642..631e69edc1 100644
--- a/paddle/math/Matrix.h
+++ b/paddle/math/Matrix.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/MatrixBitCode.cpp b/paddle/math/MatrixBitCode.cpp
index cea912d3ca..61a9923bc2 100644
--- a/paddle/math/MatrixBitCode.cpp
+++ b/paddle/math/MatrixBitCode.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/MemoryHandle.cpp b/paddle/math/MemoryHandle.cpp
index 84afb5944c..1563314e92 100644
--- a/paddle/math/MemoryHandle.cpp
+++ b/paddle/math/MemoryHandle.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/MemoryHandle.h b/paddle/math/MemoryHandle.h
index 0828d377c9..03ee413c12 100644
--- a/paddle/math/MemoryHandle.h
+++ b/paddle/math/MemoryHandle.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/NEONFunctions.cpp b/paddle/math/NEONFunctions.cpp
index 0f83149422..953d5bb8c8 100644
--- a/paddle/math/NEONFunctions.cpp
+++ b/paddle/math/NEONFunctions.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/NEONFunctions.h b/paddle/math/NEONFunctions.h
index d67b2f47a8..33edd9d518 100644
--- a/paddle/math/NEONFunctions.h
+++ b/paddle/math/NEONFunctions.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/PoolAllocator.cpp b/paddle/math/PoolAllocator.cpp
index 4282c7243a..b6ad168856 100644
--- a/paddle/math/PoolAllocator.cpp
+++ b/paddle/math/PoolAllocator.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/PoolAllocator.h b/paddle/math/PoolAllocator.h
index c06efa9ac7..90141fef3f 100644
--- a/paddle/math/PoolAllocator.h
+++ b/paddle/math/PoolAllocator.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/RowBuffer.h b/paddle/math/RowBuffer.h
index e457d71f1b..2e4d11a86b 100644
--- a/paddle/math/RowBuffer.h
+++ b/paddle/math/RowBuffer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/SIMDFunctions.cpp b/paddle/math/SIMDFunctions.cpp
index d66d543a61..3cfc5d6f1e 100644
--- a/paddle/math/SIMDFunctions.cpp
+++ b/paddle/math/SIMDFunctions.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/SIMDFunctions.h b/paddle/math/SIMDFunctions.h
index 76909720f6..5b1dfea9d3 100644
--- a/paddle/math/SIMDFunctions.h
+++ b/paddle/math/SIMDFunctions.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/SparseMatrix.cpp b/paddle/math/SparseMatrix.cpp
index 284b68d590..1faa343dbc 100644
--- a/paddle/math/SparseMatrix.cpp
+++ b/paddle/math/SparseMatrix.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/SparseMatrix.h b/paddle/math/SparseMatrix.h
index e0a3c6d228..7c525f4edf 100644
--- a/paddle/math/SparseMatrix.h
+++ b/paddle/math/SparseMatrix.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/SparseRowMatrix.cpp b/paddle/math/SparseRowMatrix.cpp
index b086433fe5..4254175aab 100644
--- a/paddle/math/SparseRowMatrix.cpp
+++ b/paddle/math/SparseRowMatrix.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/SparseRowMatrix.h b/paddle/math/SparseRowMatrix.h
index ca7a6806da..3920de32df 100644
--- a/paddle/math/SparseRowMatrix.h
+++ b/paddle/math/SparseRowMatrix.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/Storage.cpp b/paddle/math/Storage.cpp
index a2ef731ecb..5982bf2e56 100644
--- a/paddle/math/Storage.cpp
+++ b/paddle/math/Storage.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/Storage.h b/paddle/math/Storage.h
index 06a66b5f14..ba8f4689a1 100644
--- a/paddle/math/Storage.h
+++ b/paddle/math/Storage.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/TensorApply.h b/paddle/math/TensorApply.h
index 11c7acb441..7d79cae5a1 100644
--- a/paddle/math/TensorApply.h
+++ b/paddle/math/TensorApply.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/TensorAssign.h b/paddle/math/TensorAssign.h
index 943fb5649e..113d98c16b 100644
--- a/paddle/math/TensorAssign.h
+++ b/paddle/math/TensorAssign.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/TensorEvaluate.h b/paddle/math/TensorEvaluate.h
index 687bad3711..2a722016e7 100644
--- a/paddle/math/TensorEvaluate.h
+++ b/paddle/math/TensorEvaluate.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/TensorExpression.h b/paddle/math/TensorExpression.h
index 6fd60e7f3c..83229ae65d 100644
--- a/paddle/math/TensorExpression.h
+++ b/paddle/math/TensorExpression.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/TrainingAlgorithmOp.cu b/paddle/math/TrainingAlgorithmOp.cu
index fc746b8533..b844768d3b 100644
--- a/paddle/math/TrainingAlgorithmOp.cu
+++ b/paddle/math/TrainingAlgorithmOp.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/TrainingAlgorithmOp.h b/paddle/math/TrainingAlgorithmOp.h
index 881a8d72d8..fe40fc2d36 100644
--- a/paddle/math/TrainingAlgorithmOp.h
+++ b/paddle/math/TrainingAlgorithmOp.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/Vector.cpp b/paddle/math/Vector.cpp
index 346008439c..2a47ed7ef8 100644
--- a/paddle/math/Vector.cpp
+++ b/paddle/math/Vector.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/Vector.h b/paddle/math/Vector.h
index f965a58092..3efbc769df 100644
--- a/paddle/math/Vector.h
+++ b/paddle/math/Vector.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/float16.h b/paddle/math/float16.h
index 63248d36f9..b00a85b082 100644
--- a/paddle/math/float16.h
+++ b/paddle/math/float16.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/tests/OriginalOptimizerApi.h b/paddle/math/tests/OriginalOptimizerApi.h
index 0188372771..e30d784b23 100644
--- a/paddle/math/tests/OriginalOptimizerApi.h
+++ b/paddle/math/tests/OriginalOptimizerApi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/tests/PerfUtils.h b/paddle/math/tests/PerfUtils.h
index 9c6a63ce6c..bee2351e2f 100644
--- a/paddle/math/tests/PerfUtils.h
+++ b/paddle/math/tests/PerfUtils.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/tests/TensorCheck.h b/paddle/math/tests/TensorCheck.h
index b998e5772e..f4332ede36 100644
--- a/paddle/math/tests/TensorCheck.h
+++ b/paddle/math/tests/TensorCheck.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/tests/TestUtils.h b/paddle/math/tests/TestUtils.h
index 713f407f49..d2b9706432 100644
--- a/paddle/math/tests/TestUtils.h
+++ b/paddle/math/tests/TestUtils.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/tests/test_Allocator.cpp b/paddle/math/tests/test_Allocator.cpp
index 1fecf659e5..84bc1c1d9e 100644
--- a/paddle/math/tests/test_Allocator.cpp
+++ b/paddle/math/tests/test_Allocator.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/tests/test_BaseMatrix.cpp b/paddle/math/tests/test_BaseMatrix.cpp
index 1766257860..6f7beb60c8 100644
--- a/paddle/math/tests/test_BaseMatrix.cpp
+++ b/paddle/math/tests/test_BaseMatrix.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/tests/test_CpuGpuVector.cpp b/paddle/math/tests/test_CpuGpuVector.cpp
index c72f89c824..395541a76a 100644
--- a/paddle/math/tests/test_CpuGpuVector.cpp
+++ b/paddle/math/tests/test_CpuGpuVector.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/tests/test_ExecViaCpu.cpp b/paddle/math/tests/test_ExecViaCpu.cpp
index 25e0ba11de..513c7b440e 100644
--- a/paddle/math/tests/test_ExecViaCpu.cpp
+++ b/paddle/math/tests/test_ExecViaCpu.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/tests/test_FPException.cpp b/paddle/math/tests/test_FPException.cpp
index 3836f7fc0f..d87fdcda9e 100644
--- a/paddle/math/tests/test_FPException.cpp
+++ b/paddle/math/tests/test_FPException.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/tests/test_GpuProfiler.cpp b/paddle/math/tests/test_GpuProfiler.cpp
index d9f146f0d1..828159660b 100644
--- a/paddle/math/tests/test_GpuProfiler.cpp
+++ b/paddle/math/tests/test_GpuProfiler.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/tests/test_Matrix.cpp b/paddle/math/tests/test_Matrix.cpp
index 2f99fa3581..a9407a31f3 100644
--- a/paddle/math/tests/test_Matrix.cpp
+++ b/paddle/math/tests/test_Matrix.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/tests/test_RowBuffer.cpp b/paddle/math/tests/test_RowBuffer.cpp
index 8cc4c69a1a..e38de853e0 100644
--- a/paddle/math/tests/test_RowBuffer.cpp
+++ b/paddle/math/tests/test_RowBuffer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/tests/test_SIMDFunctions.cpp b/paddle/math/tests/test_SIMDFunctions.cpp
index e8f9b26ff2..b692679436 100644
--- a/paddle/math/tests/test_SIMDFunctions.cpp
+++ b/paddle/math/tests/test_SIMDFunctions.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/tests/test_SparseMatrix.cpp b/paddle/math/tests/test_SparseMatrix.cpp
index 8abbe8d82e..dbcbeb8d50 100644
--- a/paddle/math/tests/test_SparseMatrix.cpp
+++ b/paddle/math/tests/test_SparseMatrix.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/tests/test_Tensor.cu b/paddle/math/tests/test_Tensor.cu
index d03698dee2..acb2da86d0 100644
--- a/paddle/math/tests/test_Tensor.cu
+++ b/paddle/math/tests/test_Tensor.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/tests/test_TrainingAlgorithm.cpp b/paddle/math/tests/test_TrainingAlgorithm.cpp
index 5ae0aa036f..fb146176ca 100644
--- a/paddle/math/tests/test_TrainingAlgorithm.cpp
+++ b/paddle/math/tests/test_TrainingAlgorithm.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/tests/test_batchTranspose.cpp b/paddle/math/tests/test_batchTranspose.cpp
index b70a619764..ccfd6d5aae 100644
--- a/paddle/math/tests/test_batchTranspose.cpp
+++ b/paddle/math/tests/test_batchTranspose.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/tests/test_float16.cpp b/paddle/math/tests/test_float16.cpp
index 74cc55aa37..64cc43f972 100644
--- a/paddle/math/tests/test_float16.cpp
+++ b/paddle/math/tests/test_float16.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/paddle/math/tests/test_float16.cu b/paddle/math/tests/test_float16.cu
index 4b520feaaf..3b2d8cfcec 100644
--- a/paddle/math/tests/test_float16.cu
+++ b/paddle/math/tests/test_float16.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/paddle/math/tests/test_lazyAssign.cu b/paddle/math/tests/test_lazyAssign.cu
index 04f23cff55..cbd74bbfe3 100644
--- a/paddle/math/tests/test_lazyAssign.cu
+++ b/paddle/math/tests/test_lazyAssign.cu
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/tests/test_matrixCompare.cpp b/paddle/math/tests/test_matrixCompare.cpp
index afb8d9d599..e45ddd433f 100644
--- a/paddle/math/tests/test_matrixCompare.cpp
+++ b/paddle/math/tests/test_matrixCompare.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/tests/test_matrixUtil.h b/paddle/math/tests/test_matrixUtil.h
index 47f4614746..86297547dc 100644
--- a/paddle/math/tests/test_matrixUtil.h
+++ b/paddle/math/tests/test_matrixUtil.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/tests/test_perturbation.cpp b/paddle/math/tests/test_perturbation.cpp
index c7c07c817a..ef99dab60a 100644
--- a/paddle/math/tests/test_perturbation.cpp
+++ b/paddle/math/tests/test_perturbation.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/math/tests/test_sparseMatrixCompare.cpp b/paddle/math/tests/test_sparseMatrixCompare.cpp
index 2b2a391b9d..12647d21a2 100644
--- a/paddle/math/tests/test_sparseMatrixCompare.cpp
+++ b/paddle/math/tests/test_sparseMatrixCompare.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/optimizer/adadelta_optimizer.cc b/paddle/optimizer/adadelta_optimizer.cc
index 8ca048257e..1faeb0cd31 100644
--- a/paddle/optimizer/adadelta_optimizer.cc
+++ b/paddle/optimizer/adadelta_optimizer.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/optimizer/adadelta_optimizer.h b/paddle/optimizer/adadelta_optimizer.h
index 48f1ae1750..74df9d54be 100644
--- a/paddle/optimizer/adadelta_optimizer.h
+++ b/paddle/optimizer/adadelta_optimizer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/optimizer/adagrad_optimizer.cc b/paddle/optimizer/adagrad_optimizer.cc
index c6d39a366a..5ac65dbd72 100644
--- a/paddle/optimizer/adagrad_optimizer.cc
+++ b/paddle/optimizer/adagrad_optimizer.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/optimizer/adagrad_optimizer.h b/paddle/optimizer/adagrad_optimizer.h
index b0cff061f5..1d58402d78 100644
--- a/paddle/optimizer/adagrad_optimizer.h
+++ b/paddle/optimizer/adagrad_optimizer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/optimizer/adam_optimizer.cc b/paddle/optimizer/adam_optimizer.cc
index 8a384b59c4..9a4ff5ecc0 100644
--- a/paddle/optimizer/adam_optimizer.cc
+++ b/paddle/optimizer/adam_optimizer.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/optimizer/adam_optimizer.h b/paddle/optimizer/adam_optimizer.h
index 7df40064df..7977226c86 100644
--- a/paddle/optimizer/adam_optimizer.h
+++ b/paddle/optimizer/adam_optimizer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/optimizer/lr_policy.h b/paddle/optimizer/lr_policy.h
index 9a44a776f2..14422d1f42 100644
--- a/paddle/optimizer/lr_policy.h
+++ b/paddle/optimizer/lr_policy.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/optimizer/optimizer.cc b/paddle/optimizer/optimizer.cc
index 3af4448436..e583aebd77 100644
--- a/paddle/optimizer/optimizer.cc
+++ b/paddle/optimizer/optimizer.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/optimizer/optimizer.h b/paddle/optimizer/optimizer.h
index 516e612167..c079de921f 100644
--- a/paddle/optimizer/optimizer.h
+++ b/paddle/optimizer/optimizer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/optimizer/parameter_optimizer.cc b/paddle/optimizer/parameter_optimizer.cc
index 1603e5fdc8..f9474b315d 100644
--- a/paddle/optimizer/parameter_optimizer.cc
+++ b/paddle/optimizer/parameter_optimizer.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/optimizer/parameter_optimizer.h b/paddle/optimizer/parameter_optimizer.h
index 1f501c49e1..c7cf8db3ee 100644
--- a/paddle/optimizer/parameter_optimizer.h
+++ b/paddle/optimizer/parameter_optimizer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/optimizer/parameter_optimizer_test.cc b/paddle/optimizer/parameter_optimizer_test.cc
index 2bcfca55cc..d663e2fd00 100644
--- a/paddle/optimizer/parameter_optimizer_test.cc
+++ b/paddle/optimizer/parameter_optimizer_test.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/optimizer/serialization.h b/paddle/optimizer/serialization.h
index 98548ddb7a..bf12eed15f 100644
--- a/paddle/optimizer/serialization.h
+++ b/paddle/optimizer/serialization.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/optimizer/serialization_test.cc b/paddle/optimizer/serialization_test.cc
index 25a8f5d351..93ee1f492f 100644
--- a/paddle/optimizer/serialization_test.cc
+++ b/paddle/optimizer/serialization_test.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/optimizer/sgd_optimizer.cc b/paddle/optimizer/sgd_optimizer.cc
index ee80f543fc..c1e2064de7 100644
--- a/paddle/optimizer/sgd_optimizer.cc
+++ b/paddle/optimizer/sgd_optimizer.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/optimizer/sgd_optimizer.h b/paddle/optimizer/sgd_optimizer.h
index 16a4df9973..f504d98adb 100644
--- a/paddle/optimizer/sgd_optimizer.h
+++ b/paddle/optimizer/sgd_optimizer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/optimizer/tensor.h b/paddle/optimizer/tensor.h
index e999e9bda1..fd32398a23 100644
--- a/paddle/optimizer/tensor.h
+++ b/paddle/optimizer/tensor.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/Argument.cpp b/paddle/parameter/Argument.cpp
index 8dbef0b22e..cfdaf8998b 100644
--- a/paddle/parameter/Argument.cpp
+++ b/paddle/parameter/Argument.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/Argument.h b/paddle/parameter/Argument.h
index 7b59199dde..e580d38216 100644
--- a/paddle/parameter/Argument.h
+++ b/paddle/parameter/Argument.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/paddle/parameter/AverageOptimizer.cpp b/paddle/parameter/AverageOptimizer.cpp
index e51ca56520..75998d81dd 100644
--- a/paddle/parameter/AverageOptimizer.cpp
+++ b/paddle/parameter/AverageOptimizer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/AverageOptimizer.h b/paddle/parameter/AverageOptimizer.h
index 9fd3f75baa..4ad3c18d56 100644
--- a/paddle/parameter/AverageOptimizer.h
+++ b/paddle/parameter/AverageOptimizer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/FirstOrderOptimizer.cpp b/paddle/parameter/FirstOrderOptimizer.cpp
index 5938b2210c..5e280bcac3 100644
--- a/paddle/parameter/FirstOrderOptimizer.cpp
+++ b/paddle/parameter/FirstOrderOptimizer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/FirstOrderOptimizer.h b/paddle/parameter/FirstOrderOptimizer.h
index 5b0c52a30d..047989fcad 100644
--- a/paddle/parameter/FirstOrderOptimizer.h
+++ b/paddle/parameter/FirstOrderOptimizer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/LearningRateScheduler.cpp b/paddle/parameter/LearningRateScheduler.cpp
index 66448b2c5f..b6b58e3dda 100644
--- a/paddle/parameter/LearningRateScheduler.cpp
+++ b/paddle/parameter/LearningRateScheduler.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/LearningRateScheduler.h b/paddle/parameter/LearningRateScheduler.h
index 53b9dba446..aea99a1c20 100644
--- a/paddle/parameter/LearningRateScheduler.h
+++ b/paddle/parameter/LearningRateScheduler.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/OptimizerFunctions.cpp b/paddle/parameter/OptimizerFunctions.cpp
index a4af1b4705..b7f920b89c 100644
--- a/paddle/parameter/OptimizerFunctions.cpp
+++ b/paddle/parameter/OptimizerFunctions.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/OptimizerFunctions.h b/paddle/parameter/OptimizerFunctions.h
index 4f7370b6ba..57f6fc9d40 100644
--- a/paddle/parameter/OptimizerFunctions.h
+++ b/paddle/parameter/OptimizerFunctions.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/OptimizerWithRegularizer.cpp b/paddle/parameter/OptimizerWithRegularizer.cpp
index 7910b12444..9e914ae4ec 100644
--- a/paddle/parameter/OptimizerWithRegularizer.cpp
+++ b/paddle/parameter/OptimizerWithRegularizer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/OptimizerWithRegularizer.h b/paddle/parameter/OptimizerWithRegularizer.h
index 0e1c444d28..7219d96d92 100644
--- a/paddle/parameter/OptimizerWithRegularizer.h
+++ b/paddle/parameter/OptimizerWithRegularizer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/Parameter.cpp b/paddle/parameter/Parameter.cpp
index 3b0f09cea6..0e6ea90f3d 100644
--- a/paddle/parameter/Parameter.cpp
+++ b/paddle/parameter/Parameter.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/Parameter.h b/paddle/parameter/Parameter.h
index 04f12efaac..24ac10f3fe 100644
--- a/paddle/parameter/Parameter.h
+++ b/paddle/parameter/Parameter.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/ParameterOptimizer.cpp b/paddle/parameter/ParameterOptimizer.cpp
index 7c8c6978e2..638daa58f1 100644
--- a/paddle/parameter/ParameterOptimizer.cpp
+++ b/paddle/parameter/ParameterOptimizer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/ParameterOptimizer.h b/paddle/parameter/ParameterOptimizer.h
index f98ba569b5..a8d0ca72f2 100644
--- a/paddle/parameter/ParameterOptimizer.h
+++ b/paddle/parameter/ParameterOptimizer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/ParameterUpdateFunctions.cpp b/paddle/parameter/ParameterUpdateFunctions.cpp
index d60cb36383..db1153c2d6 100644
--- a/paddle/parameter/ParameterUpdateFunctions.cpp
+++ b/paddle/parameter/ParameterUpdateFunctions.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/ParameterUpdateFunctions.h b/paddle/parameter/ParameterUpdateFunctions.h
index 0fca280149..7434baa2d3 100644
--- a/paddle/parameter/ParameterUpdateFunctions.h
+++ b/paddle/parameter/ParameterUpdateFunctions.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/ParameterUpdaterBase.cpp b/paddle/parameter/ParameterUpdaterBase.cpp
index 458cae886a..7815856b45 100644
--- a/paddle/parameter/ParameterUpdaterBase.cpp
+++ b/paddle/parameter/ParameterUpdaterBase.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/ParameterUpdaterBase.h b/paddle/parameter/ParameterUpdaterBase.h
index 6265c828a1..717e1c6721 100644
--- a/paddle/parameter/ParameterUpdaterBase.h
+++ b/paddle/parameter/ParameterUpdaterBase.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/ParameterUpdaterHook.cpp b/paddle/parameter/ParameterUpdaterHook.cpp
index c8b47687f5..e6aec3c348 100644
--- a/paddle/parameter/ParameterUpdaterHook.cpp
+++ b/paddle/parameter/ParameterUpdaterHook.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/ParameterUpdaterHook.h b/paddle/parameter/ParameterUpdaterHook.h
index 1f4506441d..d30530ec39 100644
--- a/paddle/parameter/ParameterUpdaterHook.h
+++ b/paddle/parameter/ParameterUpdaterHook.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/Regularizer.cpp b/paddle/parameter/Regularizer.cpp
index 8511900150..d223fd2df6 100644
--- a/paddle/parameter/Regularizer.cpp
+++ b/paddle/parameter/Regularizer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/Regularizer.h b/paddle/parameter/Regularizer.h
index 6d54773098..6bed7b0ddf 100644
--- a/paddle/parameter/Regularizer.h
+++ b/paddle/parameter/Regularizer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/ThreadLocalBuffer.cpp b/paddle/parameter/ThreadLocalBuffer.cpp
index b21dd15245..550e41dfda 100644
--- a/paddle/parameter/ThreadLocalBuffer.cpp
+++ b/paddle/parameter/ThreadLocalBuffer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/ThreadLocalBuffer.h b/paddle/parameter/ThreadLocalBuffer.h
index c916519c97..07c96e59d0 100644
--- a/paddle/parameter/ThreadLocalBuffer.h
+++ b/paddle/parameter/ThreadLocalBuffer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/Weight.cpp b/paddle/parameter/Weight.cpp
index 3738a58d7f..ba4ddce69f 100644
--- a/paddle/parameter/Weight.cpp
+++ b/paddle/parameter/Weight.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/Weight.h b/paddle/parameter/Weight.h
index 6e7a49154e..7314c29d0d 100644
--- a/paddle/parameter/Weight.h
+++ b/paddle/parameter/Weight.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/tests/test_argument.cpp b/paddle/parameter/tests/test_argument.cpp
index 19df6ea957..54ceb3e087 100644
--- a/paddle/parameter/tests/test_argument.cpp
+++ b/paddle/parameter/tests/test_argument.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/parameter/tests/test_common.cpp b/paddle/parameter/tests/test_common.cpp
index 64d204aea1..6e10becabb 100644
--- a/paddle/parameter/tests/test_common.cpp
+++ b/paddle/parameter/tests/test_common.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/pserver/BaseClient.cpp b/paddle/pserver/BaseClient.cpp
index 0e031a7e20..a6204ef47e 100644
--- a/paddle/pserver/BaseClient.cpp
+++ b/paddle/pserver/BaseClient.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/pserver/BaseClient.h b/paddle/pserver/BaseClient.h
index 667bc451d1..a932d34712 100644
--- a/paddle/pserver/BaseClient.h
+++ b/paddle/pserver/BaseClient.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/pserver/LightNetwork.cpp b/paddle/pserver/LightNetwork.cpp
index 0e8e5a83a4..4c0da2217e 100644
--- a/paddle/pserver/LightNetwork.cpp
+++ b/paddle/pserver/LightNetwork.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/pserver/LightNetwork.h b/paddle/pserver/LightNetwork.h
index c4a06deb94..2aaa26a5c7 100644
--- a/paddle/pserver/LightNetwork.h
+++ b/paddle/pserver/LightNetwork.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/pserver/ParameterClient2.cpp b/paddle/pserver/ParameterClient2.cpp
index 9562c64986..43e4902b0f 100644
--- a/paddle/pserver/ParameterClient2.cpp
+++ b/paddle/pserver/ParameterClient2.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/pserver/ParameterClient2.h b/paddle/pserver/ParameterClient2.h
index 29b9eeacdd..d63273ccbc 100644
--- a/paddle/pserver/ParameterClient2.h
+++ b/paddle/pserver/ParameterClient2.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/pserver/ParameterServer2.cpp b/paddle/pserver/ParameterServer2.cpp
index 54f5c4c0fb..f8814714c2 100644
--- a/paddle/pserver/ParameterServer2.cpp
+++ b/paddle/pserver/ParameterServer2.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/pserver/ParameterServer2.h b/paddle/pserver/ParameterServer2.h
index f7d3587b88..3ed06b6b04 100644
--- a/paddle/pserver/ParameterServer2.h
+++ b/paddle/pserver/ParameterServer2.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/pserver/ParameterServer2Main.cpp b/paddle/pserver/ParameterServer2Main.cpp
index 845a2c27e2..dfbae0cd0f 100644
--- a/paddle/pserver/ParameterServer2Main.cpp
+++ b/paddle/pserver/ParameterServer2Main.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/pserver/ParameterServerController.cpp b/paddle/pserver/ParameterServerController.cpp
index 1d11a2e1ac..2a7dcc15aa 100644
--- a/paddle/pserver/ParameterServerController.cpp
+++ b/paddle/pserver/ParameterServerController.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/pserver/ParameterServerController.h b/paddle/pserver/ParameterServerController.h
index fe9bb0b4d0..3a9bc74edf 100644
--- a/paddle/pserver/ParameterServerController.h
+++ b/paddle/pserver/ParameterServerController.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/pserver/ProtoServer.cpp b/paddle/pserver/ProtoServer.cpp
index 410317ece2..6b7948a7d0 100644
--- a/paddle/pserver/ProtoServer.cpp
+++ b/paddle/pserver/ProtoServer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/pserver/ProtoServer.h b/paddle/pserver/ProtoServer.h
index 3acdcc27da..3f78799dbf 100644
--- a/paddle/pserver/ProtoServer.h
+++ b/paddle/pserver/ProtoServer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/pserver/RDMANetwork.h b/paddle/pserver/RDMANetwork.h
index caef65134b..83db6b9df7 100644
--- a/paddle/pserver/RDMANetwork.h
+++ b/paddle/pserver/RDMANetwork.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/pserver/SocketChannel.cpp b/paddle/pserver/SocketChannel.cpp
index 12e3bc6552..72e6943408 100644
--- a/paddle/pserver/SocketChannel.cpp
+++ b/paddle/pserver/SocketChannel.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/pserver/SocketChannel.h b/paddle/pserver/SocketChannel.h
index 6c3dd20d7b..c0f30d0db7 100644
--- a/paddle/pserver/SocketChannel.h
+++ b/paddle/pserver/SocketChannel.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/pserver/SparseParameterDistribution.cpp b/paddle/pserver/SparseParameterDistribution.cpp
index 6dd725db30..bb247f389c 100644
--- a/paddle/pserver/SparseParameterDistribution.cpp
+++ b/paddle/pserver/SparseParameterDistribution.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/pserver/SparseParameterDistribution.h b/paddle/pserver/SparseParameterDistribution.h
index 24b14106cf..13f199548d 100644
--- a/paddle/pserver/SparseParameterDistribution.h
+++ b/paddle/pserver/SparseParameterDistribution.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/pserver/test/SocketTest.cpp b/paddle/pserver/test/SocketTest.cpp
index b43461d61b..6019dccaad 100644
--- a/paddle/pserver/test/SocketTest.cpp
+++ b/paddle/pserver/test/SocketTest.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/pserver/test/test_ParameterServer2.cpp b/paddle/pserver/test/test_ParameterServer2.cpp
index 8e7231a9e1..e742cd0871 100644
--- a/paddle/pserver/test/test_ParameterServer2.cpp
+++ b/paddle/pserver/test/test_ParameterServer2.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/pserver/test/test_ProtoServer.cpp b/paddle/pserver/test/test_ProtoServer.cpp
index ad8ffed9c1..d68a8d2180 100644
--- a/paddle/pserver/test/test_ProtoServer.cpp
+++ b/paddle/pserver/test/test_ProtoServer.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/proto/DataConfig.proto b/proto/DataConfig.proto
index 0cb5d7afbb..1b2aa8e726 100644
--- a/proto/DataConfig.proto
+++ b/proto/DataConfig.proto
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/proto/DataFormat.proto b/proto/DataFormat.proto
index 7d963bc29f..46b1f58bdb 100644
--- a/proto/DataFormat.proto
+++ b/proto/DataFormat.proto
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/proto/ModelConfig.proto b/proto/ModelConfig.proto
index 1fbdd5bbd8..d699984ff2 100644
--- a/proto/ModelConfig.proto
+++ b/proto/ModelConfig.proto
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/proto/OptimizerConfig.proto b/proto/OptimizerConfig.proto
index b341d78d19..e9ea1bfbcc 100644
--- a/proto/OptimizerConfig.proto
+++ b/proto/OptimizerConfig.proto
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/proto/ParameterConfig.proto b/proto/ParameterConfig.proto
index b13570a2c6..6f8ba9d760 100644
--- a/proto/ParameterConfig.proto
+++ b/proto/ParameterConfig.proto
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/proto/ParameterServerConfig.proto b/proto/ParameterServerConfig.proto
index bd63cf35b1..1404c8aa14 100644
--- a/proto/ParameterServerConfig.proto
+++ b/proto/ParameterServerConfig.proto
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -47,4 +47,4 @@ message ParameterServerConfig {
   // if async_lagged_grad_discard_ratio is not set in trainer_config.conf
   // use it as defalut value
   required double async_lagged_ratio_default = 9 [ default = 1.5 ];
-}
\ No newline at end of file
+}
diff --git a/proto/ParameterService.proto b/proto/ParameterService.proto
index e3c180ccc3..b56c1bfe7c 100644
--- a/proto/ParameterService.proto
+++ b/proto/ParameterService.proto
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/proto/TrainerConfig.proto b/proto/TrainerConfig.proto
index aa4e5f4ca0..9cc20b4a3e 100644
--- a/proto/TrainerConfig.proto
+++ b/proto/TrainerConfig.proto
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/ProtobufEqualMain.cpp b/python/paddle/trainer_config_helpers/tests/ProtobufEqualMain.cpp
index fc53422afd..7b10e0b7a6 100644
--- a/python/paddle/trainer_config_helpers/tests/ProtobufEqualMain.cpp
+++ b/python/paddle/trainer_config_helpers/tests/ProtobufEqualMain.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/img_layers.py b/python/paddle/trainer_config_helpers/tests/configs/img_layers.py
index 93b505a602..767b645424 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/img_layers.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/img_layers.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/img_trans_layers.py b/python/paddle/trainer_config_helpers/tests/configs/img_trans_layers.py
index 745f060fa5..e17c8fa7c0 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/img_trans_layers.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/img_trans_layers.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/last_first_seq.py b/python/paddle/trainer_config_helpers/tests/configs/last_first_seq.py
index b6fc8f70f9..5b6d2627e4 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/last_first_seq.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/last_first_seq.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/layer_activations.py b/python/paddle/trainer_config_helpers/tests/configs/layer_activations.py
index 6edc03bba0..ac1f7e02c0 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/layer_activations.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/layer_activations.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/math_ops.py b/python/paddle/trainer_config_helpers/tests/configs/math_ops.py
index 59a71e1cd1..29dc634fb3 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/math_ops.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/math_ops.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/projections.py b/python/paddle/trainer_config_helpers/tests/configs/projections.py
index 96f06b4018..3b7a196d1c 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/projections.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/projections.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/shared_fc.py b/python/paddle/trainer_config_helpers/tests/configs/shared_fc.py
index 69a0a5b8ff..3229252a2f 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/shared_fc.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/shared_fc.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/shared_gru.py b/python/paddle/trainer_config_helpers/tests/configs/shared_gru.py
index 97b41fb372..dff561fdf7 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/shared_gru.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/shared_gru.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/shared_lstm.py b/python/paddle/trainer_config_helpers/tests/configs/shared_lstm.py
index 4e653dedb9..97ef2d07ae 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/shared_lstm.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/shared_lstm.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/simple_rnn_layers.py b/python/paddle/trainer_config_helpers/tests/configs/simple_rnn_layers.py
index dc418325f8..f882efcba2 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/simple_rnn_layers.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/simple_rnn_layers.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_BatchNorm3D.py b/python/paddle/trainer_config_helpers/tests/configs/test_BatchNorm3D.py
index 5b98e3fb34..169038deb1 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_BatchNorm3D.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_BatchNorm3D.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_bi_grumemory.py b/python/paddle/trainer_config_helpers/tests/configs/test_bi_grumemory.py
index f3abdfe1ae..d29e4e5c4d 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_bi_grumemory.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_bi_grumemory.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_bilinear_interp.py b/python/paddle/trainer_config_helpers/tests/configs/test_bilinear_interp.py
index 4eb9f207e0..5e724ba7d1 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_bilinear_interp.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_bilinear_interp.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_clip_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_clip_layer.py
index 24564c105f..95a1192bfa 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_clip_layer.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_clip_layer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_conv3d_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_conv3d_layer.py
index 35087c4228..f9966e399e 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_conv3d_layer.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_conv3d_layer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.py b/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.py
index b076b89106..351694fd55 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.py b/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.py
index fa7a1abe9a..8cbcf5de0a 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_crop.py b/python/paddle/trainer_config_helpers/tests/configs/test_crop.py
index 569d747857..b4ffff252b 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_crop.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_crop.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_deconv3d_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_deconv3d_layer.py
index 4f27d99873..08e701c7a8 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_deconv3d_layer.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_deconv3d_layer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_detection_output_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_detection_output_layer.py
index d37954222e..4ecd1c2b7e 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_detection_output_layer.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_detection_output_layer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_dot_prod_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_dot_prod_layer.py
index 63ba0a72b9..9b444bc2c0 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_dot_prod_layer.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_dot_prod_layer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_expand_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_expand_layer.py
index 9892bca05d..85101d2b92 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_expand_layer.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_expand_layer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_factorization_machine.py b/python/paddle/trainer_config_helpers/tests/configs/test_factorization_machine.py
index 6fb773d9f7..48ac46c5bb 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_factorization_machine.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_factorization_machine.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_fc.py b/python/paddle/trainer_config_helpers/tests/configs/test_fc.py
index 4dd37d0242..f1e454d211 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_fc.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_fc.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_gated_unit_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_gated_unit_layer.py
index 082646b9d3..afc3e9207c 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_gated_unit_layer.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_gated_unit_layer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_grumemory_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_grumemory_layer.py
index f5271b8280..ac9902d08c 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_grumemory_layer.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_grumemory_layer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_hsigmoid.py b/python/paddle/trainer_config_helpers/tests/configs/test_hsigmoid.py
index ad86d7d5bd..da781c149b 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_hsigmoid.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_hsigmoid.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_l2_distance_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_l2_distance_layer.py
index 1796e1c6b6..42c9b5deea 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_l2_distance_layer.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_l2_distance_layer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_lstmemory_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_lstmemory_layer.py
index 7484818ab2..26eeea5461 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_lstmemory_layer.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_lstmemory_layer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_maxout.py b/python/paddle/trainer_config_helpers/tests/configs/test_maxout.py
index 22788be2e9..2cd41a306a 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_maxout.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_maxout.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_multibox_loss_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_multibox_loss_layer.py
index 0dcccc49e4..b4fd9052c4 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_multibox_loss_layer.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_multibox_loss_layer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_multiplex_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_multiplex_layer.py
index 046d38741e..bfba07be86 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_multiplex_layer.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_multiplex_layer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_ntm_layers.py b/python/paddle/trainer_config_helpers/tests/configs/test_ntm_layers.py
index d81128c77c..891894172c 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_ntm_layers.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_ntm_layers.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_pad.py b/python/paddle/trainer_config_helpers/tests/configs/test_pad.py
index 44b0b34d5a..c5825c82e5 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_pad.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_pad.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_pooling3D_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_pooling3D_layer.py
index e257e735ad..5ff52c195a 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_pooling3D_layer.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_pooling3D_layer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_prelu_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_prelu_layer.py
index 098e2397ec..d803a0d13d 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_prelu_layer.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_prelu_layer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_print_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_print_layer.py
index 714d8893e9..ca1f5a4572 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_print_layer.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_print_layer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_recursive_topology.py b/python/paddle/trainer_config_helpers/tests/configs/test_recursive_topology.py
index 188a3d2320..d44870d804 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_recursive_topology.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_recursive_topology.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_repeat_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_repeat_layer.py
index 93b673afee..ee90e830df 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_repeat_layer.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_repeat_layer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_resize_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_resize_layer.py
index 3a202974e3..4aa81919df 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_resize_layer.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_resize_layer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_rnn_group.py b/python/paddle/trainer_config_helpers/tests/configs/test_rnn_group.py
index 91074b8fdf..3824ef5995 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_rnn_group.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_rnn_group.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_roi_pool_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_roi_pool_layer.py
index f0a37f7e99..6929d106c6 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_roi_pool_layer.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_roi_pool_layer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_row_conv.py b/python/paddle/trainer_config_helpers/tests/configs/test_row_conv.py
index 68b1a991f3..6381a26fe8 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_row_conv.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_row_conv.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_row_l2_norm_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_row_l2_norm_layer.py
index c25393f580..3c17d2ccfd 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_row_l2_norm_layer.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_row_l2_norm_layer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_scale_shift_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_scale_shift_layer.py
index 3691e8daea..ae8a25ba94 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_scale_shift_layer.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_scale_shift_layer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_scale_sub_region_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_scale_sub_region_layer.py
index 426afcf3a0..e4f7120bcc 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_scale_sub_region_layer.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_scale_sub_region_layer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_seq_concat_reshape.py b/python/paddle/trainer_config_helpers/tests/configs/test_seq_concat_reshape.py
index 7296081857..a6be069e7e 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_seq_concat_reshape.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_seq_concat_reshape.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_sequence_pooling.py b/python/paddle/trainer_config_helpers/tests/configs/test_sequence_pooling.py
index d13a5a8429..7b951a4cd7 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_sequence_pooling.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_sequence_pooling.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_smooth_l1.py b/python/paddle/trainer_config_helpers/tests/configs/test_smooth_l1.py
index 42225b8505..32a4e6f6d0 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_smooth_l1.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_smooth_l1.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_split_datasource.py b/python/paddle/trainer_config_helpers/tests/configs/test_split_datasource.py
index 7ebdf7408d..ea68b5493e 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_split_datasource.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_split_datasource.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_spp_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_spp_layer.py
index 1f19ea77ad..0e692d4b62 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/test_spp_layer.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/test_spp_layer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/unused_layers.py b/python/paddle/trainer_config_helpers/tests/configs/unused_layers.py
index 8581ba60ab..8878e73fff 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/unused_layers.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/unused_layers.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/configs/util_layers.py b/python/paddle/trainer_config_helpers/tests/configs/util_layers.py
index a66c9515c7..da134f100b 100644
--- a/python/paddle/trainer_config_helpers/tests/configs/util_layers.py
+++ b/python/paddle/trainer_config_helpers/tests/configs/util_layers.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/trainer_config_helpers/tests/test_reset_hook.py b/python/paddle/trainer_config_helpers/tests/test_reset_hook.py
index 81186dedd2..4d7542c35b 100644
--- a/python/paddle/trainer_config_helpers/tests/test_reset_hook.py
+++ b/python/paddle/trainer_config_helpers/tests/test_reset_hook.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/utils/image_multiproc.py b/python/paddle/utils/image_multiproc.py
index fdbefef9ff..3e3e519f76 100644
--- a/python/paddle/utils/image_multiproc.py
+++ b/python/paddle/utils/image_multiproc.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/dataset/tests/imikolov_test.py b/python/paddle/v2/dataset/tests/imikolov_test.py
index eed1458244..714a75d6f1 100644
--- a/python/paddle/v2/dataset/tests/imikolov_test.py
+++ b/python/paddle/v2/dataset/tests/imikolov_test.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/event.py b/python/paddle/v2/event.py
index 01067ef426..c11aa121c1 100644
--- a/python/paddle/v2/event.py
+++ b/python/paddle/v2/event.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/__init__.py b/python/paddle/v2/fluid/__init__.py
index 73acbf3e00..9f710c4a4a 100644
--- a/python/paddle/v2/fluid/__init__.py
+++ b/python/paddle/v2/fluid/__init__.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/backward.py b/python/paddle/v2/fluid/backward.py
index 29243c90e8..a690c14300 100644
--- a/python/paddle/v2/fluid/backward.py
+++ b/python/paddle/v2/fluid/backward.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/clip.py b/python/paddle/v2/fluid/clip.py
index fdbc8524ab..12add9e686 100644
--- a/python/paddle/v2/fluid/clip.py
+++ b/python/paddle/v2/fluid/clip.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/data_feeder.py b/python/paddle/v2/fluid/data_feeder.py
index a3b22a8633..f9e2f3e6a2 100644
--- a/python/paddle/v2/fluid/data_feeder.py
+++ b/python/paddle/v2/fluid/data_feeder.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/debuger.py b/python/paddle/v2/fluid/debuger.py
index db1808c647..b7a906654a 100644
--- a/python/paddle/v2/fluid/debuger.py
+++ b/python/paddle/v2/fluid/debuger.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/default_scope_funcs.py b/python/paddle/v2/fluid/default_scope_funcs.py
index a27280208b..eeb9fb2043 100644
--- a/python/paddle/v2/fluid/default_scope_funcs.py
+++ b/python/paddle/v2/fluid/default_scope_funcs.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/distribute_transpiler.py b/python/paddle/v2/fluid/distribute_transpiler.py
index bf2e9e88f3..03a7478ae8 100644
--- a/python/paddle/v2/fluid/distribute_transpiler.py
+++ b/python/paddle/v2/fluid/distribute_transpiler.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/distribute_transpiler_simple.py b/python/paddle/v2/fluid/distribute_transpiler_simple.py
index 73d9bed1ae..e94bbb6c39 100644
--- a/python/paddle/v2/fluid/distribute_transpiler_simple.py
+++ b/python/paddle/v2/fluid/distribute_transpiler_simple.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/distributed_spliter.py b/python/paddle/v2/fluid/distributed_spliter.py
index 8cf0b06786..d288b27ba0 100644
--- a/python/paddle/v2/fluid/distributed_spliter.py
+++ b/python/paddle/v2/fluid/distributed_spliter.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/evaluator.py b/python/paddle/v2/fluid/evaluator.py
index 2686a5bdfc..30d87c76c2 100644
--- a/python/paddle/v2/fluid/evaluator.py
+++ b/python/paddle/v2/fluid/evaluator.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/executor.py b/python/paddle/v2/fluid/executor.py
index 01cbdb3ec4..e274959305 100644
--- a/python/paddle/v2/fluid/executor.py
+++ b/python/paddle/v2/fluid/executor.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/v2/fluid/framework.py
index 35d3df785b..dfd7e8047c 100644
--- a/python/paddle/v2/fluid/framework.py
+++ b/python/paddle/v2/fluid/framework.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/graphviz.py b/python/paddle/v2/fluid/graphviz.py
index 5881119c39..b8d21344fc 100644
--- a/python/paddle/v2/fluid/graphviz.py
+++ b/python/paddle/v2/fluid/graphviz.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/initializer.py b/python/paddle/v2/fluid/initializer.py
index 8c70fd90ef..927f1e625a 100644
--- a/python/paddle/v2/fluid/initializer.py
+++ b/python/paddle/v2/fluid/initializer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/io.py b/python/paddle/v2/fluid/io.py
index 0f43e46082..8a8bd089b5 100644
--- a/python/paddle/v2/fluid/io.py
+++ b/python/paddle/v2/fluid/io.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/layer_helper.py b/python/paddle/v2/fluid/layer_helper.py
index 2119ca12c8..e7abc23f2f 100644
--- a/python/paddle/v2/fluid/layer_helper.py
+++ b/python/paddle/v2/fluid/layer_helper.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/layers/__init__.py b/python/paddle/v2/fluid/layers/__init__.py
index f4fb2ca279..906a16a49f 100644
--- a/python/paddle/v2/fluid/layers/__init__.py
+++ b/python/paddle/v2/fluid/layers/__init__.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/layers/control_flow.py b/python/paddle/v2/fluid/layers/control_flow.py
index 71a9459d55..800c11a53b 100644
--- a/python/paddle/v2/fluid/layers/control_flow.py
+++ b/python/paddle/v2/fluid/layers/control_flow.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/layers/device.py b/python/paddle/v2/fluid/layers/device.py
index 107511b5f4..3fee263ac0 100644
--- a/python/paddle/v2/fluid/layers/device.py
+++ b/python/paddle/v2/fluid/layers/device.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/layers/io.py b/python/paddle/v2/fluid/layers/io.py
index 85e44a0e51..af3ae54248 100644
--- a/python/paddle/v2/fluid/layers/io.py
+++ b/python/paddle/v2/fluid/layers/io.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/layers/layer_function_generator.py b/python/paddle/v2/fluid/layers/layer_function_generator.py
index b0e4d1635f..88c9ae31b7 100644
--- a/python/paddle/v2/fluid/layers/layer_function_generator.py
+++ b/python/paddle/v2/fluid/layers/layer_function_generator.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/layers/math_op_patch.py b/python/paddle/v2/fluid/layers/math_op_patch.py
index d829bba1b1..417a01b76f 100644
--- a/python/paddle/v2/fluid/layers/math_op_patch.py
+++ b/python/paddle/v2/fluid/layers/math_op_patch.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py
index fcb55c8d4d..f5b64fee1d 100644
--- a/python/paddle/v2/fluid/layers/nn.py
+++ b/python/paddle/v2/fluid/layers/nn.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/layers/ops.py b/python/paddle/v2/fluid/layers/ops.py
index bb3f71abbb..28265a57e6 100644
--- a/python/paddle/v2/fluid/layers/ops.py
+++ b/python/paddle/v2/fluid/layers/ops.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/layers/tensor.py b/python/paddle/v2/fluid/layers/tensor.py
index 2d4e0ab0cc..db400aad37 100644
--- a/python/paddle/v2/fluid/layers/tensor.py
+++ b/python/paddle/v2/fluid/layers/tensor.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/memory_optimization_transpiler.py b/python/paddle/v2/fluid/memory_optimization_transpiler.py
index 53e0991ee8..78dc56f849 100644
--- a/python/paddle/v2/fluid/memory_optimization_transpiler.py
+++ b/python/paddle/v2/fluid/memory_optimization_transpiler.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/net_drawer.py b/python/paddle/v2/fluid/net_drawer.py
index 9b126f5197..66793a5785 100644
--- a/python/paddle/v2/fluid/net_drawer.py
+++ b/python/paddle/v2/fluid/net_drawer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/nets.py b/python/paddle/v2/fluid/nets.py
index be7878f869..c161d93854 100644
--- a/python/paddle/v2/fluid/nets.py
+++ b/python/paddle/v2/fluid/nets.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/op.py b/python/paddle/v2/fluid/op.py
index f368e0c2d8..6a41370458 100644
--- a/python/paddle/v2/fluid/op.py
+++ b/python/paddle/v2/fluid/op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/optimizer.py b/python/paddle/v2/fluid/optimizer.py
index f8a00e3a5f..39391eb8e4 100644
--- a/python/paddle/v2/fluid/optimizer.py
+++ b/python/paddle/v2/fluid/optimizer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/param_attr.py b/python/paddle/v2/fluid/param_attr.py
index fc566b8a24..255cd21043 100644
--- a/python/paddle/v2/fluid/param_attr.py
+++ b/python/paddle/v2/fluid/param_attr.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/profiler.py b/python/paddle/v2/fluid/profiler.py
index d33a4c52a8..4611986c99 100644
--- a/python/paddle/v2/fluid/profiler.py
+++ b/python/paddle/v2/fluid/profiler.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/regularizer.py b/python/paddle/v2/fluid/regularizer.py
index 0273da647a..a29f9a208e 100644
--- a/python/paddle/v2/fluid/regularizer.py
+++ b/python/paddle/v2/fluid/regularizer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/__init__.py b/python/paddle/v2/fluid/tests/__init__.py
index b94a21a7e4..eca2dce114 100644
--- a/python/paddle/v2/fluid/tests/__init__.py
+++ b/python/paddle/v2/fluid/tests/__init__.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/book/__init__.py b/python/paddle/v2/fluid/tests/book/__init__.py
index b94a21a7e4..eca2dce114 100644
--- a/python/paddle/v2/fluid/tests/book/__init__.py
+++ b/python/paddle/v2/fluid/tests/book/__init__.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/book/notest_rnn_encoder_decoer.py b/python/paddle/v2/fluid/tests/book/notest_rnn_encoder_decoer.py
index 7fe43c680c..c7db70f1b1 100644
--- a/python/paddle/v2/fluid/tests/book/notest_rnn_encoder_decoer.py
+++ b/python/paddle/v2/fluid/tests/book/notest_rnn_encoder_decoer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/book/test_fit_a_line.py b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py
index b3332b4810..a66c2c3c2f 100644
--- a/python/paddle/v2/fluid/tests/book/test_fit_a_line.py
+++ b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/book/test_image_classification.py b/python/paddle/v2/fluid/tests/book/test_image_classification.py
index ffbe5bdbd6..734ab3e4fb 100644
--- a/python/paddle/v2/fluid/tests/book/test_image_classification.py
+++ b/python/paddle/v2/fluid/tests/book/test_image_classification.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py
index f33e81186b..b790246ec1 100644
--- a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py
+++ b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/book/test_machine_translation.py b/python/paddle/v2/fluid/tests/book/test_machine_translation.py
index 5716ddd3dd..d3405a9601 100644
--- a/python/paddle/v2/fluid/tests/book/test_machine_translation.py
+++ b/python/paddle/v2/fluid/tests/book/test_machine_translation.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py
index 244c1749cd..a0b4774da5 100644
--- a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py
+++ b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/book/test_recommender_system.py b/python/paddle/v2/fluid/tests/book/test_recommender_system.py
index 612d51e08e..1a7d8d57ff 100644
--- a/python/paddle/v2/fluid/tests/book/test_recommender_system.py
+++ b/python/paddle/v2/fluid/tests/book/test_recommender_system.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/book/test_word2vec.py b/python/paddle/v2/fluid/tests/book/test_word2vec.py
index 69bfbcee69..9bd8f90c5e 100644
--- a/python/paddle/v2/fluid/tests/book/test_word2vec.py
+++ b/python/paddle/v2/fluid/tests/book/test_word2vec.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_fit_a_line.py b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_fit_a_line.py
index 9774edebfb..c443c4e0b7 100644
--- a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_fit_a_line.py
+++ b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_fit_a_line.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py
index 08bb67b0a1..1210bf1d84 100644
--- a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py
+++ b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_word2vec.py b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_word2vec.py
index ec4c2d2721..0d5ad98850 100644
--- a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_word2vec.py
+++ b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_word2vec.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_machine_translation.py b/python/paddle/v2/fluid/tests/book_distribute/notest_machine_translation.py
index adeacd4adf..15d2d40979 100644
--- a/python/paddle/v2/fluid/tests/book_distribute/notest_machine_translation.py
+++ b/python/paddle/v2/fluid/tests/book_distribute/notest_machine_translation.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py b/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py
index f18ca05c78..07815059c4 100644
--- a/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py
+++ b/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_mlp_dist.py b/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_mlp_dist.py
index 7733248cb4..c442ada6e3 100644
--- a/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_mlp_dist.py
+++ b/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_mlp_dist.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_recommender_system_dist.py b/python/paddle/v2/fluid/tests/book_distribute/notest_recommender_system_dist.py
index 2d8885e377..363c7102c7 100644
--- a/python/paddle/v2/fluid/tests/book_distribute/notest_recommender_system_dist.py
+++ b/python/paddle/v2/fluid/tests/book_distribute/notest_recommender_system_dist.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py b/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py
index 49f26d6b69..c5c0856c31 100644
--- a/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py
+++ b/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py b/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py
index bff376a0e2..99e2c2bbac 100644
--- a/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py
+++ b/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/book_distribute/test_split_var.py b/python/paddle/v2/fluid/tests/book_distribute/test_split_var.py
index 4a50049bf2..d7160b78b9 100644
--- a/python/paddle/v2/fluid/tests/book_distribute/test_split_var.py
+++ b/python/paddle/v2/fluid/tests/book_distribute/test_split_var.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py b/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py
index 045db8390c..944f8af086 100644
--- a/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py
+++ b/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py b/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py
index 9fbb36d363..a556904107 100644
--- a/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py
+++ b/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py b/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py
index 48abaa8d87..4c1eae861b 100644
--- a/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py
+++ b/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/decorators.py b/python/paddle/v2/fluid/tests/decorators.py
index 0a8a2ccc4d..7081e4b934 100644
--- a/python/paddle/v2/fluid/tests/decorators.py
+++ b/python/paddle/v2/fluid/tests/decorators.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/demo/fc_gan.py b/python/paddle/v2/fluid/tests/demo/fc_gan.py
index 0652c8134d..67921db04a 100644
--- a/python/paddle/v2/fluid/tests/demo/fc_gan.py
+++ b/python/paddle/v2/fluid/tests/demo/fc_gan.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/op_test.py b/python/paddle/v2/fluid/tests/op_test.py
index f8475813c0..940e2bfb16 100644
--- a/python/paddle/v2/fluid/tests/op_test.py
+++ b/python/paddle/v2/fluid/tests/op_test.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_accuracy_op.py b/python/paddle/v2/fluid/tests/test_accuracy_op.py
index ac3f3bdff4..212a87e529 100644
--- a/python/paddle/v2/fluid/tests/test_accuracy_op.py
+++ b/python/paddle/v2/fluid/tests/test_accuracy_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_activation_op.py b/python/paddle/v2/fluid/tests/test_activation_op.py
index 1de5d446b8..eab41ebe71 100644
--- a/python/paddle/v2/fluid/tests/test_activation_op.py
+++ b/python/paddle/v2/fluid/tests/test_activation_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_adadelta_op.py b/python/paddle/v2/fluid/tests/test_adadelta_op.py
index 949318d007..1b892e64c7 100644
--- a/python/paddle/v2/fluid/tests/test_adadelta_op.py
+++ b/python/paddle/v2/fluid/tests/test_adadelta_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_adagrad_op.py b/python/paddle/v2/fluid/tests/test_adagrad_op.py
index 3556bcf8ba..320f43023c 100644
--- a/python/paddle/v2/fluid/tests/test_adagrad_op.py
+++ b/python/paddle/v2/fluid/tests/test_adagrad_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_adam_op.py b/python/paddle/v2/fluid/tests/test_adam_op.py
index df1fa8983c..d6c5a16ff2 100644
--- a/python/paddle/v2/fluid/tests/test_adam_op.py
+++ b/python/paddle/v2/fluid/tests/test_adam_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_adamax_op.py b/python/paddle/v2/fluid/tests/test_adamax_op.py
index e285c454f0..8099beefa5 100644
--- a/python/paddle/v2/fluid/tests/test_adamax_op.py
+++ b/python/paddle/v2/fluid/tests/test_adamax_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_array_read_write_op.py b/python/paddle/v2/fluid/tests/test_array_read_write_op.py
index a32c24486e..8917b9b906 100644
--- a/python/paddle/v2/fluid/tests/test_array_read_write_op.py
+++ b/python/paddle/v2/fluid/tests/test_array_read_write_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_assign_op.py b/python/paddle/v2/fluid/tests/test_assign_op.py
index fbbfe0d02c..e93c02bd3e 100644
--- a/python/paddle/v2/fluid/tests/test_assign_op.py
+++ b/python/paddle/v2/fluid/tests/test_assign_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_assign_value_op.py b/python/paddle/v2/fluid/tests/test_assign_value_op.py
index 93970f863b..99d7e958c3 100644
--- a/python/paddle/v2/fluid/tests/test_assign_value_op.py
+++ b/python/paddle/v2/fluid/tests/test_assign_value_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_auc_op.py b/python/paddle/v2/fluid/tests/test_auc_op.py
index 5e4caedf5d..948836039b 100644
--- a/python/paddle/v2/fluid/tests/test_auc_op.py
+++ b/python/paddle/v2/fluid/tests/test_auc_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_batch_norm_op.py b/python/paddle/v2/fluid/tests/test_batch_norm_op.py
index cf13166f25..778c7044ce 100644
--- a/python/paddle/v2/fluid/tests/test_batch_norm_op.py
+++ b/python/paddle/v2/fluid/tests/test_batch_norm_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_beam_search_decode_op.py b/python/paddle/v2/fluid/tests/test_beam_search_decode_op.py
index 3674784985..91f8f7b18b 100644
--- a/python/paddle/v2/fluid/tests/test_beam_search_decode_op.py
+++ b/python/paddle/v2/fluid/tests/test_beam_search_decode_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_beam_search_op.py b/python/paddle/v2/fluid/tests/test_beam_search_op.py
index 4da463df26..1596bb3970 100644
--- a/python/paddle/v2/fluid/tests/test_beam_search_op.py
+++ b/python/paddle/v2/fluid/tests/test_beam_search_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_bilinear_tensor_product_op.py b/python/paddle/v2/fluid/tests/test_bilinear_tensor_product_op.py
index 4b03f512c2..d20a11e27e 100644
--- a/python/paddle/v2/fluid/tests/test_bilinear_tensor_product_op.py
+++ b/python/paddle/v2/fluid/tests/test_bilinear_tensor_product_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_bipartite_match_op.py b/python/paddle/v2/fluid/tests/test_bipartite_match_op.py
index 4943bbb338..9f9af2f55e 100644
--- a/python/paddle/v2/fluid/tests/test_bipartite_match_op.py
+++ b/python/paddle/v2/fluid/tests/test_bipartite_match_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 #Licensed under the Apache License, Version 2.0 (the "License");
 #you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_box_coder_op.py b/python/paddle/v2/fluid/tests/test_box_coder_op.py
index 0dc18476fd..b839176092 100644
--- a/python/paddle/v2/fluid/tests/test_box_coder_op.py
+++ b/python/paddle/v2/fluid/tests/test_box_coder_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_calc_gradient.py b/python/paddle/v2/fluid/tests/test_calc_gradient.py
index c773e81768..1b38dcf343 100644
--- a/python/paddle/v2/fluid/tests/test_calc_gradient.py
+++ b/python/paddle/v2/fluid/tests/test_calc_gradient.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_cast_op.py b/python/paddle/v2/fluid/tests/test_cast_op.py
index 327b246ed8..44859e2155 100644
--- a/python/paddle/v2/fluid/tests/test_cast_op.py
+++ b/python/paddle/v2/fluid/tests/test_cast_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_chunk_eval_op.py b/python/paddle/v2/fluid/tests/test_chunk_eval_op.py
index 5c3efe9baa..050df2801c 100644
--- a/python/paddle/v2/fluid/tests/test_chunk_eval_op.py
+++ b/python/paddle/v2/fluid/tests/test_chunk_eval_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_clip_by_norm_op.py b/python/paddle/v2/fluid/tests/test_clip_by_norm_op.py
index b30f321c79..129958fa28 100644
--- a/python/paddle/v2/fluid/tests/test_clip_by_norm_op.py
+++ b/python/paddle/v2/fluid/tests/test_clip_by_norm_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_clip_op.py b/python/paddle/v2/fluid/tests/test_clip_op.py
index ef0b75e286..3df80c8ec8 100644
--- a/python/paddle/v2/fluid/tests/test_clip_op.py
+++ b/python/paddle/v2/fluid/tests/test_clip_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_compare_op.py b/python/paddle/v2/fluid/tests/test_compare_op.py
index c9be80fc45..83d57639ca 100644
--- a/python/paddle/v2/fluid/tests/test_compare_op.py
+++ b/python/paddle/v2/fluid/tests/test_compare_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_concat_op.py b/python/paddle/v2/fluid/tests/test_concat_op.py
index ea0a95ebec..558f3a4dcb 100644
--- a/python/paddle/v2/fluid/tests/test_concat_op.py
+++ b/python/paddle/v2/fluid/tests/test_concat_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_cond_op.py b/python/paddle/v2/fluid/tests/test_cond_op.py
index 4b7ca0963e..6f4380166b 100644
--- a/python/paddle/v2/fluid/tests/test_cond_op.py
+++ b/python/paddle/v2/fluid/tests/test_cond_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_conditional_block.py b/python/paddle/v2/fluid/tests/test_conditional_block.py
index 5ee729cfee..58ac267203 100644
--- a/python/paddle/v2/fluid/tests/test_conditional_block.py
+++ b/python/paddle/v2/fluid/tests/test_conditional_block.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_const_value.py b/python/paddle/v2/fluid/tests/test_const_value.py
index d5b7cfded1..06c1c21fbc 100644
--- a/python/paddle/v2/fluid/tests/test_const_value.py
+++ b/python/paddle/v2/fluid/tests/test_const_value.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_conv2d_op.py b/python/paddle/v2/fluid/tests/test_conv2d_op.py
index 7512ea333e..ad242692ec 100644
--- a/python/paddle/v2/fluid/tests/test_conv2d_op.py
+++ b/python/paddle/v2/fluid/tests/test_conv2d_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_conv2d_transpose_op.py b/python/paddle/v2/fluid/tests/test_conv2d_transpose_op.py
index 0c76e222c9..c9e74f5860 100644
--- a/python/paddle/v2/fluid/tests/test_conv2d_transpose_op.py
+++ b/python/paddle/v2/fluid/tests/test_conv2d_transpose_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_conv3d_op.py b/python/paddle/v2/fluid/tests/test_conv3d_op.py
index 8121e32865..0f7e383d1a 100644
--- a/python/paddle/v2/fluid/tests/test_conv3d_op.py
+++ b/python/paddle/v2/fluid/tests/test_conv3d_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_conv3d_transpose_op.py b/python/paddle/v2/fluid/tests/test_conv3d_transpose_op.py
index 4934c5a34e..a70f23d4ad 100644
--- a/python/paddle/v2/fluid/tests/test_conv3d_transpose_op.py
+++ b/python/paddle/v2/fluid/tests/test_conv3d_transpose_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_conv_shift_op.py b/python/paddle/v2/fluid/tests/test_conv_shift_op.py
index 7029d5a2eb..5d4d244f43 100644
--- a/python/paddle/v2/fluid/tests/test_conv_shift_op.py
+++ b/python/paddle/v2/fluid/tests/test_conv_shift_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_cos_sim_op.py b/python/paddle/v2/fluid/tests/test_cos_sim_op.py
index 33db12ba9c..1b27cd5767 100644
--- a/python/paddle/v2/fluid/tests/test_cos_sim_op.py
+++ b/python/paddle/v2/fluid/tests/test_cos_sim_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_create_op_doc_string.py b/python/paddle/v2/fluid/tests/test_create_op_doc_string.py
index 2b7951ecea..4eadbd18ac 100644
--- a/python/paddle/v2/fluid/tests/test_create_op_doc_string.py
+++ b/python/paddle/v2/fluid/tests/test_create_op_doc_string.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_crf_decoding_op.py b/python/paddle/v2/fluid/tests/test_crf_decoding_op.py
index f819387cdc..f397f542bb 100644
--- a/python/paddle/v2/fluid/tests/test_crf_decoding_op.py
+++ b/python/paddle/v2/fluid/tests/test_crf_decoding_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_crop_op.py b/python/paddle/v2/fluid/tests/test_crop_op.py
index 36bf176168..20cc3a643f 100644
--- a/python/paddle/v2/fluid/tests/test_crop_op.py
+++ b/python/paddle/v2/fluid/tests/test_crop_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_cross_entropy_op.py b/python/paddle/v2/fluid/tests/test_cross_entropy_op.py
index ae8e9be6de..c5b9e92d69 100644
--- a/python/paddle/v2/fluid/tests/test_cross_entropy_op.py
+++ b/python/paddle/v2/fluid/tests/test_cross_entropy_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_ctc_align.py b/python/paddle/v2/fluid/tests/test_ctc_align.py
index cc815d8e9e..f166031a1c 100644
--- a/python/paddle/v2/fluid/tests/test_ctc_align.py
+++ b/python/paddle/v2/fluid/tests/test_ctc_align.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_cumsum_op.py b/python/paddle/v2/fluid/tests/test_cumsum_op.py
index e45ef45730..04e7f0b945 100644
--- a/python/paddle/v2/fluid/tests/test_cumsum_op.py
+++ b/python/paddle/v2/fluid/tests/test_cumsum_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_data_feeder.py b/python/paddle/v2/fluid/tests/test_data_feeder.py
index f967221015..3154293ee6 100644
--- a/python/paddle/v2/fluid/tests/test_data_feeder.py
+++ b/python/paddle/v2/fluid/tests/test_data_feeder.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_decayed_adagrad_op.py b/python/paddle/v2/fluid/tests/test_decayed_adagrad_op.py
index 78d4e3608e..84c44d4817 100644
--- a/python/paddle/v2/fluid/tests/test_decayed_adagrad_op.py
+++ b/python/paddle/v2/fluid/tests/test_decayed_adagrad_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_default_scope_funcs.py b/python/paddle/v2/fluid/tests/test_default_scope_funcs.py
index 5ff52f6d6b..d7ca596070 100644
--- a/python/paddle/v2/fluid/tests/test_default_scope_funcs.py
+++ b/python/paddle/v2/fluid/tests/test_default_scope_funcs.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_detection.py b/python/paddle/v2/fluid/tests/test_detection.py
index b731fc9b02..ca7cb5a30c 100644
--- a/python/paddle/v2/fluid/tests/test_detection.py
+++ b/python/paddle/v2/fluid/tests/test_detection.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_detection_output_op.py b/python/paddle/v2/fluid/tests/test_detection_output_op.py
index 8a5e06b38f..0a132652f1 100644
--- a/python/paddle/v2/fluid/tests/test_detection_output_op.py
+++ b/python/paddle/v2/fluid/tests/test_detection_output_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_dropout_op.py b/python/paddle/v2/fluid/tests/test_dropout_op.py
index b0c55df9f5..60930a612c 100644
--- a/python/paddle/v2/fluid/tests/test_dropout_op.py
+++ b/python/paddle/v2/fluid/tests/test_dropout_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_dyn_rnn.py b/python/paddle/v2/fluid/tests/test_dyn_rnn.py
index 2ac926c63c..1571572fc6 100644
--- a/python/paddle/v2/fluid/tests/test_dyn_rnn.py
+++ b/python/paddle/v2/fluid/tests/test_dyn_rnn.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py
index dd608432df..8b01ec730a 100644
--- a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py
+++ b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_dynrnn_static_input.py b/python/paddle/v2/fluid/tests/test_dynrnn_static_input.py
index d14923b6b3..d2f05dcd14 100644
--- a/python/paddle/v2/fluid/tests/test_dynrnn_static_input.py
+++ b/python/paddle/v2/fluid/tests/test_dynrnn_static_input.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_edit_distance_op.py b/python/paddle/v2/fluid/tests/test_edit_distance_op.py
index bebdc5cba3..2957fb5058 100644
--- a/python/paddle/v2/fluid/tests/test_edit_distance_op.py
+++ b/python/paddle/v2/fluid/tests/test_edit_distance_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_elementwise_add_op.py b/python/paddle/v2/fluid/tests/test_elementwise_add_op.py
index 3564772fb5..c8e930dad7 100644
--- a/python/paddle/v2/fluid/tests/test_elementwise_add_op.py
+++ b/python/paddle/v2/fluid/tests/test_elementwise_add_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_elementwise_div_op.py b/python/paddle/v2/fluid/tests/test_elementwise_div_op.py
index 77b113af76..bfe022af6d 100644
--- a/python/paddle/v2/fluid/tests/test_elementwise_div_op.py
+++ b/python/paddle/v2/fluid/tests/test_elementwise_div_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_elementwise_max_op.py b/python/paddle/v2/fluid/tests/test_elementwise_max_op.py
index 0fc15693b1..b6cd18a579 100644
--- a/python/paddle/v2/fluid/tests/test_elementwise_max_op.py
+++ b/python/paddle/v2/fluid/tests/test_elementwise_max_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_elementwise_min_op.py b/python/paddle/v2/fluid/tests/test_elementwise_min_op.py
index 51584d6980..92099724fe 100644
--- a/python/paddle/v2/fluid/tests/test_elementwise_min_op.py
+++ b/python/paddle/v2/fluid/tests/test_elementwise_min_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_elementwise_mul_op.py b/python/paddle/v2/fluid/tests/test_elementwise_mul_op.py
index 12dfa6599c..2742bb21d9 100644
--- a/python/paddle/v2/fluid/tests/test_elementwise_mul_op.py
+++ b/python/paddle/v2/fluid/tests/test_elementwise_mul_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_elementwise_pow_op.py b/python/paddle/v2/fluid/tests/test_elementwise_pow_op.py
index e31749df9b..a3fd18669c 100644
--- a/python/paddle/v2/fluid/tests/test_elementwise_pow_op.py
+++ b/python/paddle/v2/fluid/tests/test_elementwise_pow_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_elementwise_sub_op.py b/python/paddle/v2/fluid/tests/test_elementwise_sub_op.py
index cf53d85bba..acf652d3fb 100644
--- a/python/paddle/v2/fluid/tests/test_elementwise_sub_op.py
+++ b/python/paddle/v2/fluid/tests/test_elementwise_sub_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_error_clip.py b/python/paddle/v2/fluid/tests/test_error_clip.py
index 6f7718f4d8..b331f16913 100644
--- a/python/paddle/v2/fluid/tests/test_error_clip.py
+++ b/python/paddle/v2/fluid/tests/test_error_clip.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_exception.py b/python/paddle/v2/fluid/tests/test_exception.py
index cd57ca586b..066b0b7409 100644
--- a/python/paddle/v2/fluid/tests/test_exception.py
+++ b/python/paddle/v2/fluid/tests/test_exception.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_executor_and_mul.py b/python/paddle/v2/fluid/tests/test_executor_and_mul.py
index 44f93be6cb..c043c07b3a 100644
--- a/python/paddle/v2/fluid/tests/test_executor_and_mul.py
+++ b/python/paddle/v2/fluid/tests/test_executor_and_mul.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_expand_op.py b/python/paddle/v2/fluid/tests/test_expand_op.py
index b1a1cbc0fa..a91e3aef5a 100644
--- a/python/paddle/v2/fluid/tests/test_expand_op.py
+++ b/python/paddle/v2/fluid/tests/test_expand_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_feed_fetch_method.py b/python/paddle/v2/fluid/tests/test_feed_fetch_method.py
index 827a7590ff..f24e5e27f3 100644
--- a/python/paddle/v2/fluid/tests/test_feed_fetch_method.py
+++ b/python/paddle/v2/fluid/tests/test_feed_fetch_method.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_fill_constant_batch_size_like_op.py b/python/paddle/v2/fluid/tests/test_fill_constant_batch_size_like_op.py
index f34a1ceb23..66e3e2d51d 100644
--- a/python/paddle/v2/fluid/tests/test_fill_constant_batch_size_like_op.py
+++ b/python/paddle/v2/fluid/tests/test_fill_constant_batch_size_like_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_fill_constant_op.py b/python/paddle/v2/fluid/tests/test_fill_constant_op.py
index a05fa39729..5e2ddb218a 100644
--- a/python/paddle/v2/fluid/tests/test_fill_constant_op.py
+++ b/python/paddle/v2/fluid/tests/test_fill_constant_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_fill_op.py b/python/paddle/v2/fluid/tests/test_fill_op.py
index 901546f6f8..34c6401377 100644
--- a/python/paddle/v2/fluid/tests/test_fill_op.py
+++ b/python/paddle/v2/fluid/tests/test_fill_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_fill_zeros_like_op.py b/python/paddle/v2/fluid/tests/test_fill_zeros_like_op.py
index b7f0b96647..c9b3e4ba13 100644
--- a/python/paddle/v2/fluid/tests/test_fill_zeros_like_op.py
+++ b/python/paddle/v2/fluid/tests/test_fill_zeros_like_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_framework_debug_str.py b/python/paddle/v2/fluid/tests/test_framework_debug_str.py
index f8fcfb2249..88995c24df 100644
--- a/python/paddle/v2/fluid/tests/test_framework_debug_str.py
+++ b/python/paddle/v2/fluid/tests/test_framework_debug_str.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_ftrl_op.py b/python/paddle/v2/fluid/tests/test_ftrl_op.py
index 895337de0f..5f7581391a 100644
--- a/python/paddle/v2/fluid/tests/test_ftrl_op.py
+++ b/python/paddle/v2/fluid/tests/test_ftrl_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_gather_op.py b/python/paddle/v2/fluid/tests/test_gather_op.py
index 7675636797..6fd043c27e 100644
--- a/python/paddle/v2/fluid/tests/test_gather_op.py
+++ b/python/paddle/v2/fluid/tests/test_gather_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_gaussian_random_op.py b/python/paddle/v2/fluid/tests/test_gaussian_random_op.py
index 79beb8b1fc..3c0ee64098 100644
--- a/python/paddle/v2/fluid/tests/test_gaussian_random_op.py
+++ b/python/paddle/v2/fluid/tests/test_gaussian_random_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_get_places_op.py b/python/paddle/v2/fluid/tests/test_get_places_op.py
index 68698c5f4a..265433e606 100644
--- a/python/paddle/v2/fluid/tests/test_get_places_op.py
+++ b/python/paddle/v2/fluid/tests/test_get_places_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_gradient_clip.py b/python/paddle/v2/fluid/tests/test_gradient_clip.py
index 9337791c21..792262df84 100644
--- a/python/paddle/v2/fluid/tests/test_gradient_clip.py
+++ b/python/paddle/v2/fluid/tests/test_gradient_clip.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_gru_op.py b/python/paddle/v2/fluid/tests/test_gru_op.py
index 69cfd6c481..3a13eb872a 100644
--- a/python/paddle/v2/fluid/tests/test_gru_op.py
+++ b/python/paddle/v2/fluid/tests/test_gru_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_gru_unit_op.py b/python/paddle/v2/fluid/tests/test_gru_unit_op.py
index 71f13c4513..c56b1eefd3 100644
--- a/python/paddle/v2/fluid/tests/test_gru_unit_op.py
+++ b/python/paddle/v2/fluid/tests/test_gru_unit_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_hinge_loss_op.py b/python/paddle/v2/fluid/tests/test_hinge_loss_op.py
index 71ff47316e..70586c6be3 100644
--- a/python/paddle/v2/fluid/tests/test_hinge_loss_op.py
+++ b/python/paddle/v2/fluid/tests/test_hinge_loss_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_huber_loss_op.py b/python/paddle/v2/fluid/tests/test_huber_loss_op.py
index e4560af778..a8d0a77625 100644
--- a/python/paddle/v2/fluid/tests/test_huber_loss_op.py
+++ b/python/paddle/v2/fluid/tests/test_huber_loss_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_im2sequence_op.py b/python/paddle/v2/fluid/tests/test_im2sequence_op.py
index 2cab3e31a5..4946475f11 100644
--- a/python/paddle/v2/fluid/tests/test_im2sequence_op.py
+++ b/python/paddle/v2/fluid/tests/test_im2sequence_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 #Licensed under the Apache License, Version 2.0 (the "License");
 #you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_image_classification_layer.py b/python/paddle/v2/fluid/tests/test_image_classification_layer.py
index c64cfed5f5..8af8f646a7 100644
--- a/python/paddle/v2/fluid/tests/test_image_classification_layer.py
+++ b/python/paddle/v2/fluid/tests/test_image_classification_layer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_infer_shape.py b/python/paddle/v2/fluid/tests/test_infer_shape.py
index 521096388a..17957b9e04 100644
--- a/python/paddle/v2/fluid/tests/test_infer_shape.py
+++ b/python/paddle/v2/fluid/tests/test_infer_shape.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_inference_model_io.py b/python/paddle/v2/fluid/tests/test_inference_model_io.py
index adf428aa84..e381312ccc 100644
--- a/python/paddle/v2/fluid/tests/test_inference_model_io.py
+++ b/python/paddle/v2/fluid/tests/test_inference_model_io.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_initializer.py b/python/paddle/v2/fluid/tests/test_initializer.py
index 67746b4d7d..6d4eb62916 100644
--- a/python/paddle/v2/fluid/tests/test_initializer.py
+++ b/python/paddle/v2/fluid/tests/test_initializer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_iou_similarity_op.py b/python/paddle/v2/fluid/tests/test_iou_similarity_op.py
index 128f2e4977..e33436b63c 100644
--- a/python/paddle/v2/fluid/tests/test_iou_similarity_op.py
+++ b/python/paddle/v2/fluid/tests/test_iou_similarity_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_is_empty_op.py b/python/paddle/v2/fluid/tests/test_is_empty_op.py
index 7c17e3d57a..799da9dc15 100644
--- a/python/paddle/v2/fluid/tests/test_is_empty_op.py
+++ b/python/paddle/v2/fluid/tests/test_is_empty_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_l1_norm_op.py b/python/paddle/v2/fluid/tests/test_l1_norm_op.py
index bbc2087846..fa5b18a16f 100644
--- a/python/paddle/v2/fluid/tests/test_l1_norm_op.py
+++ b/python/paddle/v2/fluid/tests/test_l1_norm_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_label_smooth_op.py b/python/paddle/v2/fluid/tests/test_label_smooth_op.py
index 19a4df5744..ca21289a0d 100644
--- a/python/paddle/v2/fluid/tests/test_label_smooth_op.py
+++ b/python/paddle/v2/fluid/tests/test_label_smooth_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_layer_norm_op.py b/python/paddle/v2/fluid/tests/test_layer_norm_op.py
index 4460ffaf9c..b723b471bc 100644
--- a/python/paddle/v2/fluid/tests/test_layer_norm_op.py
+++ b/python/paddle/v2/fluid/tests/test_layer_norm_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_layers.py b/python/paddle/v2/fluid/tests/test_layers.py
index 50ef820424..e757598bba 100644
--- a/python/paddle/v2/fluid/tests/test_layers.py
+++ b/python/paddle/v2/fluid/tests/test_layers.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_linear_chain_crf_op.py b/python/paddle/v2/fluid/tests/test_linear_chain_crf_op.py
index cbfd9d5e5b..f49f7635f7 100644
--- a/python/paddle/v2/fluid/tests/test_linear_chain_crf_op.py
+++ b/python/paddle/v2/fluid/tests/test_linear_chain_crf_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_lod_array_length_op.py b/python/paddle/v2/fluid/tests/test_lod_array_length_op.py
index eff28368f1..643ee906d6 100644
--- a/python/paddle/v2/fluid/tests/test_lod_array_length_op.py
+++ b/python/paddle/v2/fluid/tests/test_lod_array_length_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_lod_rank_table.py b/python/paddle/v2/fluid/tests/test_lod_rank_table.py
index eb0392e8bf..70b8d69585 100644
--- a/python/paddle/v2/fluid/tests/test_lod_rank_table.py
+++ b/python/paddle/v2/fluid/tests/test_lod_rank_table.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_lod_reset_op.py b/python/paddle/v2/fluid/tests/test_lod_reset_op.py
index 4ee360403e..3bf8230f87 100644
--- a/python/paddle/v2/fluid/tests/test_lod_reset_op.py
+++ b/python/paddle/v2/fluid/tests/test_lod_reset_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_lod_tensor_array.py b/python/paddle/v2/fluid/tests/test_lod_tensor_array.py
index 0f3ac3c03d..0e90e25538 100644
--- a/python/paddle/v2/fluid/tests/test_lod_tensor_array.py
+++ b/python/paddle/v2/fluid/tests/test_lod_tensor_array.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_lod_tensor_array_ops.py b/python/paddle/v2/fluid/tests/test_lod_tensor_array_ops.py
index c2d04db99b..ebc0a2f714 100644
--- a/python/paddle/v2/fluid/tests/test_lod_tensor_array_ops.py
+++ b/python/paddle/v2/fluid/tests/test_lod_tensor_array_ops.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_log_loss_op.py b/python/paddle/v2/fluid/tests/test_log_loss_op.py
index 338355d0c4..d3980b8db9 100644
--- a/python/paddle/v2/fluid/tests/test_log_loss_op.py
+++ b/python/paddle/v2/fluid/tests/test_log_loss_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_logical_op.py b/python/paddle/v2/fluid/tests/test_logical_op.py
index dd67dc561b..1d7dfe60f2 100644
--- a/python/paddle/v2/fluid/tests/test_logical_op.py
+++ b/python/paddle/v2/fluid/tests/test_logical_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_lookup_table_op.py b/python/paddle/v2/fluid/tests/test_lookup_table_op.py
index 0c566c76c9..03a5bd24a1 100644
--- a/python/paddle/v2/fluid/tests/test_lookup_table_op.py
+++ b/python/paddle/v2/fluid/tests/test_lookup_table_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_lrn_op.py b/python/paddle/v2/fluid/tests/test_lrn_op.py
index a841dcf79f..7f2352c588 100644
--- a/python/paddle/v2/fluid/tests/test_lrn_op.py
+++ b/python/paddle/v2/fluid/tests/test_lrn_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_lstm_op.py b/python/paddle/v2/fluid/tests/test_lstm_op.py
index 3e79f9d8e1..f8ff5a3361 100644
--- a/python/paddle/v2/fluid/tests/test_lstm_op.py
+++ b/python/paddle/v2/fluid/tests/test_lstm_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_lstm_unit_op.py b/python/paddle/v2/fluid/tests/test_lstm_unit_op.py
index d6348ea0ec..af0c3db701 100644
--- a/python/paddle/v2/fluid/tests/test_lstm_unit_op.py
+++ b/python/paddle/v2/fluid/tests/test_lstm_unit_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_lstmp_op.py b/python/paddle/v2/fluid/tests/test_lstmp_op.py
index 92a954a9aa..afff133f6c 100644
--- a/python/paddle/v2/fluid/tests/test_lstmp_op.py
+++ b/python/paddle/v2/fluid/tests/test_lstmp_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 #Licensed under the Apache License, Version 2.0 (the "License");
 #you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_margin_rank_loss_op.py b/python/paddle/v2/fluid/tests/test_margin_rank_loss_op.py
index 694ce20712..97c112487f 100644
--- a/python/paddle/v2/fluid/tests/test_margin_rank_loss_op.py
+++ b/python/paddle/v2/fluid/tests/test_margin_rank_loss_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_math_op_patch.py b/python/paddle/v2/fluid/tests/test_math_op_patch.py
index 2e77639a4c..cae5188fe8 100644
--- a/python/paddle/v2/fluid/tests/test_math_op_patch.py
+++ b/python/paddle/v2/fluid/tests/test_math_op_patch.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_matmul_op.py b/python/paddle/v2/fluid/tests/test_matmul_op.py
index 5138af38f4..44ac468389 100644
--- a/python/paddle/v2/fluid/tests/test_matmul_op.py
+++ b/python/paddle/v2/fluid/tests/test_matmul_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_maxout_op.py b/python/paddle/v2/fluid/tests/test_maxout_op.py
index 5cd7fbde84..f5ddf72516 100644
--- a/python/paddle/v2/fluid/tests/test_maxout_op.py
+++ b/python/paddle/v2/fluid/tests/test_maxout_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_mean_op.py b/python/paddle/v2/fluid/tests/test_mean_op.py
index 81e8421635..15472a8fc4 100644
--- a/python/paddle/v2/fluid/tests/test_mean_op.py
+++ b/python/paddle/v2/fluid/tests/test_mean_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_memory_optimization_transpiler.py b/python/paddle/v2/fluid/tests/test_memory_optimization_transpiler.py
index 2e9ed78ffd..a276db581e 100644
--- a/python/paddle/v2/fluid/tests/test_memory_optimization_transpiler.py
+++ b/python/paddle/v2/fluid/tests/test_memory_optimization_transpiler.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_minus_op.py b/python/paddle/v2/fluid/tests/test_minus_op.py
index aee909f56c..ee32bd4992 100644
--- a/python/paddle/v2/fluid/tests/test_minus_op.py
+++ b/python/paddle/v2/fluid/tests/test_minus_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_mnist_if_else_op.py b/python/paddle/v2/fluid/tests/test_mnist_if_else_op.py
index 3288a0f007..75a651cf27 100644
--- a/python/paddle/v2/fluid/tests/test_mnist_if_else_op.py
+++ b/python/paddle/v2/fluid/tests/test_mnist_if_else_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_modified_huber_loss_op.py b/python/paddle/v2/fluid/tests/test_modified_huber_loss_op.py
index eb3873b9ea..def48c9261 100644
--- a/python/paddle/v2/fluid/tests/test_modified_huber_loss_op.py
+++ b/python/paddle/v2/fluid/tests/test_modified_huber_loss_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_momentum_op.py b/python/paddle/v2/fluid/tests/test_momentum_op.py
index 048eaae06b..aaea9c1809 100644
--- a/python/paddle/v2/fluid/tests/test_momentum_op.py
+++ b/python/paddle/v2/fluid/tests/test_momentum_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_mul_op.py b/python/paddle/v2/fluid/tests/test_mul_op.py
index 83715f0e27..9d1da420c7 100644
--- a/python/paddle/v2/fluid/tests/test_mul_op.py
+++ b/python/paddle/v2/fluid/tests/test_mul_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_multiclass_nms_op.py b/python/paddle/v2/fluid/tests/test_multiclass_nms_op.py
index 529223cf40..6459913c01 100644
--- a/python/paddle/v2/fluid/tests/test_multiclass_nms_op.py
+++ b/python/paddle/v2/fluid/tests/test_multiclass_nms_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 #Licensed under the Apache License, Version 2.0 (the "License");
 #you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_multihead_attention.py b/python/paddle/v2/fluid/tests/test_multihead_attention.py
index a2b300a645..6eeeefe021 100644
--- a/python/paddle/v2/fluid/tests/test_multihead_attention.py
+++ b/python/paddle/v2/fluid/tests/test_multihead_attention.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_multiplex_op.py b/python/paddle/v2/fluid/tests/test_multiplex_op.py
index a06aef94a5..03cad8b43b 100644
--- a/python/paddle/v2/fluid/tests/test_multiplex_op.py
+++ b/python/paddle/v2/fluid/tests/test_multiplex_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_nce.py b/python/paddle/v2/fluid/tests/test_nce.py
index 9a51c1f612..068081972d 100644
--- a/python/paddle/v2/fluid/tests/test_nce.py
+++ b/python/paddle/v2/fluid/tests/test_nce.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_net.py b/python/paddle/v2/fluid/tests/test_net.py
index 69d95d4f70..796a839117 100644
--- a/python/paddle/v2/fluid/tests/test_net.py
+++ b/python/paddle/v2/fluid/tests/test_net.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_norm_op.py b/python/paddle/v2/fluid/tests/test_norm_op.py
index dd1cd5a31c..6feda175fb 100644
--- a/python/paddle/v2/fluid/tests/test_norm_op.py
+++ b/python/paddle/v2/fluid/tests/test_norm_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_normalization_wrapper.py b/python/paddle/v2/fluid/tests/test_normalization_wrapper.py
index 6b71f2a923..094d8071e2 100644
--- a/python/paddle/v2/fluid/tests/test_normalization_wrapper.py
+++ b/python/paddle/v2/fluid/tests/test_normalization_wrapper.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_one_hot_op.py b/python/paddle/v2/fluid/tests/test_one_hot_op.py
index e51ea27d14..c93be0efda 100644
--- a/python/paddle/v2/fluid/tests/test_one_hot_op.py
+++ b/python/paddle/v2/fluid/tests/test_one_hot_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_op_support_gpu.py b/python/paddle/v2/fluid/tests/test_op_support_gpu.py
index 7de02a8fda..f8ac55590c 100644
--- a/python/paddle/v2/fluid/tests/test_op_support_gpu.py
+++ b/python/paddle/v2/fluid/tests/test_op_support_gpu.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_operator.py b/python/paddle/v2/fluid/tests/test_operator.py
index b82cf580e8..1f5de93387 100644
--- a/python/paddle/v2/fluid/tests/test_operator.py
+++ b/python/paddle/v2/fluid/tests/test_operator.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_operator_desc.py b/python/paddle/v2/fluid/tests/test_operator_desc.py
index 2c8665ffa2..c64c08ff7f 100644
--- a/python/paddle/v2/fluid/tests/test_operator_desc.py
+++ b/python/paddle/v2/fluid/tests/test_operator_desc.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_optimizer.py b/python/paddle/v2/fluid/tests/test_optimizer.py
index dc6b84dcdc..875e9e7c76 100644
--- a/python/paddle/v2/fluid/tests/test_optimizer.py
+++ b/python/paddle/v2/fluid/tests/test_optimizer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_pad_op.py b/python/paddle/v2/fluid/tests/test_pad_op.py
index 0bd4800055..300f3ffcb8 100644
--- a/python/paddle/v2/fluid/tests/test_pad_op.py
+++ b/python/paddle/v2/fluid/tests/test_pad_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_parallel_op.py b/python/paddle/v2/fluid/tests/test_parallel_op.py
index f1fd09a7fd..a0fc91f6de 100644
--- a/python/paddle/v2/fluid/tests/test_parallel_op.py
+++ b/python/paddle/v2/fluid/tests/test_parallel_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_parameter.py b/python/paddle/v2/fluid/tests/test_parameter.py
index dfecdf939b..0ba9235fdb 100644
--- a/python/paddle/v2/fluid/tests/test_parameter.py
+++ b/python/paddle/v2/fluid/tests/test_parameter.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_pool2d_op.py b/python/paddle/v2/fluid/tests/test_pool2d_op.py
index 2f43be8a0f..77961bc99f 100644
--- a/python/paddle/v2/fluid/tests/test_pool2d_op.py
+++ b/python/paddle/v2/fluid/tests/test_pool2d_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_pool3d_op.py b/python/paddle/v2/fluid/tests/test_pool3d_op.py
index c93711e051..a6afdaedc5 100644
--- a/python/paddle/v2/fluid/tests/test_pool3d_op.py
+++ b/python/paddle/v2/fluid/tests/test_pool3d_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_pool_max_op.py b/python/paddle/v2/fluid/tests/test_pool_max_op.py
index 330ad24bd4..cf9b763922 100644
--- a/python/paddle/v2/fluid/tests/test_pool_max_op.py
+++ b/python/paddle/v2/fluid/tests/test_pool_max_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_positive_negative_pair_op.py b/python/paddle/v2/fluid/tests/test_positive_negative_pair_op.py
index 9b5e544655..091cfc9c72 100644
--- a/python/paddle/v2/fluid/tests/test_positive_negative_pair_op.py
+++ b/python/paddle/v2/fluid/tests/test_positive_negative_pair_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_precision_recall_op.py b/python/paddle/v2/fluid/tests/test_precision_recall_op.py
index 188b7af559..7830ba2958 100644
--- a/python/paddle/v2/fluid/tests/test_precision_recall_op.py
+++ b/python/paddle/v2/fluid/tests/test_precision_recall_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_prelu_op.py b/python/paddle/v2/fluid/tests/test_prelu_op.py
index 848036234c..ae19a553bb 100644
--- a/python/paddle/v2/fluid/tests/test_prelu_op.py
+++ b/python/paddle/v2/fluid/tests/test_prelu_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_print_op.py b/python/paddle/v2/fluid/tests/test_print_op.py
index 3177700dfa..1e49ce994b 100644
--- a/python/paddle/v2/fluid/tests/test_print_op.py
+++ b/python/paddle/v2/fluid/tests/test_print_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_prior_box_op.py b/python/paddle/v2/fluid/tests/test_prior_box_op.py
index a6c21af49f..c21138c13e 100644
--- a/python/paddle/v2/fluid/tests/test_prior_box_op.py
+++ b/python/paddle/v2/fluid/tests/test_prior_box_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_profiler.py b/python/paddle/v2/fluid/tests/test_profiler.py
index 09b2d08401..62bfb2b8e2 100644
--- a/python/paddle/v2/fluid/tests/test_profiler.py
+++ b/python/paddle/v2/fluid/tests/test_profiler.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_program.py b/python/paddle/v2/fluid/tests/test_program.py
index 9967da1593..266e189e50 100644
--- a/python/paddle/v2/fluid/tests/test_program.py
+++ b/python/paddle/v2/fluid/tests/test_program.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_protobuf.py b/python/paddle/v2/fluid/tests/test_protobuf.py
index 48e6dedc58..90de56514d 100644
--- a/python/paddle/v2/fluid/tests/test_protobuf.py
+++ b/python/paddle/v2/fluid/tests/test_protobuf.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_protobuf_descs.py b/python/paddle/v2/fluid/tests/test_protobuf_descs.py
index c590bf1c65..55d18d2729 100644
--- a/python/paddle/v2/fluid/tests/test_protobuf_descs.py
+++ b/python/paddle/v2/fluid/tests/test_protobuf_descs.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_proximal_adagrad_op.py b/python/paddle/v2/fluid/tests/test_proximal_adagrad_op.py
index 744d71bdcf..3c26895850 100644
--- a/python/paddle/v2/fluid/tests/test_proximal_adagrad_op.py
+++ b/python/paddle/v2/fluid/tests/test_proximal_adagrad_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_proximal_gd_op.py b/python/paddle/v2/fluid/tests/test_proximal_gd_op.py
index 96540cf6cf..137594b9a0 100644
--- a/python/paddle/v2/fluid/tests/test_proximal_gd_op.py
+++ b/python/paddle/v2/fluid/tests/test_proximal_gd_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_rank_loss_op.py b/python/paddle/v2/fluid/tests/test_rank_loss_op.py
index f31a2c2681..7eba1e2077 100644
--- a/python/paddle/v2/fluid/tests/test_rank_loss_op.py
+++ b/python/paddle/v2/fluid/tests/test_rank_loss_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_recurrent_op.py b/python/paddle/v2/fluid/tests/test_recurrent_op.py
index 6d59e199e2..e540ca43b6 100644
--- a/python/paddle/v2/fluid/tests/test_recurrent_op.py
+++ b/python/paddle/v2/fluid/tests/test_recurrent_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_recv_op.py b/python/paddle/v2/fluid/tests/test_recv_op.py
index 3a02b88241..7a0802afc5 100644
--- a/python/paddle/v2/fluid/tests/test_recv_op.py
+++ b/python/paddle/v2/fluid/tests/test_recv_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_reduce_op.py b/python/paddle/v2/fluid/tests/test_reduce_op.py
index c669f73a7c..5e656bddb7 100644
--- a/python/paddle/v2/fluid/tests/test_reduce_op.py
+++ b/python/paddle/v2/fluid/tests/test_reduce_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_registry.py b/python/paddle/v2/fluid/tests/test_registry.py
index 44e50ca55a..bf4dc64186 100644
--- a/python/paddle/v2/fluid/tests/test_registry.py
+++ b/python/paddle/v2/fluid/tests/test_registry.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_regularizer.py b/python/paddle/v2/fluid/tests/test_regularizer.py
index b33817fa41..8fc4db1c5a 100644
--- a/python/paddle/v2/fluid/tests/test_regularizer.py
+++ b/python/paddle/v2/fluid/tests/test_regularizer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_reorder_lod_tensor.py b/python/paddle/v2/fluid/tests/test_reorder_lod_tensor.py
index 0a223bac0c..d4e17d1b1e 100644
--- a/python/paddle/v2/fluid/tests/test_reorder_lod_tensor.py
+++ b/python/paddle/v2/fluid/tests/test_reorder_lod_tensor.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_reshape_op.py b/python/paddle/v2/fluid/tests/test_reshape_op.py
index 2cc0b36460..6d1aa549d5 100644
--- a/python/paddle/v2/fluid/tests/test_reshape_op.py
+++ b/python/paddle/v2/fluid/tests/test_reshape_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_rmsprop_op.py b/python/paddle/v2/fluid/tests/test_rmsprop_op.py
index b6d7c69800..0d84a5853e 100644
--- a/python/paddle/v2/fluid/tests/test_rmsprop_op.py
+++ b/python/paddle/v2/fluid/tests/test_rmsprop_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_rnn_memory_helper_op.py b/python/paddle/v2/fluid/tests/test_rnn_memory_helper_op.py
index 82b54bbd1a..773bd17456 100644
--- a/python/paddle/v2/fluid/tests/test_rnn_memory_helper_op.py
+++ b/python/paddle/v2/fluid/tests/test_rnn_memory_helper_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_roi_pool_op.py b/python/paddle/v2/fluid/tests/test_roi_pool_op.py
index af48848dcd..e556d51b02 100644
--- a/python/paddle/v2/fluid/tests/test_roi_pool_op.py
+++ b/python/paddle/v2/fluid/tests/test_roi_pool_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_row_conv_op.py b/python/paddle/v2/fluid/tests/test_row_conv_op.py
index 580b08f75e..30f1efbcbc 100644
--- a/python/paddle/v2/fluid/tests/test_row_conv_op.py
+++ b/python/paddle/v2/fluid/tests/test_row_conv_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_scale_op.py b/python/paddle/v2/fluid/tests/test_scale_op.py
index 95cd935dda..53f59c3990 100644
--- a/python/paddle/v2/fluid/tests/test_scale_op.py
+++ b/python/paddle/v2/fluid/tests/test_scale_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_scatter_op.py b/python/paddle/v2/fluid/tests/test_scatter_op.py
index f2936e19ae..bb02a40d44 100644
--- a/python/paddle/v2/fluid/tests/test_scatter_op.py
+++ b/python/paddle/v2/fluid/tests/test_scatter_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_scope.py b/python/paddle/v2/fluid/tests/test_scope.py
index 566a11abbe..2a2efbf098 100644
--- a/python/paddle/v2/fluid/tests/test_scope.py
+++ b/python/paddle/v2/fluid/tests/test_scope.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_selected_rows.py b/python/paddle/v2/fluid/tests/test_selected_rows.py
index 65ddf1f8f5..50c8bb4bca 100644
--- a/python/paddle/v2/fluid/tests/test_selected_rows.py
+++ b/python/paddle/v2/fluid/tests/test_selected_rows.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_seq_concat_op.py b/python/paddle/v2/fluid/tests/test_seq_concat_op.py
index ba2bb075e6..1c9b61d8fd 100644
--- a/python/paddle/v2/fluid/tests/test_seq_concat_op.py
+++ b/python/paddle/v2/fluid/tests/test_seq_concat_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_seq_conv.py b/python/paddle/v2/fluid/tests/test_seq_conv.py
index 674a2e1694..51dbf1f618 100644
--- a/python/paddle/v2/fluid/tests/test_seq_conv.py
+++ b/python/paddle/v2/fluid/tests/test_seq_conv.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_seq_pool.py b/python/paddle/v2/fluid/tests/test_seq_pool.py
index 9dd6b2a087..0488475721 100644
--- a/python/paddle/v2/fluid/tests/test_seq_pool.py
+++ b/python/paddle/v2/fluid/tests/test_seq_pool.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_sequence_erase_op.py b/python/paddle/v2/fluid/tests/test_sequence_erase_op.py
index 4823836ba9..ebab77e804 100644
--- a/python/paddle/v2/fluid/tests/test_sequence_erase_op.py
+++ b/python/paddle/v2/fluid/tests/test_sequence_erase_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_sequence_expand.py b/python/paddle/v2/fluid/tests/test_sequence_expand.py
index 0d37751de4..957fa5d2c4 100644
--- a/python/paddle/v2/fluid/tests/test_sequence_expand.py
+++ b/python/paddle/v2/fluid/tests/test_sequence_expand.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_sequence_reshape.py b/python/paddle/v2/fluid/tests/test_sequence_reshape.py
index 06d5af8f5e..efeab56039 100644
--- a/python/paddle/v2/fluid/tests/test_sequence_reshape.py
+++ b/python/paddle/v2/fluid/tests/test_sequence_reshape.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_sequence_slice_op.py b/python/paddle/v2/fluid/tests/test_sequence_slice_op.py
index bf1f21bcde..660b4a171d 100644
--- a/python/paddle/v2/fluid/tests/test_sequence_slice_op.py
+++ b/python/paddle/v2/fluid/tests/test_sequence_slice_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_sequence_softmax_op.py b/python/paddle/v2/fluid/tests/test_sequence_softmax_op.py
index 5bd780f6b5..9e5c1e7a3d 100644
--- a/python/paddle/v2/fluid/tests/test_sequence_softmax_op.py
+++ b/python/paddle/v2/fluid/tests/test_sequence_softmax_op.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/fluid/tests/test_sgd_op.py b/python/paddle/v2/fluid/tests/test_sgd_op.py index ba2ca1683f..e5379b961f 100644 --- a/python/paddle/v2/fluid/tests/test_sgd_op.py +++ b/python/paddle/v2/fluid/tests/test_sgd_op.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/fluid/tests/test_shrink_rnn_memory.py b/python/paddle/v2/fluid/tests/test_shrink_rnn_memory.py index 4578211bac..48874ba8a5 100644 --- a/python/paddle/v2/fluid/tests/test_shrink_rnn_memory.py +++ b/python/paddle/v2/fluid/tests/test_shrink_rnn_memory.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/fluid/tests/test_sigmoid_cross_entropy_with_logits_op.py b/python/paddle/v2/fluid/tests/test_sigmoid_cross_entropy_with_logits_op.py index f88fa62119..c435796569 100644 --- a/python/paddle/v2/fluid/tests/test_sigmoid_cross_entropy_with_logits_op.py +++ b/python/paddle/v2/fluid/tests/test_sigmoid_cross_entropy_with_logits_op.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/fluid/tests/test_sign_op.py b/python/paddle/v2/fluid/tests/test_sign_op.py index c1dfa7f45d..087a0c575b 100644 --- a/python/paddle/v2/fluid/tests/test_sign_op.py +++ b/python/paddle/v2/fluid/tests/test_sign_op.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/fluid/tests/test_smooth_l1_loss_op.py b/python/paddle/v2/fluid/tests/test_smooth_l1_loss_op.py index 5a388bb7b3..e74664dac4 100644 --- a/python/paddle/v2/fluid/tests/test_smooth_l1_loss_op.py +++ b/python/paddle/v2/fluid/tests/test_smooth_l1_loss_op.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/fluid/tests/test_softmax_op.py b/python/paddle/v2/fluid/tests/test_softmax_op.py index cf43e676c5..8f8312edca 100644 --- a/python/paddle/v2/fluid/tests/test_softmax_op.py +++ b/python/paddle/v2/fluid/tests/test_softmax_op.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/python/paddle/v2/fluid/tests/test_softmax_with_cross_entropy_op.py b/python/paddle/v2/fluid/tests/test_softmax_with_cross_entropy_op.py index 626f34f0e0..889fea2ce6 100644 --- a/python/paddle/v2/fluid/tests/test_softmax_with_cross_entropy_op.py +++ b/python/paddle/v2/fluid/tests/test_softmax_with_cross_entropy_op.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/fluid/tests/test_split_and_merge_lod_tensor_op.py b/python/paddle/v2/fluid/tests/test_split_and_merge_lod_tensor_op.py index bc541298ed..48e6756a86 100644 --- a/python/paddle/v2/fluid/tests/test_split_and_merge_lod_tensor_op.py +++ b/python/paddle/v2/fluid/tests/test_split_and_merge_lod_tensor_op.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/fluid/tests/test_split_op.py b/python/paddle/v2/fluid/tests/test_split_op.py index 50347d2df4..887bdfe8b3 100644 --- a/python/paddle/v2/fluid/tests/test_split_op.py +++ b/python/paddle/v2/fluid/tests/test_split_op.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/fluid/tests/test_split_selected_rows_op.py b/python/paddle/v2/fluid/tests/test_split_selected_rows_op.py index 343aa20066..2aaa05dcac 100644 --- a/python/paddle/v2/fluid/tests/test_split_selected_rows_op.py +++ b/python/paddle/v2/fluid/tests/test_split_selected_rows_op.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/fluid/tests/test_spp_op.py b/python/paddle/v2/fluid/tests/test_spp_op.py index e912b56de5..f0ab5909df 100644 --- a/python/paddle/v2/fluid/tests/test_spp_op.py +++ b/python/paddle/v2/fluid/tests/test_spp_op.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/fluid/tests/test_squared_l2_distance_op.py b/python/paddle/v2/fluid/tests/test_squared_l2_distance_op.py index 8171207cd9..78bc300ebe 100644 --- a/python/paddle/v2/fluid/tests/test_squared_l2_distance_op.py +++ b/python/paddle/v2/fluid/tests/test_squared_l2_distance_op.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/python/paddle/v2/fluid/tests/test_squared_l2_norm_op.py b/python/paddle/v2/fluid/tests/test_squared_l2_norm_op.py index b7575cb4d2..609445d522 100644 --- a/python/paddle/v2/fluid/tests/test_squared_l2_norm_op.py +++ b/python/paddle/v2/fluid/tests/test_squared_l2_norm_op.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/fluid/tests/test_sum_op.py b/python/paddle/v2/fluid/tests/test_sum_op.py index 0a15a9485d..2faf5b1064 100644 --- a/python/paddle/v2/fluid/tests/test_sum_op.py +++ b/python/paddle/v2/fluid/tests/test_sum_op.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/fluid/tests/test_switch.py b/python/paddle/v2/fluid/tests/test_switch.py index 52ebf773ec..11296bc04e 100644 --- a/python/paddle/v2/fluid/tests/test_switch.py +++ b/python/paddle/v2/fluid/tests/test_switch.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/fluid/tests/test_target_assign_op.py b/python/paddle/v2/fluid/tests/test_target_assign_op.py index ceda61ff55..ccb41e56c5 100755 --- a/python/paddle/v2/fluid/tests/test_target_assign_op.py +++ b/python/paddle/v2/fluid/tests/test_target_assign_op.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/fluid/tests/test_tensor.py b/python/paddle/v2/fluid/tests/test_tensor.py index 0219bef42b..8fe234a90f 100644 --- a/python/paddle/v2/fluid/tests/test_tensor.py +++ b/python/paddle/v2/fluid/tests/test_tensor.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/fluid/tests/test_top_k_op.py b/python/paddle/v2/fluid/tests/test_top_k_op.py index a50faf0fff..cc2fcc5ec0 100644 --- a/python/paddle/v2/fluid/tests/test_top_k_op.py +++ b/python/paddle/v2/fluid/tests/test_top_k_op.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/fluid/tests/test_transpose_op.py b/python/paddle/v2/fluid/tests/test_transpose_op.py index a16de1416f..ebd63fbd49 100644 --- a/python/paddle/v2/fluid/tests/test_transpose_op.py +++ b/python/paddle/v2/fluid/tests/test_transpose_op.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. 
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/fluid/tests/test_uniform_random_op.py b/python/paddle/v2/fluid/tests/test_uniform_random_op.py index 94cf416fad..53227716ef 100644 --- a/python/paddle/v2/fluid/tests/test_uniform_random_op.py +++ b/python/paddle/v2/fluid/tests/test_uniform_random_op.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/fluid/tests/test_unpool_op.py b/python/paddle/v2/fluid/tests/test_unpool_op.py index 3dd43f9ba4..a97d6dfdda 100644 --- a/python/paddle/v2/fluid/tests/test_unpool_op.py +++ b/python/paddle/v2/fluid/tests/test_unpool_op.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/fluid/tests/test_variable.py b/python/paddle/v2/fluid/tests/test_variable.py index 9f9748ca4e..b06bcfb075 100644 --- a/python/paddle/v2/fluid/tests/test_variable.py +++ b/python/paddle/v2/fluid/tests/test_variable.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/fluid/tests/test_warpctc_op.py b/python/paddle/v2/fluid/tests/test_warpctc_op.py index 55d1c73262..ac638f7836 100644 --- a/python/paddle/v2/fluid/tests/test_warpctc_op.py +++ b/python/paddle/v2/fluid/tests/test_warpctc_op.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/fluid/tests/test_weight_normalization.py b/python/paddle/v2/fluid/tests/test_weight_normalization.py index 80ad8285d8..c2b81dddb0 100644 --- a/python/paddle/v2/fluid/tests/test_weight_normalization.py +++ b/python/paddle/v2/fluid/tests/test_weight_normalization.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/fluid/tests/test_while_op.py b/python/paddle/v2/fluid/tests/test_while_op.py index 9f5e1b668c..3fa1d5e0ed 100644 --- a/python/paddle/v2/fluid/tests/test_while_op.py +++ b/python/paddle/v2/fluid/tests/test_while_op.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/python/paddle/v2/image.py b/python/paddle/v2/image.py index e5000e440c..9235c41e9e 100644 --- a/python/paddle/v2/image.py +++ b/python/paddle/v2/image.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/inference.py b/python/paddle/v2/inference.py index 78bf9807da..52f5b947fd 100644 --- a/python/paddle/v2/inference.py +++ b/python/paddle/v2/inference.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/master/__init__.py b/python/paddle/v2/master/__init__.py index 494e4baf20..efaeeabfa2 100644 --- a/python/paddle/v2/master/__init__.py +++ b/python/paddle/v2/master/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/master/client.py b/python/paddle/v2/master/client.py index b3c790e39d..d62e7cc28e 100644 --- a/python/paddle/v2/master/client.py +++ b/python/paddle/v2/master/client.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/reader/tests/__init__.py b/python/paddle/v2/reader/tests/__init__.py index b94a21a7e4..eca2dce114 100644 --- a/python/paddle/v2/reader/tests/__init__.py +++ b/python/paddle/v2/reader/tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/reader/tests/creator_test.py b/python/paddle/v2/reader/tests/creator_test.py index ac6cd4e9b6..7fe374e663 100644 --- a/python/paddle/v2/reader/tests/creator_test.py +++ b/python/paddle/v2/reader/tests/creator_test.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Copyright PaddlePaddle contributors. All Rights Reserved +# Copyright PaddlePaddle contributors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/reader/tests/decorator_test.py b/python/paddle/v2/reader/tests/decorator_test.py index e41e9c78a0..6b680e39f3 100644 --- a/python/paddle/v2/reader/tests/decorator_test.py +++ b/python/paddle/v2/reader/tests/decorator_test.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/tests/test_image.py b/python/paddle/v2/tests/test_image.py index 2b0444bb03..c78bbdc40a 100644 --- a/python/paddle/v2/tests/test_image.py +++ b/python/paddle/v2/tests/test_image.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/tests/test_layer.py b/python/paddle/v2/tests/test_layer.py index 710e8135f2..b169a0f38e 100644 --- a/python/paddle/v2/tests/test_layer.py +++ b/python/paddle/v2/tests/test_layer.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/tests/test_op.py b/python/paddle/v2/tests/test_op.py index dd04cc4ab6..15d5aef511 100644 --- a/python/paddle/v2/tests/test_op.py +++ b/python/paddle/v2/tests/test_op.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/tests/test_paramconf_order.py b/python/paddle/v2/tests/test_paramconf_order.py index 33c240b8f5..264442be18 100644 --- a/python/paddle/v2/tests/test_paramconf_order.py +++ b/python/paddle/v2/tests/test_paramconf_order.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Copyright PaddlePaddle contributors. All Rights Reserved +# Copyright PaddlePaddle contributors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/tests/test_parameters.py b/python/paddle/v2/tests/test_parameters.py index 1fe1f09b9d..3bfd9348a6 100644 --- a/python/paddle/v2/tests/test_parameters.py +++ b/python/paddle/v2/tests/test_parameters.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
diff --git a/python/paddle/v2/tests/test_rnn_layer.py b/python/paddle/v2/tests/test_rnn_layer.py index 7920e342e1..6ad07167dc 100644 --- a/python/paddle/v2/tests/test_rnn_layer.py +++ b/python/paddle/v2/tests/test_rnn_layer.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/tests/test_topology.py b/python/paddle/v2/tests/test_topology.py index 11b4154eed..bacd28ddb7 100644 --- a/python/paddle/v2/tests/test_topology.py +++ b/python/paddle/v2/tests/test_topology.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/paddle/v2/trainer.py b/python/paddle/v2/trainer.py index a0060bf227..5d98d5b6db 100644 --- a/python/paddle/v2/trainer.py +++ b/python/paddle/v2/trainer.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tools/manylinux1/build_scripts/manylinux1-check.py b/tools/manylinux1/build_scripts/manylinux1-check.py index a27eab1c77..0d1a6df4ee 100644 --- a/tools/manylinux1/build_scripts/manylinux1-check.py +++ b/tools/manylinux1/build_scripts/manylinux1-check.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tools/manylinux1/build_scripts/python-tag-abi-tag.py b/tools/manylinux1/build_scripts/python-tag-abi-tag.py index cd2573314c..0364ab3659 100644 --- a/tools/manylinux1/build_scripts/python-tag-abi-tag.py +++ b/tools/manylinux1/build_scripts/python-tag-abi-tag.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tools/manylinux1/build_scripts/ssl-check.py b/tools/manylinux1/build_scripts/ssl-check.py index 34a3116207..afef2812f3 100644 --- a/tools/manylinux1/build_scripts/ssl-check.py +++ b/tools/manylinux1/build_scripts/ssl-check.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
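The long run of header hunks above is a purely mechanical normalization of the legacy "All Rights Reserve." phrase. As an aside, a minimal Python sketch of such an in-place rewrite (a hypothetical helper, not part of any patch in this series); the pattern deliberately matches the exact legacy phrase, since a bare "Reserve" -> "Reserved" substitution would also corrupt headers that already read "Reserved":

import fileinput
import re
import sys

# Rewrite each file named on the command line in place, touching only the
# exact legacy phrase and leaving already-correct headers untouched.
for line in fileinput.input(sys.argv[1:], inplace=True):
    sys.stdout.write(
        re.sub(r"All Rights Reserve\.", "All Rights Reserved.", line))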
-- GitLab From 5e1640f7e403c7fd2641d685479ab2375601d89e Mon Sep 17 00:00:00 2001 From: Jacek Czaja Date: Mon, 12 Feb 2018 05:36:24 -0800 Subject: [PATCH 093/217] - Fix MKLDNN build type to match Paddle build type --- cmake/external/mkldnn.cmake | 1 + 1 file changed, 1 insertion(+) diff --git a/cmake/external/mkldnn.cmake b/cmake/external/mkldnn.cmake index 89fc34796a..3b8314a06b 100644 --- a/cmake/external/mkldnn.cmake +++ b/cmake/external/mkldnn.cmake @@ -56,6 +56,7 @@ ExternalProject_Add( PREFIX ${MKLDNN_SOURCES_DIR} UPDATE_COMMAND "" CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${MKLDNN_INSTALL_DIR} + CMAKE_ARGS -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} CMAKE_ARGS -DMKLROOT=${MKLML_ROOT} CMAKE_ARGS -DCMAKE_C_FLAGS=${MKLDNN_CFLAG} CMAKE_ARGS -DCMAKE_CXX_FLAGS=${MKLDNN_CXXFLAG} -- GitLab From 3c47c730483d52d50b7d83ab4edbdbaff5b6694b Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Mon, 12 Feb 2018 19:43:22 +0000 Subject: [PATCH 094/217] add back libnccl-dev --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index ed559ca5c4..6ac9901ac6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -22,7 +22,7 @@ COPY ./paddle/scripts/docker/root/ /root/ RUN apt-get update && \ apt-get install -y \ - git python-pip python-dev openssh-server bison \ + git python-pip python-dev openssh-server bison libnccl-dev \ wget unzip unrar tar xz-utils bzip2 gzip coreutils ntp \ curl sed grep graphviz libjpeg-dev zlib1g-dev \ python-matplotlib gcc-4.8 g++-4.8 \ -- GitLab From f82fa64a06f71700a99b641aeb8fc9ef16c0d940 Mon Sep 17 00:00:00 2001 From: kexinzhao Date: Mon, 12 Feb 2018 13:53:06 -0800 Subject: [PATCH 095/217] Move float16 into fluid folder (#8394) * move float16 into fluid * fix include * move to platform folder --- paddle/fluid/platform/CMakeLists.txt | 5 ++- paddle/{math => fluid/platform}/float16.h | 31 +++++++------------ .../platform/float16_test.cc} | 2 +- .../platform/float16_test.cu} | 2 +- paddle/math/tests/CMakeLists.txt | 2 -- 5 files changed, 17 insertions(+), 25 deletions(-) rename paddle/{math => fluid/platform}/float16.h (97%) rename paddle/{math/tests/test_float16.cpp => fluid/platform/float16_test.cc} (98%) rename paddle/{math/tests/test_float16.cu => fluid/platform/float16_test.cu} (99%) diff --git a/paddle/fluid/platform/CMakeLists.txt b/paddle/fluid/platform/CMakeLists.txt index 5ce4b3de39..32e768fdf4 100644 --- a/paddle/fluid/platform/CMakeLists.txt +++ b/paddle/fluid/platform/CMakeLists.txt @@ -27,7 +27,7 @@ ELSE() set(MKLDNN_CTX_DEPS) ENDIF() -# memcpy deoends on device_context, here add deps individually for +# memcpy depends on device_context, here add deps individually for # avoiding cycle dependencies cc_library(device_context SRCS device_context.cc DEPS memory buddy_allocator system_allocator memory_block meta_data meta_cache place eigen3 ${GPU_CTX_DEPS} ${MKLDNN_CTX_DEPS}) @@ -39,3 +39,6 @@ nv_test(nccl_test SRCS nccl_test.cu DEPS dynload_cuda gpu_info device_context) cc_library(profiler SRCS profiler.cc DEPS device_context) cc_test(profiler_test SRCS profiler_test.cc DEPS profiler) + +nv_test(float16_gpu_test SRCS float16_test.cu) +cc_test(float16_test SRCS float16_test.cc) diff --git a/paddle/math/float16.h b/paddle/fluid/platform/float16.h similarity index 97% rename from paddle/math/float16.h rename to paddle/fluid/platform/float16.h index b00a85b082..c36bfad4bc 100644 --- a/paddle/math/float16.h +++ b/paddle/fluid/platform/float16.h @@ -68,7 +68,7 @@ namespace paddle { // memory access of float16 struct and also makes float16 compatible // 
with CUDA half, ARM float16_t, and Eigen::half data types. struct PADDLE_ALIGN(2) float16 { -public: + public: uint16_t x; // Constructors @@ -319,7 +319,7 @@ public: return static_cast(float(*this)); } -private: + private: union Bits { float f; int32_t si; @@ -485,8 +485,7 @@ HOST inline float16 operator+(const float16& a, const float16& b) { "st1 {v0.h}[0], [%[res_ptr]]\n" : // outputs : // inputs - [a_ptr] "r"(&(a.x)), - [b_ptr] "r"(&(b.x)), + [a_ptr] "r"(&(a.x)), [b_ptr] "r"(&(b.x)), [res_ptr] "r"(&(res.x)) : // clobbers "memory", "v0", "v1"); @@ -502,8 +501,7 @@ HOST inline float16 operator-(const float16& a, const float16& b) { "st1 {v0.h}[0], [%[res_ptr]]\n" : // outputs : // inputs - [a_ptr] "r"(&(a.x)), - [b_ptr] "r"(&(b.x)), + [a_ptr] "r"(&(a.x)), [b_ptr] "r"(&(b.x)), [res_ptr] "r"(&(res.x)) : // clobbers "memory", "v0", "v1"); @@ -519,8 +517,7 @@ HOST inline float16 operator*(const float16& a, const float16& b) { "st1 {v0.h}[0], [%[res_ptr]]\n" : // outputs : // inputs - [a_ptr] "r"(&(a.x)), - [b_ptr] "r"(&(b.x)), + [a_ptr] "r"(&(a.x)), [b_ptr] "r"(&(b.x)), [res_ptr] "r"(&(res.x)) : // clobbers "memory", "v0", "v1"); @@ -536,8 +533,7 @@ HOST inline float16 operator/(const float16& a, const float16& b) { "st1 {v0.h}[0], [%[res_ptr]]\n" : // outputs : // inputs - [a_ptr] "r"(&(a.x)), - [b_ptr] "r"(&(b.x)), + [a_ptr] "r"(&(a.x)), [b_ptr] "r"(&(b.x)), [res_ptr] "r"(&(res.x)) : // clobbers "memory", "v0", "v1"); @@ -588,8 +584,7 @@ HOST inline bool operator==(const float16& a, const float16& b) { "st1 {v0.h}[0], [%[res_ptr]]\n" : // outputs : // inputs - [a_ptr] "r"(&(a.x)), - [b_ptr] "r"(&(b.x)), + [a_ptr] "r"(&(a.x)), [b_ptr] "r"(&(b.x)), [res_ptr] "r"(&res) : // clobbers "memory", "v0", "v1"); @@ -609,8 +604,7 @@ HOST inline bool operator<(const float16& a, const float16& b) { "st1 {v0.h}[0], [%[res_ptr]]\n" : // outputs : // inputs - [a_ptr] "r"(&(a.x)), - [b_ptr] "r"(&(b.x)), + [a_ptr] "r"(&(a.x)), [b_ptr] "r"(&(b.x)), [res_ptr] "r"(&res) : // clobbers "memory", "v0", "v1"); @@ -626,8 +620,7 @@ HOST inline bool operator<=(const float16& a, const float16& b) { "st1 {v0.h}[0], [%[res_ptr]]\n" : // outputs : // inputs - [a_ptr] "r"(&(a.x)), - [b_ptr] "r"(&(b.x)), + [a_ptr] "r"(&(a.x)), [b_ptr] "r"(&(b.x)), [res_ptr] "r"(&res) : // clobbers "memory", "v0", "v1"); @@ -643,8 +636,7 @@ HOST inline bool operator>(const float16& a, const float16& b) { "st1 {v0.h}[0], [%[res_ptr]]\n" : // outputs : // inputs - [a_ptr] "r"(&(a.x)), - [b_ptr] "r"(&(b.x)), + [a_ptr] "r"(&(a.x)), [b_ptr] "r"(&(b.x)), [res_ptr] "r"(&res) : // clobbers "memory", "v0", "v1"); @@ -660,8 +652,7 @@ HOST inline bool operator>=(const float16& a, const float16& b) { "st1 {v0.h}[0], [%[res_ptr]]\n" : // outputs : // inputs - [a_ptr] "r"(&(a.x)), - [b_ptr] "r"(&(b.x)), + [a_ptr] "r"(&(a.x)), [b_ptr] "r"(&(b.x)), [res_ptr] "r"(&res) : // clobbers "memory", "v0", "v1"); diff --git a/paddle/math/tests/test_float16.cpp b/paddle/fluid/platform/float16_test.cc similarity index 98% rename from paddle/math/tests/test_float16.cpp rename to paddle/fluid/platform/float16_test.cc index 64cc43f972..bed29dbfa7 100644 --- a/paddle/math/tests/test_float16.cpp +++ b/paddle/fluid/platform/float16_test.cc @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "paddle/math/float16.h" +#include "paddle/fluid/platform/float16.h" #include diff --git a/paddle/math/tests/test_float16.cu b/paddle/fluid/platform/float16_test.cu similarity index 99% rename from paddle/math/tests/test_float16.cu rename to paddle/fluid/platform/float16_test.cu index 3b2d8cfcec..7e6c9f58ac 100644 --- a/paddle/math/tests/test_float16.cu +++ b/paddle/fluid/platform/float16_test.cu @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/math/float16.h" +#include "paddle/fluid/platform/float16.h" #include diff --git a/paddle/math/tests/CMakeLists.txt b/paddle/math/tests/CMakeLists.txt index dcd2a34583..d8b7f9e3fc 100644 --- a/paddle/math/tests/CMakeLists.txt +++ b/paddle/math/tests/CMakeLists.txt @@ -22,7 +22,6 @@ if(WITH_GPU) link_paddle_test(test_Tensor) CUDA_ADD_EXECUTABLE(test_lazyAssign test_lazyAssign.cu) link_paddle_test(test_lazyAssign) - nv_test(test_float16_gpu SRCS test_float16.cu) else() compile_cu_as_cpp(test_Tensor.cu) add_unittest(test_Tensor test_Tensor.cu) @@ -34,4 +33,3 @@ add_simple_unittest(test_FPException) add_simple_unittest(test_GpuProfiler) add_simple_unittest(test_BaseMatrix) add_simple_unittest(test_Matrix) -add_simple_unittest(test_float16) -- GitLab From 004df46f28be0201d4bfb40b981be4fc919f632f Mon Sep 17 00:00:00 2001 From: xuwei06 Date: Mon, 12 Feb 2018 14:02:24 -0800 Subject: [PATCH 096/217] Make print_op able to show the value of bool tensor And some minor fixes on comments. --- .../fluid/operators/elementwise_op_function.h | 1 - paddle/fluid/operators/print_op.cc | 16 +++++----- python/paddle/v2/fluid/layers/control_flow.py | 2 +- python/paddle/v2/fluid/layers/nn.py | 30 +++++++++---------- 4 files changed, 25 insertions(+), 24 deletions(-) diff --git a/paddle/fluid/operators/elementwise_op_function.h b/paddle/fluid/operators/elementwise_op_function.h index 0ee7291f04..2a4a611511 100644 --- a/paddle/fluid/operators/elementwise_op_function.h +++ b/paddle/fluid/operators/elementwise_op_function.h @@ -314,7 +314,6 @@ EIGEN_FUNCTOR(Div, EIGEN_DIV); template void ElementwiseGradCompute(const framework::ExecutionContext& ctx, - const framework::Tensor* x, const framework::Tensor* y, const framework::Tensor* out, diff --git a/paddle/fluid/operators/print_op.cc b/paddle/fluid/operators/print_op.cc index a76ba796fe..7fa2b060af 100644 --- a/paddle/fluid/operators/print_op.cc +++ b/paddle/fluid/operators/print_op.cc @@ -46,7 +46,7 @@ struct Formater { } private: - void PrintMessage() { CLOG << std::time(nullptr) << "\t" << message; } + void PrintMessage() { CLOG << std::time(nullptr) << "\t" << message << "\t"; } void PrintName() { if (!name.empty()) { CLOG << "Tensor[" << name << "]" << std::endl; @@ -85,15 +85,16 @@ struct Formater { // print float if (dtype.hash_code() == typeid(float).hash_code()) { Display(size); - } - if (dtype.hash_code() == typeid(double).hash_code()) { + } else if (dtype.hash_code() == typeid(double).hash_code()) { Display(size); - } - if (dtype.hash_code() == typeid(int).hash_code()) { + } else if (dtype.hash_code() == typeid(int).hash_code()) { Display(size); - } - if (dtype.hash_code() == typeid(int64_t).hash_code()) { + } else if (dtype.hash_code() == typeid(int64_t).hash_code()) { Display(size); + } else if (dtype.hash_code() == typeid(bool).hash_code()) { + Display(size); + } else { + CLOG << "\tdata: unprintable type: " << dtype.name() << std::endl; } } @@ 
-182,6 +183,7 @@ class TensorPrintOp : public framework::OperatorBase { } Formater formater; + formater.message = Attr("message"); if (Attr("print_tensor_name")) { formater.name = printed_var_name; } diff --git a/python/paddle/v2/fluid/layers/control_flow.py b/python/paddle/v2/fluid/layers/control_flow.py index 800c11a53b..1ca11bb35b 100644 --- a/python/paddle/v2/fluid/layers/control_flow.py +++ b/python/paddle/v2/fluid/layers/control_flow.py @@ -174,7 +174,7 @@ def Print(input, print_tensor_type (bool): Print the tensor type. print_tensor_shape (bool): Print the tensor shape. print_tensor_lod (bool): Print the tensor lod. - print_phase (bool): Which phase to displace, including 'forward', + print_phase (str): Which phase to displace, including 'forward', 'backward' and 'both'. If set to 'backward' or 'both', will print the gradients of input tensor. diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index f5b64fee1d..5f1842f5fb 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -1579,7 +1579,7 @@ def layer_norm(input, """ **Layer Normalization** - Assume feature vectors exist on dimensions + Assume feature vectors exist on dimensions :attr:`begin_norm_axis ... rank(input)` and calculate the moment statistics along these dimensions for each feature vector :math:`a` with size :math:`H`, then normalize each feature vector using the corresponding @@ -1600,13 +1600,13 @@ def layer_norm(input, Args: input(Variable): The input tensor variable. - scale(bool): Whether to learn the adaptive gain :math:`g` after + scale(bool): Whether to learn the adaptive gain :math:`g` after normalization. - shift(bool): Whether to learn the adaptive bias :math:`b` after + shift(bool): Whether to learn the adaptive bias :math:`b` after normalization. - begin_norm_axis(bool): The normalization will be performed along + begin_norm_axis(bool): The normalization will be performed along dimensions from :attr:`begin_norm_axis` to :attr:`rank(input)`. - epsilon(float): The small value added to the variance to prevent + epsilon(float): The small value added to the variance to prevent division by zero. param_attr(ParamAttr|None): The parameter attribute for the learnable gain :math:`g`. @@ -2070,7 +2070,7 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None): Tensor variable with a single element, otherwise must be in the range :math:`[-rank(input), rank(input))`. If :math:`dim < 0`, the dimension to reduce is :math:`rank + dim`. - keep_dim (bool): Whether to reserve the reduced dimension in the + keep_dim (bool|False): Whether to reserve the reduced dimension in the output Tensor. The result tensor will have one fewer dimension than the :attr:`input` unless :attr:`keep_dim` is true. name(str|None): A name for this layer(optional). If set None, the layer @@ -3098,33 +3098,33 @@ def multiplex(inputs, index): def softmax_with_cross_entropy(logits, label, soft_label=False): """ **Softmax With Cross Entropy Operator.** - + Cross entropy loss with softmax is used as the output layer extensively. This operator computes the softmax normalized values for each row of the input tensor, after which cross-entropy loss is computed. This provides a more numerically stable gradient. - + Because this operator performs a softmax on logits internally, it expects unscaled logits. This operator should not be used with the output of softmax operator since that would produce incorrect results. 
- + When the attribute soft_label is set false, this operators expects mutually exclusive hard labels, each sample in a batch is in exactly one class with a probability of 1.0. Each sample in the batch will have a single label. - + The equation is as follows: - + 1) Hard label (one-hot label, so every sample has exactly one class) - + .. math:: loss_j = -\\text{logit}_{label_j} + \\log\\left(\\sum_{i=0}^{K}\\exp(\\text{logit}_i)\\right), j = 1,..., K - + 2) Soft label (each sample can have a distribution over all classes) .. math:: - + loss_j = -\\sum_{i=0}^{K}\\text{label}_i \\left(\\text{logit}_i - \\log\\left(\\sum_{i=0}^{K} \\exp(\\text{logit}_i)\\right)\\right), j = 1,...,K @@ -3169,7 +3169,7 @@ def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None): The operator takes the first dimension of X and Y as batch size. For each instance, it computes the smooth l1 loss element by element first and then sums all the losses. So the shape of Out is [batch_size, 1]. - + Args: x (Variable): A tensor with rank at least 2. The input value of smooth l1 loss op with shape [batch_size, dim1, ..., dimN]. -- GitLab From a259ad41b03e52d3d3e97f53e5ffa163f80c79bb Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Mon, 12 Feb 2018 23:13:12 +0000 Subject: [PATCH 097/217] remove duplicated cbegin and cend in mixed vector --- paddle/fluid/framework/mixed_vector.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/paddle/fluid/framework/mixed_vector.h b/paddle/fluid/framework/mixed_vector.h index 114c21c26c..c1a89a1261 100644 --- a/paddle/fluid/framework/mixed_vector.h +++ b/paddle/fluid/framework/mixed_vector.h @@ -132,10 +132,6 @@ class Vector { const T* cend() const { return end(); } - const T* cbegin() const { return begin(); } - - const T* cend() const { return end(); } - const T& back() const { auto it = end(); --it; -- GitLab From fcadb452515b6acabf9daf987b4a92dcb8e62d73 Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Mon, 12 Feb 2018 16:05:41 -0800 Subject: [PATCH 098/217] Separate VarType from VarDesc in framework.proto and fix all related compiler errors (#8414) * Refine Type system * Fixing type inference * Fixed create_reader_op.cc * Fix var_desc.h * Fixed executor.cc * Fix shape_inference.h * Fixed create_reader_op.cc * Fix tensor_util.h * Fixed var_type_inference_test.cc * Fix shape_inference.cc * Fixed sum_op.c * Fixed read_op.cc * Fix var_type.h * Fixed beam_search_decode_op.cc * sendrecvop_utils.cc * Fix operator.cc * Fixed lookup_table_op.cc * Fixed op_desc.cc * Fixed get_places_op.cc * Fixed lod_rank_table_op.cc * Fixed beam_search_op.cc * Fix var_desc.cc * Fixed lod_tensor_to_array_op.cc * Fixed while_op.cc * Fix program_desc_test.cc * tensor_array_read_write_op.cc * Fix assign_op.cc * Fix executor.cc * Fix protobuf.cc * Fix protobuf.cc --- paddle/fluid/framework/executor.cc | 28 ++--- paddle/fluid/framework/framework.proto | 51 ++++---- paddle/fluid/framework/op_desc.cc | 10 +- paddle/fluid/framework/operator.cc | 2 +- paddle/fluid/framework/program_desc_test.cc | 12 +- paddle/fluid/framework/shape_inference.cc | 8 +- paddle/fluid/framework/shape_inference.h | 8 +- paddle/fluid/framework/tensor_util.h | 4 +- paddle/fluid/framework/var_desc.cc | 119 ++++++++++-------- paddle/fluid/framework/var_desc.h | 14 +-- paddle/fluid/framework/var_type.h | 22 ++-- .../framework/var_type_inference_test.cc | 26 ++-- paddle/fluid/operators/assign_op.cc | 4 +- .../fluid/operators/beam_search_decode_op.cc | 4 +- paddle/fluid/operators/beam_search_op.cc | 4 +- 
paddle/fluid/operators/create_reader_op.cc | 12 +- .../operators/detail/sendrecvop_utils.cc | 4 +- paddle/fluid/operators/get_places_op.cc | 2 +- paddle/fluid/operators/lod_rank_table_op.cc | 2 +- .../fluid/operators/lod_tensor_to_array_op.cc | 2 +- paddle/fluid/operators/lookup_table_op.cc | 4 +- paddle/fluid/operators/read_op.cc | 2 +- paddle/fluid/operators/sum_op.cc | 12 +- .../operators/tensor_array_read_write_op.cc | 2 +- paddle/fluid/operators/while_op.cc | 4 +- paddle/fluid/pybind/protobuf.cc | 20 +-- 26 files changed, 198 insertions(+), 184 deletions(-) diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index 179f9194a9..ebfd54fdc5 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -36,24 +36,24 @@ namespace framework { Executor::Executor(const platform::Place& place) : place_(place) {} -static void CreateTensor(Variable* var, proto::VarDesc::VarType var_type) { - if (var_type == proto::VarDesc::LOD_TENSOR) { +static void CreateTensor(Variable* var, proto::VarType::Type var_type) { + if (var_type == proto::VarType::LOD_TENSOR) { var->GetMutable(); - } else if (var_type == proto::VarDesc::SELECTED_ROWS) { + } else if (var_type == proto::VarType::SELECTED_ROWS) { var->GetMutable(); - } else if (var_type == proto::VarDesc::FEED_MINIBATCH) { + } else if (var_type == proto::VarType::FEED_MINIBATCH) { var->GetMutable(); - } else if (var_type == proto::VarDesc::FETCH_LIST) { + } else if (var_type == proto::VarType::FETCH_LIST) { var->GetMutable(); - } else if (var_type == proto::VarDesc::STEP_SCOPES) { + } else if (var_type == proto::VarType::STEP_SCOPES) { var->GetMutable>(); - } else if (var_type == proto::VarDesc::LOD_RANK_TABLE) { + } else if (var_type == proto::VarType::LOD_RANK_TABLE) { var->GetMutable(); - } else if (var_type == proto::VarDesc::LOD_TENSOR_ARRAY) { + } else if (var_type == proto::VarType::LOD_TENSOR_ARRAY) { var->GetMutable(); - } else if (var_type == proto::VarDesc::PLACE_LIST) { + } else if (var_type == proto::VarType::PLACE_LIST) { var->GetMutable(); - } else if (var_type == proto::VarDesc::READER) { + } else if (var_type == proto::VarType::READER) { var->GetMutable(); } else { PADDLE_THROW( @@ -182,7 +182,7 @@ static bool has_feed_operators( auto var = block->FindVar(feed_holder_name); PADDLE_ENFORCE_NOT_NULL(var, "Block should already have a '%s' variable", feed_holder_name); - PADDLE_ENFORCE_EQ(var->GetType(), proto::VarDesc::FEED_MINIBATCH, + PADDLE_ENFORCE_EQ(var->GetType(), proto::VarType::FEED_MINIBATCH, "'%s' variable should be 'FEED_MINIBATCH' type", feed_holder_name); } @@ -222,7 +222,7 @@ static bool has_fetch_operators( auto var = block->FindVar(fetch_holder_name); PADDLE_ENFORCE_NOT_NULL(var, "Block should already have a '%s' variable", fetch_holder_name); - PADDLE_ENFORCE_EQ(var->GetType(), proto::VarDesc::FETCH_LIST, + PADDLE_ENFORCE_EQ(var->GetType(), proto::VarType::FETCH_LIST, "'%s' variable should be 'FETCH_LIST' type", fetch_holder_name); } @@ -241,7 +241,7 @@ void Executor::Run(const ProgramDesc& program, Scope* scope, if (!has_feed_operators(global_block, feed_targets, feed_holder_name)) { // create feed_holder variable auto* feed_holder = global_block->Var(feed_holder_name); - feed_holder->SetType(proto::VarDesc::FEED_MINIBATCH); + feed_holder->SetType(proto::VarType::FEED_MINIBATCH); feed_holder->SetPersistable(true); int i = 0; @@ -274,7 +274,7 @@ void Executor::Run(const ProgramDesc& program, Scope* scope, if (!has_fetch_operators(global_block, fetch_targets, 
fetch_holder_name)) { // create fetch_holder variable auto* fetch_holder = global_block->Var(fetch_holder_name); - fetch_holder->SetType(proto::VarDesc::FETCH_LIST); + fetch_holder->SetType(proto::VarType::FETCH_LIST); fetch_holder->SetPersistable(true); int i = 0; diff --git a/paddle/fluid/framework/framework.proto b/paddle/fluid/framework/framework.proto index ad8da21ae0..fa7f437851 100644 --- a/paddle/fluid/framework/framework.proto +++ b/paddle/fluid/framework/framework.proto @@ -101,25 +101,8 @@ enum DataType { FP64 = 6; } -message TensorDesc { - required DataType data_type = 1; - repeated int64 dims = 2; // [UNK, 640, 480] is saved as [-1, 640, 480] -} - -message LoDTensorDesc { - required TensorDesc tensor = 1; - optional int32 lod_level = 2 [ default = 0 ]; -} - -message LoDTensorArrayDesc { - required TensorDesc tensor = 1; - optional int32 lod_level = 2 [ default = 0 ]; -} - -message ReaderDesc { repeated LoDTensorDesc lod_tensor = 1; } - -message VarDesc { - enum VarType { +message VarType { + enum Type { LOD_TENSOR = 1; SELECTED_ROWS = 2; FEED_MINIBATCH = 3; @@ -130,13 +113,35 @@ message VarDesc { PLACE_LIST = 8; READER = 9; } + + required Type type = 1; + + message TensorDesc { + required DataType data_type = 1; + repeated int64 dims = 2; // [UNK, 640, 480] is saved as [-1, 640, 480] + } + optional TensorDesc selected_rows = 2; + + message LoDTensorDesc { + required TensorDesc tensor = 1; + optional int32 lod_level = 2 [ default = 0 ]; + } + optional LoDTensorDesc lod_tensor = 3; + + message LoDTensorArrayDesc { + required TensorDesc tensor = 1; + optional int32 lod_level = 2 [ default = 0 ]; + } + optional LoDTensorArrayDesc tensor_array = 4; + + message ReaderDesc { repeated LoDTensorDesc lod_tensor = 1; } + optional ReaderDesc reader = 5; +} + +message VarDesc { required string name = 1; required VarType type = 2; optional bool persistable = 3 [ default = false ]; - optional LoDTensorDesc lod_tensor = 4; - optional TensorDesc selected_rows = 5; - optional LoDTensorArrayDesc tensor_array = 6; - optional ReaderDesc reader = 7; } message BlockDesc { diff --git a/paddle/fluid/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc index e740010c63..eabfdc11a8 100644 --- a/paddle/fluid/framework/op_desc.cc +++ b/paddle/fluid/framework/op_desc.cc @@ -53,11 +53,11 @@ class CompileTimeInferShapeContext : public InferShapeContext { PADDLE_ENFORCE_LT(j, Outputs(out).size()); auto *in_var = block_.FindVarRecursive(Inputs(in)[i]); auto *out_var = block_.FindVarRecursive(Outputs(out)[j]); - if (in_var->GetType() != proto::VarDesc::LOD_TENSOR) { + if (in_var->GetType() != proto::VarType::LOD_TENSOR) { VLOG(3) << "input " << in << " is not LodTensor"; return; } - PADDLE_ENFORCE_EQ(in_var->GetType(), proto::VarDesc::LOD_TENSOR, + PADDLE_ENFORCE_EQ(in_var->GetType(), proto::VarType::LOD_TENSOR, "The %d-th output of Output(%s) must be LoDTensor.", j, out); out_var->SetLoDLevel(in_var->GetLoDLevel()); @@ -66,7 +66,7 @@ class CompileTimeInferShapeContext : public InferShapeContext { bool IsRuntime() const override; protected: - proto::VarDesc::VarType GetVarType(const std::string &name) const override; + proto::VarType::Type GetVarType(const std::string &name) const override; DDim GetDim(const std::string &name) const override; @@ -388,7 +388,7 @@ void OpDesc::InferVarType(BlockDesc *block) const { for (auto &out_pair : this->outputs_) { for (auto &out_var_name : out_pair.second) { block->FindRecursiveOrCreateVar(out_var_name) - .SetType(proto::VarDesc::LOD_TENSOR); + 
.SetType(proto::VarType::LOD_TENSOR); } } } @@ -507,7 +507,7 @@ void CompileTimeInferShapeContext::SetRepeatedDims( bool CompileTimeInferShapeContext::IsRuntime() const { return false; } -proto::VarDesc::VarType CompileTimeInferShapeContext::GetVarType( +proto::VarType::Type CompileTimeInferShapeContext::GetVarType( const std::string &name) const { return block_.FindVarRecursive(name)->GetType(); } diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index bc529b8269..ff90aba10b 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -477,7 +477,7 @@ class RuntimeInferShapeContext : public InferShapeContext { } } - proto::VarDesc::VarType GetVarType(const std::string& name) const override { + proto::VarType::Type GetVarType(const std::string& name) const override { auto* var = scope_.FindVar(name); return ToVarType(var->Type()); } diff --git a/paddle/fluid/framework/program_desc_test.cc b/paddle/fluid/framework/program_desc_test.cc index 3a4a87cfa5..d9c4331da1 100644 --- a/paddle/fluid/framework/program_desc_test.cc +++ b/paddle/fluid/framework/program_desc_test.cc @@ -22,13 +22,13 @@ TEST(ProgramDesc, copy_ctor) { ProgramDesc program; auto* global_block = program.MutableBlock(0); auto* x = global_block->Var("X"); - x->SetType(proto::VarDesc_VarType_LOD_TENSOR); + x->SetType(proto::VarType::LOD_TENSOR); x->SetLoDLevel(0); x->SetDataType(proto::FP32); x->SetShape({1000, 784}); auto* y = global_block->Var("Y"); - y->SetType(proto::VarDesc_VarType_LOD_TENSOR); + y->SetType(proto::VarType::LOD_TENSOR); y->SetLoDLevel(0); y->SetDataType(proto::FP32); y->SetShape({784, 100}); @@ -39,7 +39,7 @@ TEST(ProgramDesc, copy_ctor) { op->SetInput("Y", {y->Name()}); auto* out = global_block->Var("Out"); - out->SetType(proto::VarDesc_VarType_LOD_TENSOR); + out->SetType(proto::VarType::LOD_TENSOR); op->SetOutput("Y", {out->Name()}); ProgramDesc program_copy(program); @@ -84,13 +84,13 @@ TEST(ProgramDescBind, serialize_and_deserialize) { ProgramDesc program_origin; auto* global_block = program_origin.MutableBlock(0); auto* x = global_block->Var("X"); - x->SetType(proto::VarDesc_VarType_LOD_TENSOR); + x->SetType(proto::VarType::LOD_TENSOR); x->SetLoDLevel(0); x->SetDataType(proto::FP32); x->SetShape({1000, 784}); auto* y = global_block->Var("Y"); - y->SetType(proto::VarDesc_VarType_LOD_TENSOR); + y->SetType(proto::VarType::LOD_TENSOR); y->SetLoDLevel(0); y->SetDataType(proto::FP32); y->SetShape({784, 100}); @@ -101,7 +101,7 @@ TEST(ProgramDescBind, serialize_and_deserialize) { op->SetInput("Y", {y->Name()}); auto* out = global_block->Var("Out"); - out->SetType(proto::VarDesc_VarType_LOD_TENSOR); + out->SetType(proto::VarType::LOD_TENSOR); op->SetOutput("Y", {out->Name()}); std::string binary_str; diff --git a/paddle/fluid/framework/shape_inference.cc b/paddle/fluid/framework/shape_inference.cc index 1b518970ac..dc9a79020f 100644 --- a/paddle/fluid/framework/shape_inference.cc +++ b/paddle/fluid/framework/shape_inference.cc @@ -116,19 +116,19 @@ void InferShapeContext::SetDims(const std::vector &names, } } -std::vector InferShapeContext::GetInputsVarType( +std::vector InferShapeContext::GetInputsVarType( const std::string &name) const { return GetVarTypes(Inputs(name)); } -std::vector InferShapeContext::GetOutputsVarType( +std::vector InferShapeContext::GetOutputsVarType( const std::string &name) const { return GetVarTypes(Outputs(name)); } -std::vector InferShapeContext::GetVarTypes( +std::vector InferShapeContext::GetVarTypes( const 
std::vector &names) const { - std::vector retv; + std::vector retv; retv.resize(names.size()); std::transform(names.begin(), names.end(), retv.begin(), std::bind(std::mem_fn(&InferShapeContext::GetVarType), this, diff --git a/paddle/fluid/framework/shape_inference.h b/paddle/fluid/framework/shape_inference.h index 3739d640fe..bc02d700da 100644 --- a/paddle/fluid/framework/shape_inference.h +++ b/paddle/fluid/framework/shape_inference.h @@ -31,9 +31,9 @@ class InferShapeContext { virtual bool HasInput(const std::string &name) const = 0; virtual bool HasOutput(const std::string &name) const = 0; - std::vector GetInputsVarType( + std::vector GetInputsVarType( const std::string &name) const; - std::vector GetOutputsVarType( + std::vector GetOutputsVarType( const std::string &name) const; virtual bool HasInputs(const std::string &name) const = 0; @@ -75,10 +75,10 @@ class InferShapeContext { std::vector GetDims(const std::vector &names) const; - std::vector GetVarTypes( + std::vector GetVarTypes( const std::vector &names) const; - virtual proto::VarDesc::VarType GetVarType(const std::string &name) const = 0; + virtual proto::VarType::Type GetVarType(const std::string &name) const = 0; virtual InferShapeVarPtr GetVarPtr(const std::string &name) = 0; }; diff --git a/paddle/fluid/framework/tensor_util.h b/paddle/fluid/framework/tensor_util.h index 22519013cc..f0464d4807 100644 --- a/paddle/fluid/framework/tensor_util.h +++ b/paddle/fluid/framework/tensor_util.h @@ -225,7 +225,7 @@ inline void SerializeToStream(std::ostream& os, const Tensor& tensor, { // the 2nd field, tensor description // int32_t size // void* protobuf message - proto::TensorDesc desc; + proto::VarType::TensorDesc desc; desc.set_data_type(framework::ToDataType(tensor.type())); auto dims = framework::vectorize(tensor.dims()); auto* pb_dims = desc.mutable_dims(); @@ -290,7 +290,7 @@ inline void DeserializeFromStream(std::istream& is, Tensor* tensor, uint32_t version; is.read(reinterpret_cast(&version), sizeof(version)); PADDLE_ENFORCE_EQ(version, 0U, "Only version 0 is supported"); - proto::TensorDesc desc; + proto::VarType::TensorDesc desc; { // int32_t size // proto buffer int32_t size; diff --git a/paddle/fluid/framework/var_desc.cc b/paddle/fluid/framework/var_desc.cc index eb88146969..bb2be1ab50 100644 --- a/paddle/fluid/framework/var_desc.cc +++ b/paddle/fluid/framework/var_desc.cc @@ -18,18 +18,21 @@ limitations under the License. 
*/ namespace paddle { namespace framework { -proto::VarDesc::VarType VarDesc::GetType() const { return desc_.type(); } +proto::VarType::Type VarDesc::GetType() const { return desc_.type().type(); } -void VarDesc::SetType(proto::VarDesc::VarType type) { desc_.set_type(type); } +void VarDesc::SetType(proto::VarType::Type type) { + desc_.mutable_type()->set_type(type); +} void VarDesc::SetShape(const std::vector &dims) { VectorToRepeated(dims, mutable_tensor_desc()->mutable_dims()); } void VarDesc::SetTensorDescNum(size_t num) { - switch (desc_.type()) { - case proto::VarDesc::READER: { - auto *lod_tensors_ptr = desc_.mutable_reader()->mutable_lod_tensor(); + switch (desc_.type().type()) { + case proto::VarType::READER: { + auto *lod_tensors_ptr = + desc_.mutable_type()->mutable_reader()->mutable_lod_tensor(); lod_tensors_ptr->Clear(); for (size_t i = 0; i < num; ++i) { lod_tensors_ptr->Add(); @@ -44,9 +47,9 @@ void VarDesc::SetTensorDescNum(size_t num) { } size_t VarDesc::GetTensorDescNum() const { - switch (desc_.type()) { - case proto::VarDesc::READER: - return desc_.reader().lod_tensor_size(); + switch (desc_.type().type()) { + case proto::VarType::READER: + return desc_.type().reader().lod_tensor_size(); break; default: PADDLE_THROW( @@ -64,7 +67,7 @@ void VarDesc::SetShapes( << "). The Reader is going to be reinitialized."; SetTensorDescNum(multiple_dims.size()); } - std::vector tensors = mutable_tensor_descs(); + std::vector tensors = mutable_tensor_descs(); for (size_t i = 0; i < multiple_dims.size(); ++i) { VectorToRepeated(multiple_dims[i], tensors[i]->mutable_dims()); } @@ -75,7 +78,7 @@ std::vector VarDesc::GetShape() const { } std::vector> VarDesc::GetShapes() const { - std::vector descs = tensor_descs(); + std::vector descs = tensor_descs(); std::vector> res; res.reserve(descs.size()); for (const auto &tensor_desc : descs) { @@ -98,7 +101,8 @@ void VarDesc::SetDataTypes( << "). The Reader is going to be reinitialized."; SetTensorDescNum(multiple_data_type.size()); } - std::vector tensor_descs = mutable_tensor_descs(); + std::vector tensor_descs = + mutable_tensor_descs(); for (size_t i = 0; i < multiple_data_type.size(); ++i) { tensor_descs[i]->set_data_type(multiple_data_type[i]); } @@ -109,7 +113,7 @@ proto::DataType VarDesc::GetDataType() const { } std::vector VarDesc::GetDataTypes() const { - std::vector descs = tensor_descs(); + std::vector descs = tensor_descs(); std::vector res; res.reserve(descs.size()); for (const auto &tensor_desc : descs) { @@ -119,12 +123,12 @@ std::vector VarDesc::GetDataTypes() const { } void VarDesc::SetLoDLevel(int32_t lod_level) { - switch (desc_.type()) { - case proto::VarDesc::LOD_TENSOR: - desc_.mutable_lod_tensor()->set_lod_level(lod_level); + switch (desc_.type().type()) { + case proto::VarType::LOD_TENSOR: + desc_.mutable_type()->mutable_lod_tensor()->set_lod_level(lod_level); break; - case proto::VarDesc::LOD_TENSOR_ARRAY: - desc_.mutable_tensor_array()->set_lod_level(lod_level); + case proto::VarType::LOD_TENSOR_ARRAY: + desc_.mutable_type()->mutable_tensor_array()->set_lod_level(lod_level); break; default: PADDLE_THROW( @@ -142,10 +146,11 @@ void VarDesc::SetLoDLevels(const std::vector &multiple_lod_level) { << "). 
The Reader is going to be reinitialized."; SetTensorDescNum(multiple_lod_level.size()); } - switch (desc_.type()) { - case proto::VarDesc::READER: { + switch (desc_.type().type()) { + case proto::VarType::READER: { size_t i = 0; - for (auto &lod_tensor : *desc_.mutable_reader()->mutable_lod_tensor()) { + for (auto &lod_tensor : + *desc_.mutable_type()->mutable_reader()->mutable_lod_tensor()) { lod_tensor.set_lod_level(multiple_lod_level[i++]); } } break; @@ -157,11 +162,11 @@ void VarDesc::SetLoDLevels(const std::vector &multiple_lod_level) { } int32_t VarDesc::GetLoDLevel() const { - switch (desc_.type()) { - case proto::VarDesc::LOD_TENSOR: - return desc_.lod_tensor().lod_level(); - case proto::VarDesc::LOD_TENSOR_ARRAY: - return desc_.tensor_array().lod_level(); + switch (desc_.type().type()) { + case proto::VarType::LOD_TENSOR: + return desc_.type().lod_tensor().lod_level(); + case proto::VarType::LOD_TENSOR_ARRAY: + return desc_.type().tensor_array().lod_level(); default: PADDLE_THROW( "Getting 'lod_level' is not supported by the type of var %s.", @@ -171,10 +176,10 @@ int32_t VarDesc::GetLoDLevel() const { std::vector VarDesc::GetLoDLevels() const { std::vector res; - switch (desc_.type()) { - case proto::VarDesc::READER: - res.reserve(desc_.reader().lod_tensor_size()); - for (auto &lod_tensor : desc_.reader().lod_tensor()) { + switch (desc_.type().type()) { + case proto::VarType::READER: + res.reserve(desc_.type().reader().lod_tensor_size()); + for (auto &lod_tensor : desc_.type().reader().lod_tensor()) { res.push_back(lod_tensor.lod_level()); } return res; @@ -186,15 +191,16 @@ std::vector VarDesc::GetLoDLevels() const { } } -const proto::TensorDesc &VarDesc::tensor_desc() const { +const proto::VarType::TensorDesc &VarDesc::tensor_desc() const { PADDLE_ENFORCE(desc_.has_type(), "The var's type hasn't been set."); - switch (desc_.type()) { - case proto::VarDesc::SELECTED_ROWS: - return desc_.selected_rows(); - case proto::VarDesc::LOD_TENSOR: - return desc_.lod_tensor().tensor(); - case proto::VarDesc::LOD_TENSOR_ARRAY: - return desc_.tensor_array().tensor(); + PADDLE_ENFORCE(desc_.type().has_type(), "The var type hasn't been set."); + switch (desc_.type().type()) { + case proto::VarType::SELECTED_ROWS: + return desc_.type().selected_rows(); + case proto::VarType::LOD_TENSOR: + return desc_.type().lod_tensor().tensor(); + case proto::VarType::LOD_TENSOR_ARRAY: + return desc_.type().tensor_array().tensor(); default: PADDLE_THROW( "Getting 'tensor_desc' is not supported by the type of var %s.", @@ -202,13 +208,13 @@ const proto::TensorDesc &VarDesc::tensor_desc() const { } } -std::vector VarDesc::tensor_descs() const { +std::vector VarDesc::tensor_descs() const { PADDLE_ENFORCE(desc_.has_type(), "The var type hasn't been set."); - std::vector res; + std::vector res; res.reserve(GetTensorDescNum()); - switch (desc_.type()) { - case proto::VarDesc::READER: - for (const auto &lod_tensor : desc_.reader().lod_tensor()) { + switch (desc_.type().type()) { + case proto::VarType::READER: + for (const auto &lod_tensor : desc_.type().reader().lod_tensor()) { res.push_back(lod_tensor.tensor()); } return res; @@ -220,15 +226,16 @@ std::vector VarDesc::tensor_descs() const { } } -proto::TensorDesc *VarDesc::mutable_tensor_desc() { +proto::VarType::TensorDesc *VarDesc::mutable_tensor_desc() { PADDLE_ENFORCE(desc_.has_type(), "The var type hasn't been set."); - switch (desc_.type()) { - case proto::VarDesc::SELECTED_ROWS: - return desc_.mutable_selected_rows(); - case proto::VarDesc::LOD_TENSOR: - 
return desc_.mutable_lod_tensor()->mutable_tensor(); - case proto::VarDesc::LOD_TENSOR_ARRAY: - return desc_.mutable_tensor_array()->mutable_tensor(); + PADDLE_ENFORCE(desc_.type().has_type(), "The var type hasn't been set."); + switch (desc_.type().type()) { + case proto::VarType::SELECTED_ROWS: + return desc_.mutable_type()->mutable_selected_rows(); + case proto::VarType::LOD_TENSOR: + return desc_.mutable_type()->mutable_lod_tensor()->mutable_tensor(); + case proto::VarType::LOD_TENSOR_ARRAY: + return desc_.mutable_type()->mutable_tensor_array()->mutable_tensor(); default: PADDLE_THROW( "Getting 'mutable_tensor_desc' is not supported by the type of var " @@ -237,13 +244,15 @@ proto::TensorDesc *VarDesc::mutable_tensor_desc() { } } -std::vector VarDesc::mutable_tensor_descs() { +std::vector VarDesc::mutable_tensor_descs() { PADDLE_ENFORCE(desc_.has_type(), "The var type hasn't been set."); - std::vector res; + PADDLE_ENFORCE(desc_.type().has_type(), "The var type hasn't been set."); + std::vector res; res.reserve(GetTensorDescNum()); - switch (desc_.type()) { - case proto::VarDesc::READER: - for (auto &lod_tensor : *desc_.mutable_reader()->mutable_lod_tensor()) { + switch (desc_.type().type()) { + case proto::VarType::READER: + for (auto &lod_tensor : + *desc_.mutable_type()->mutable_reader()->mutable_lod_tensor()) { res.push_back(lod_tensor.mutable_tensor()); } return res; diff --git a/paddle/fluid/framework/var_desc.h b/paddle/fluid/framework/var_desc.h index b272e5063e..013ba446b9 100644 --- a/paddle/fluid/framework/var_desc.h +++ b/paddle/fluid/framework/var_desc.h @@ -57,7 +57,7 @@ class VarDesc { public: explicit VarDesc(const std::string &name) { desc_.set_name(name); - desc_.set_type(proto::VarDesc::LOD_TENSOR); + desc_.mutable_type()->set_type(proto::VarType::LOD_TENSOR); } explicit VarDesc(const proto::VarDesc &desc) : desc_(desc) {} @@ -96,19 +96,19 @@ class VarDesc { std::vector GetLoDLevels() const; - proto::VarDesc::VarType GetType() const; + proto::VarType::Type GetType() const; - void SetType(proto::VarDesc::VarType type); + void SetType(proto::VarType::Type type); bool Persistable() const { return desc_.persistable(); } void SetPersistable(bool persistable) { desc_.set_persistable(persistable); } private: - const proto::TensorDesc &tensor_desc() const; - std::vector tensor_descs() const; - proto::TensorDesc *mutable_tensor_desc(); - std::vector mutable_tensor_descs(); + const proto::VarType::TensorDesc &tensor_desc() const; + std::vector tensor_descs() const; + proto::VarType::TensorDesc *mutable_tensor_desc(); + std::vector mutable_tensor_descs(); proto::VarDesc desc_; }; diff --git a/paddle/fluid/framework/var_type.h b/paddle/fluid/framework/var_type.h index b5a6183892..960ebff9d7 100644 --- a/paddle/fluid/framework/var_type.h +++ b/paddle/fluid/framework/var_type.h @@ -23,17 +23,17 @@ limitations under the License. 
*/ namespace paddle { namespace framework { -inline proto::VarDesc::VarType ToVarType(std::type_index type) { +inline proto::VarType::Type ToVarType(std::type_index type) { if (type.hash_code() == typeid(LoDTensor).hash_code()) { - return proto::VarDesc_VarType_LOD_TENSOR; + return proto::VarType_Type_LOD_TENSOR; } else if (type.hash_code() == typeid(LoDRankTable).hash_code()) { - return proto::VarDesc_VarType_LOD_RANK_TABLE; + return proto::VarType_Type_LOD_RANK_TABLE; } else if (type.hash_code() == typeid(LoDTensorArray).hash_code()) { - return proto::VarDesc_VarType_LOD_TENSOR_ARRAY; + return proto::VarType_Type_LOD_TENSOR_ARRAY; } else if (type.hash_code() == typeid(SelectedRows).hash_code()) { - return proto::VarDesc_VarType_SELECTED_ROWS; + return proto::VarType_Type_SELECTED_ROWS; } else if (type.hash_code() == typeid(ReaderHolder).hash_code()) { - return proto::VarDesc_VarType_READER; + return proto::VarType_Type_READER; } else { PADDLE_THROW("ToVarType:Unsupported type %s", type.name()); } @@ -42,19 +42,19 @@ inline proto::VarDesc::VarType ToVarType(std::type_index type) { template inline void VisitVarType(const framework::Variable& var, Visitor visitor) { switch (ToVarType(var.Type())) { - case proto::VarDesc_VarType_LOD_TENSOR: + case proto::VarType_Type_LOD_TENSOR: visitor(var.Get()); return; - case proto::VarDesc_VarType_LOD_RANK_TABLE: + case proto::VarType_Type_LOD_RANK_TABLE: visitor(var.Get()); return; - case proto::VarDesc_VarType_LOD_TENSOR_ARRAY: + case proto::VarType_Type_LOD_TENSOR_ARRAY: visitor(var.Get()); return; - case proto::VarDesc_VarType_SELECTED_ROWS: + case proto::VarType_Type_SELECTED_ROWS: visitor(var.Get()); return; - case proto::VarDesc_VarType_READER: + case proto::VarType_Type_READER: visitor(var.Get()); return; default: diff --git a/paddle/fluid/framework/var_type_inference_test.cc b/paddle/fluid/framework/var_type_inference_test.cc index 961f209ee1..1dced845ed 100644 --- a/paddle/fluid/framework/var_type_inference_test.cc +++ b/paddle/fluid/framework/var_type_inference_test.cc @@ -35,14 +35,14 @@ class SumOpVarTypeInference : public VarTypeInference { public: void operator()(const OpDesc &op_desc, BlockDesc *block) const override { auto &inputs = op_desc.Input("X"); - auto default_var_type = proto::VarDesc::SELECTED_ROWS; + auto default_var_type = proto::VarType::SELECTED_ROWS; bool any_input_is_lod_tensor = std::any_of( inputs.begin(), inputs.end(), [block](const std::string &name) { - return block->Var(name)->GetType() == proto::VarDesc::LOD_TENSOR; + return block->Var(name)->GetType() == proto::VarType::LOD_TENSOR; }); if (any_input_is_lod_tensor) { - default_var_type = proto::VarDesc::LOD_TENSOR; + default_var_type = proto::VarType::LOD_TENSOR; } auto out_var_name = op_desc.Output("Out").front(); @@ -67,19 +67,19 @@ TEST(InferVarType, sum_op) { op->SetInput("X", {"test_a", "test_b", "test_c"}); op->SetOutput("Out", {"test_out"}); - prog.MutableBlock(0)->Var("test_a")->SetType(proto::VarDesc::SELECTED_ROWS); - prog.MutableBlock(0)->Var("test_b")->SetType(proto::VarDesc::SELECTED_ROWS); - prog.MutableBlock(0)->Var("test_c")->SetType(proto::VarDesc::SELECTED_ROWS); + prog.MutableBlock(0)->Var("test_a")->SetType(proto::VarType::SELECTED_ROWS); + prog.MutableBlock(0)->Var("test_b")->SetType(proto::VarType::SELECTED_ROWS); + prog.MutableBlock(0)->Var("test_c")->SetType(proto::VarType::SELECTED_ROWS); prog.MutableBlock(0)->Var("test_out"); op->InferVarType(prog.MutableBlock(0)); - ASSERT_EQ(proto::VarDesc::SELECTED_ROWS, + 
ASSERT_EQ(proto::VarType::SELECTED_ROWS, prog.MutableBlock(0)->Var("test_out")->GetType()); - prog.MutableBlock(0)->Var("test_b")->SetType(proto::VarDesc::LOD_TENSOR); + prog.MutableBlock(0)->Var("test_b")->SetType(proto::VarType::LOD_TENSOR); op->InferVarType(prog.MutableBlock(0)); - ASSERT_EQ(proto::VarDesc::LOD_TENSOR, + ASSERT_EQ(proto::VarType::LOD_TENSOR, prog.MutableBlock(0)->Var("test_out")->GetType()); } @@ -90,14 +90,14 @@ TEST(InferVarType, sum_op_without_infer_var_type) { op->SetInput("X", {"test2_a", "test2_b", "test2_c"}); op->SetOutput("Out", {"test2_out"}); - prog.MutableBlock(0)->Var("test2_a")->SetType(proto::VarDesc::SELECTED_ROWS); - prog.MutableBlock(0)->Var("test2_b")->SetType(proto::VarDesc::SELECTED_ROWS); - prog.MutableBlock(0)->Var("test2_c")->SetType(proto::VarDesc::SELECTED_ROWS); + prog.MutableBlock(0)->Var("test2_a")->SetType(proto::VarType::SELECTED_ROWS); + prog.MutableBlock(0)->Var("test2_b")->SetType(proto::VarType::SELECTED_ROWS); + prog.MutableBlock(0)->Var("test2_c")->SetType(proto::VarType::SELECTED_ROWS); prog.MutableBlock(0)->Var("test2_out"); op->InferVarType(prog.MutableBlock(0)); - ASSERT_EQ(proto::VarDesc_VarType_LOD_TENSOR, + ASSERT_EQ(proto::VarType_Type_LOD_TENSOR, prog.MutableBlock(0)->Var("test2_out")->GetType()); } diff --git a/paddle/fluid/operators/assign_op.cc b/paddle/fluid/operators/assign_op.cc index eedf6b8c66..e21dc6d77f 100644 --- a/paddle/fluid/operators/assign_op.cc +++ b/paddle/fluid/operators/assign_op.cc @@ -115,8 +115,8 @@ class AssignInferShape : public framework::InferShapeBase { void operator()(framework::InferShapeContext *context) const override { if (context->HasInput("X")) { auto type = context->GetInputsVarType("X")[0]; - if (type == framework::proto::VarDesc_VarType_SELECTED_ROWS || - type == framework::proto::VarDesc_VarType_LOD_TENSOR) { + if (type == framework::proto::VarType::SELECTED_ROWS || + type == framework::proto::VarType::LOD_TENSOR) { context->SetOutputDim("Out", context->GetInputDim("X")); } } diff --git a/paddle/fluid/operators/beam_search_decode_op.cc b/paddle/fluid/operators/beam_search_decode_op.cc index dacb0e2681..718f469d38 100644 --- a/paddle/fluid/operators/beam_search_decode_op.cc +++ b/paddle/fluid/operators/beam_search_decode_op.cc @@ -128,10 +128,10 @@ class BeamSearchDecodeInferVarType : public framework::VarTypeInference { void operator()(const framework::OpDesc& op_desc, framework::BlockDesc* block) const override { for (auto& o : op_desc.Output("SentenceIds")) { - block->Var(o)->SetType(framework::proto::VarDesc::LOD_TENSOR); + block->Var(o)->SetType(framework::proto::VarType::LOD_TENSOR); } for (auto& o : op_desc.Output("SentenceScores")) { - block->Var(o)->SetType(framework::proto::VarDesc::LOD_TENSOR); + block->Var(o)->SetType(framework::proto::VarType::LOD_TENSOR); } } }; diff --git a/paddle/fluid/operators/beam_search_op.cc b/paddle/fluid/operators/beam_search_op.cc index 76985ea9c2..e848b1f12c 100644 --- a/paddle/fluid/operators/beam_search_op.cc +++ b/paddle/fluid/operators/beam_search_op.cc @@ -240,10 +240,10 @@ class BeamSearchInferVarType : public framework::VarTypeInference { void operator()(const framework::OpDesc &op_desc, framework::BlockDesc *block) const override { for (auto &o : op_desc.Output("selected_ids")) { - block->Var(o)->SetType(framework::proto::VarDesc::LOD_TENSOR); + block->Var(o)->SetType(framework::proto::VarType::LOD_TENSOR); } for (auto &o : op_desc.Output("selected_scores")) { - block->Var(o)->SetType(framework::proto::VarDesc::LOD_TENSOR); + 
block->Var(o)->SetType(framework::proto::VarType::LOD_TENSOR); } } }; diff --git a/paddle/fluid/operators/create_reader_op.cc b/paddle/fluid/operators/create_reader_op.cc index 1393f1a66b..17ed7e24ec 100644 --- a/paddle/fluid/operators/create_reader_op.cc +++ b/paddle/fluid/operators/create_reader_op.cc @@ -84,7 +84,7 @@ class CreateFileReaderInferVarType : public framework::VarTypeInference { framework::BlockDesc* block) const override { std::string reader_name = op_desc.Output("Out")[0]; framework::VarDesc* reader = block->FindVarRecursive(reader_name); - reader->SetType(framework::proto::VarDesc::READER); + reader->SetType(framework::proto::VarType::READER); } }; @@ -97,7 +97,7 @@ class CreateDecoratedReaderInferVarType : public framework::VarTypeInference { framework::VarDesc* in_reader = block->FindVarRecursive(in_reader_name); std::string out_reader_name = op_desc.Output("Out")[0]; framework::VarDesc* out_reader = block->FindVarRecursive(out_reader_name); - out_reader->SetType(framework::proto::VarDesc::READER); + out_reader->SetType(framework::proto::VarType::READER); out_reader->SetDataTypes(in_reader->GetDataTypes()); } }; @@ -147,7 +147,7 @@ class CreateRandomDataGeneratorOpMaker AddComment(R"DOC( CreateRandomDataGenerator Operator - This Op creates a random reader. + This Op creates a random reader. The reader generates random data instead of really reading from files. Generated data follow an uniform distribution between 'min' and 'max'. )DOC"); @@ -183,7 +183,7 @@ class CreateShuffleReaderOpMaker : public framework::OpProtoAndCheckerMaker { CreateShuffleReader Operator A shuffle reader takes another reader as its 'underlying reader' - and yields the underlying reader's outputs in a shuffled order. + and yields the underlying reader's outputs in a shuffled order. )DOC"); } }; @@ -218,8 +218,8 @@ class CreateBatchReaderOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( CreateBatchReader Operator - A batch reader takes another reader as its 'underlying reader', - gathers the underlying reader's outputs and then yields them in batches. + A batch reader takes another reader as its 'underlying reader', + gathers the underlying reader's outputs and then yields them in batches. 
)DOC"); } }; diff --git a/paddle/fluid/operators/detail/sendrecvop_utils.cc b/paddle/fluid/operators/detail/sendrecvop_utils.cc index 5403dbc2a0..169fd40fd9 100644 --- a/paddle/fluid/operators/detail/sendrecvop_utils.cc +++ b/paddle/fluid/operators/detail/sendrecvop_utils.cc @@ -24,11 +24,11 @@ void SerializeToMessage(const std::string& name, const framework::Variable* var, msg->set_varname(name); std::ostringstream oss; switch (framework::ToVarType(var->Type())) { - case framework::proto::VarDesc_VarType_LOD_TENSOR: + case framework::proto::VarType_Type_LOD_TENSOR: msg->set_type(sendrecv::VarType::LOD_TENSOR); framework::SerializeToStream(oss, var->Get(), ctx); break; - case framework::proto::VarDesc_VarType_SELECTED_ROWS: + case framework::proto::VarType_Type_SELECTED_ROWS: msg->set_type(sendrecv::VarType::SELECTED_ROWS); framework::SerializeToStream(oss, var->Get(), ctx); diff --git a/paddle/fluid/operators/get_places_op.cc b/paddle/fluid/operators/get_places_op.cc index 8555b0778f..9002ce4717 100644 --- a/paddle/fluid/operators/get_places_op.cc +++ b/paddle/fluid/operators/get_places_op.cc @@ -98,7 +98,7 @@ class GetPlacesInferVarType : public framework::VarTypeInference { framework::BlockDesc *block) const override { for (auto &o_name : op_desc.Output("Out")) { block->FindRecursiveOrCreateVar(o_name).SetType( - framework::proto::VarDesc::PLACE_LIST); + framework::proto::VarType::PLACE_LIST); } } }; diff --git a/paddle/fluid/operators/lod_rank_table_op.cc b/paddle/fluid/operators/lod_rank_table_op.cc index 2d01ed6737..590b44e14f 100644 --- a/paddle/fluid/operators/lod_rank_table_op.cc +++ b/paddle/fluid/operators/lod_rank_table_op.cc @@ -69,7 +69,7 @@ class LoDRankTableInferVarType : public framework::VarTypeInference { framework::BlockDesc *block) const override { for (auto &o : op_desc.Output("Out")) { block->FindRecursiveOrCreateVar(o).SetType( - framework::proto::VarDesc::LOD_RANK_TABLE); + framework::proto::VarType::LOD_RANK_TABLE); } } }; diff --git a/paddle/fluid/operators/lod_tensor_to_array_op.cc b/paddle/fluid/operators/lod_tensor_to_array_op.cc index be47fdfd04..b5e778a581 100644 --- a/paddle/fluid/operators/lod_tensor_to_array_op.cc +++ b/paddle/fluid/operators/lod_tensor_to_array_op.cc @@ -138,7 +138,7 @@ class LoDTensorToArrayInferVarType : public framework::VarTypeInference { void operator()(const framework::OpDesc &op_desc, framework::BlockDesc *block) const override { for (auto &out_var : op_desc.Output("Out")) { - block->Var(out_var)->SetType(framework::proto::VarDesc::LOD_TENSOR_ARRAY); + block->Var(out_var)->SetType(framework::proto::VarType::LOD_TENSOR_ARRAY); } } }; diff --git a/paddle/fluid/operators/lookup_table_op.cc b/paddle/fluid/operators/lookup_table_op.cc index d338553f7c..3acdca17af 100644 --- a/paddle/fluid/operators/lookup_table_op.cc +++ b/paddle/fluid/operators/lookup_table_op.cc @@ -123,11 +123,11 @@ class LookupTableOpGradVarTypeInference : public framework::VarTypeInference { VLOG(3) << "lookup_table_grad op " << framework::GradVarName("W") << " is set to SelectedRows"; block->Var(out_var_name) - ->SetType(framework::proto::VarDesc::SELECTED_ROWS); + ->SetType(framework::proto::VarType::SELECTED_ROWS); } else { VLOG(3) << "lookup_table_grad op " << framework::GradVarName("W") << " is set to LoDTensor"; - block->Var(out_var_name)->SetType(framework::proto::VarDesc::LOD_TENSOR); + block->Var(out_var_name)->SetType(framework::proto::VarType::LOD_TENSOR); } } }; diff --git a/paddle/fluid/operators/read_op.cc b/paddle/fluid/operators/read_op.cc index 
127df82ff1..62beab82d4 100644 --- a/paddle/fluid/operators/read_op.cc +++ b/paddle/fluid/operators/read_op.cc @@ -45,7 +45,7 @@ class ReadInferVarType : public framework::VarTypeInference { PADDLE_ENFORCE_EQ(dtypes.size(), out_names.size()); for (size_t i = 0; i < dtypes.size(); ++i) { framework::VarDesc& out = block->FindRecursiveOrCreateVar(out_names[i]); - out.SetType(framework::proto::VarDesc::LOD_TENSOR); + out.SetType(framework::proto::VarType::LOD_TENSOR); out.SetDataType(dtypes[i]); } } diff --git a/paddle/fluid/operators/sum_op.cc b/paddle/fluid/operators/sum_op.cc index bfc5709c4b..7b88387c33 100644 --- a/paddle/fluid/operators/sum_op.cc +++ b/paddle/fluid/operators/sum_op.cc @@ -29,7 +29,7 @@ class SumOp : public framework::OperatorWithKernel { "Output(Out) of SumOp should not be null."); if (ctx->IsRuntime() && ctx->GetOutputsVarType("Out")[0] == - framework::proto::VarDesc::LOD_TENSOR_ARRAY) { + framework::proto::VarType::LOD_TENSOR_ARRAY) { return; // skip runtime infershape when is tensor array; } @@ -118,7 +118,7 @@ class SumOpVarTypeInference : public framework::VarTypeInference { void operator()(const framework::OpDesc& op_desc, framework::BlockDesc* block) const override { auto& inputs = op_desc.Input("X"); - auto var_type = framework::proto::VarDesc::SELECTED_ROWS; + auto var_type = framework::proto::VarType::SELECTED_ROWS; for (auto& name : op_desc.Input("X")) { VLOG(10) << name << " " @@ -128,12 +128,12 @@ class SumOpVarTypeInference : public framework::VarTypeInference { bool any_input_is_lod_tensor = std::any_of( inputs.begin(), inputs.end(), [block](const std::string& name) { return block->FindRecursiveOrCreateVar(name).GetType() == - framework::proto::VarDesc::LOD_TENSOR; + framework::proto::VarType::LOD_TENSOR; }); auto is_tensor_array = [block](const std::string& name) { return block->FindRecursiveOrCreateVar(name).GetType() == - framework::proto::VarDesc::LOD_TENSOR_ARRAY; + framework::proto::VarType::LOD_TENSOR_ARRAY; }; bool any_input_is_tensor_array = @@ -151,9 +151,9 @@ class SumOpVarTypeInference : public framework::VarTypeInference { PADDLE_ENFORCE(all_inputs_are_tensor_array, "Not all inputs are tensor array:\n%s", os.str()); } - var_type = framework::proto::VarDesc::LOD_TENSOR_ARRAY; + var_type = framework::proto::VarType::LOD_TENSOR_ARRAY; } else if (any_input_is_lod_tensor) { - var_type = framework::proto::VarDesc::LOD_TENSOR; + var_type = framework::proto::VarType::LOD_TENSOR; } auto out_var_name = op_desc.Output("Out").front(); diff --git a/paddle/fluid/operators/tensor_array_read_write_op.cc b/paddle/fluid/operators/tensor_array_read_write_op.cc index 278b348117..9b484cda12 100644 --- a/paddle/fluid/operators/tensor_array_read_write_op.cc +++ b/paddle/fluid/operators/tensor_array_read_write_op.cc @@ -108,7 +108,7 @@ class WriteToArrayInferVarType : public framework::VarTypeInference { auto out_name = op_desc.Output("Out")[0]; VLOG(10) << "Set Variable " << out_name << " as LOD_TENSOR_ARRAY"; auto &out = block->FindRecursiveOrCreateVar(out_name); - out.SetType(framework::proto::VarDesc::LOD_TENSOR_ARRAY); + out.SetType(framework::proto::VarType::LOD_TENSOR_ARRAY); auto *x = block->FindVarRecursive(x_name); if (x != nullptr) { out.SetDataType(x->GetDataType()); diff --git a/paddle/fluid/operators/while_op.cc b/paddle/fluid/operators/while_op.cc index 94a11eaf78..3d5cdeda26 100644 --- a/paddle/fluid/operators/while_op.cc +++ b/paddle/fluid/operators/while_op.cc @@ -330,10 +330,10 @@ class WhileGradOpShapeInference : public framework::InferShapeBase { 
         continue;
       }
       auto dims = ctx->GetInputsElementDim(kX, i);
-      if (var_types[i] == framework::proto::VarDesc::LOD_TENSOR) {
+      if (var_types[i] == framework::proto::VarType::LOD_TENSOR) {
         names_to_set.push_back(pg_names[i]);
         dims_to_set.push_back(dims);
-      } else if (var_types[i] == framework::proto::VarDesc::LOD_TENSOR_ARRAY) {
+      } else if (var_types[i] == framework::proto::VarType::LOD_TENSOR_ARRAY) {
         // not sure how to set the dim of LOD_TENSOR_ARRAY
         names_to_set.push_back(pg_names[i]);
         dims_to_set.push_back(dims);
diff --git a/paddle/fluid/pybind/protobuf.cc b/paddle/fluid/pybind/protobuf.cc
index 3341edb370..9f97cc5007 100644
--- a/paddle/fluid/pybind/protobuf.cc
+++ b/paddle/fluid/pybind/protobuf.cc
@@ -232,16 +232,16 @@ void BindVarDsec(py::module &m) {
       .def("persistable", &VarDesc::Persistable)
       .def("set_persistable", &VarDesc::SetPersistable);

-  py::enum_<proto::VarDesc::VarType>(var_desc, "VarType", "")
-      .value("LOD_TENSOR", proto::VarDesc::LOD_TENSOR)
-      .value("SELECTED_ROWS", proto::VarDesc::SELECTED_ROWS)
-      .value("FEED_MINIBATCH", proto::VarDesc::FEED_MINIBATCH)
-      .value("FETCH_LIST", proto::VarDesc::FETCH_LIST)
-      .value("STEP_SCOPES", proto::VarDesc::STEP_SCOPES)
-      .value("LOD_RANK_TABLE", proto::VarDesc::LOD_RANK_TABLE)
-      .value("LOD_TENSOR_ARRAY", proto::VarDesc::LOD_TENSOR_ARRAY)
-      .value("PLACE_LIST", proto::VarDesc::PLACE_LIST)
-      .value("READER", proto::VarDesc::READER);
+  py::enum_<proto::VarType::Type>(var_desc, "VarType", "")
+      .value("LOD_TENSOR", proto::VarType::LOD_TENSOR)
+      .value("SELECTED_ROWS", proto::VarType::SELECTED_ROWS)
+      .value("FEED_MINIBATCH", proto::VarType::FEED_MINIBATCH)
+      .value("FETCH_LIST", proto::VarType::FETCH_LIST)
+      .value("STEP_SCOPES", proto::VarType::STEP_SCOPES)
+      .value("LOD_RANK_TABLE", proto::VarType::LOD_RANK_TABLE)
+      .value("LOD_TENSOR_ARRAY", proto::VarType::LOD_TENSOR_ARRAY)
+      .value("PLACE_LIST", proto::VarType::PLACE_LIST)
+      .value("READER", proto::VarType::READER);
 }

 void BindOpDesc(py::module &m) {
-- 
GitLab


From cde6241a1bc1c4c42d3991d5a394f7f1398c702d Mon Sep 17 00:00:00 2001
From: Xin Pan
Date: Tue, 13 Feb 2018 08:37:48 +0800
Subject: [PATCH 099/217] Run Python OP tests in a single Python process to
 improve test time. (#8362)

Currently our tests run with 2 GPUs, and the init time is absurdly long:
about 4s per process. We also run each OP test in a different process.
This PR:

1. creates the cmake function py_test_modules, which generates a Makefile
   rule that runs a list of Python unittest modules in a single Python
   process.

2. moves all "python unittest compatible" tests (i.e., tests that use the
   unittest package rather than being plain Python scripts) from
   fluid/tests to fluid/tests/unittests.

3. makes cmake run all OP tests in fluid/tests/unittests in a single
   process, except the time-consuming tests, which are split into
   separate processes to exploit parallelism. Please make sure to use the
   unittest package if you put a Python test file in
   fluid/tests/unittests.

4. removes all exit(0) calls from fluid/tests/unittests/*.py. exit(0) was
   used to disable a unittest, but we cannot do that when running all
   tests in a single process, since it terminates the process without
   running the remaining tests. Instead, each disabled test is removed in
   fluid/tests/unittests/CMakeLists.txt, with a FIXME added for it.
   Please disable unittests in fluid/tests/unittests/CMakeLists.txt
   rather than adding exit(0) to Python files in fluid/tests/unittests/.

5. adds an option WITH_FAST_BUNDLE_TEST. When OFF, the unit tests run in
   separate processes so that they can be tested individually.
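To make point 3 concrete, a "python unittest compatible" test module under
fluid/tests/unittests is expected to look roughly like the following minimal
sketch (test_example_op.py and its contents are hypothetical placeholders,
not part of this patch):

    # test_example_op.py -- hypothetical placeholder module.
    import unittest


    class TestExampleOp(unittest.TestCase):
        def test_add(self):
            # A trivial check; real OP tests typically subclass
            # op_test.OpTest from the op_test helper module.
            self.assertEqual(1 + 1, 2)


    if __name__ == '__main__':
        # Use unittest.main() and never call exit(0) to disable a test:
        # with WITH_FAST_BUNDLE_TEST many modules share one process, so
        # exit(0) would skip every module scheduled after this one.
        unittest.main()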
--- CMakeLists.txt | 1 + python/paddle/v2/fluid/tests/CMakeLists.txt | 9 +- .../v2/fluid/tests/unittests/CMakeLists.txt | 89 +++++++++++++++++++ .../v2/fluid/tests/unittests/__init__.py | 13 +++ .../fluid/tests/{ => unittests}/decorators.py | 0 .../v2/fluid/tests/{ => unittests}/op_test.py | 0 .../tests/{ => unittests}/test_accuracy_op.py | 0 .../{ => unittests}/test_activation_op.py | 0 .../tests/{ => unittests}/test_adadelta_op.py | 0 .../tests/{ => unittests}/test_adagrad_op.py | 0 .../tests/{ => unittests}/test_adam_op.py | 0 .../tests/{ => unittests}/test_adamax_op.py | 0 .../test_array_read_write_op.py | 0 .../tests/{ => unittests}/test_assign_op.py | 0 .../{ => unittests}/test_assign_value_op.py | 0 .../tests/{ => unittests}/test_auc_op.py | 0 .../{ => unittests}/test_batch_norm_op.py | 0 .../test_beam_search_decode_op.py | 0 .../{ => unittests}/test_beam_search_op.py | 0 .../test_bilinear_tensor_product_op.py | 0 .../test_bipartite_match_op.py | 0 .../{ => unittests}/test_box_coder_op.py | 0 .../{ => unittests}/test_calc_gradient.py | 0 .../tests/{ => unittests}/test_cast_op.py | 0 .../{ => unittests}/test_chunk_eval_op.py | 0 .../{ => unittests}/test_clip_by_norm_op.py | 0 .../tests/{ => unittests}/test_clip_op.py | 0 .../tests/{ => unittests}/test_compare_op.py | 0 .../tests/{ => unittests}/test_concat_op.py | 0 .../tests/{ => unittests}/test_cond_op.py | 3 - .../{ => unittests}/test_conditional_block.py | 0 .../tests/{ => unittests}/test_const_value.py | 0 .../tests/{ => unittests}/test_conv2d_op.py | 0 .../test_conv2d_transpose_op.py | 0 .../tests/{ => unittests}/test_conv3d_op.py | 0 .../test_conv3d_transpose_op.py | 0 .../{ => unittests}/test_conv_shift_op.py | 0 .../tests/{ => unittests}/test_cos_sim_op.py | 0 .../test_create_op_doc_string.py | 0 .../{ => unittests}/test_crf_decoding_op.py | 0 .../tests/{ => unittests}/test_crop_op.py | 0 .../{ => unittests}/test_cross_entropy_op.py | 0 .../tests/{ => unittests}/test_ctc_align.py | 0 .../tests/{ => unittests}/test_cumsum_op.py | 0 .../test_decayed_adagrad_op.py | 0 .../test_default_scope_funcs.py | 0 .../{ => unittests}/test_detection_map_op.py | 0 .../test_detection_output_op.py | 4 +- .../tests/{ => unittests}/test_dropout_op.py | 0 .../tests/{ => unittests}/test_dyn_rnn.py | 0 .../test_dynrnn_gradient_check.py | 0 .../test_dynrnn_static_input.py | 0 .../{ => unittests}/test_edit_distance_op.py | 0 .../test_elementwise_add_op.py | 0 .../test_elementwise_div_op.py | 0 .../test_elementwise_max_op.py | 0 .../test_elementwise_min_op.py | 0 .../test_elementwise_mul_op.py | 0 .../test_elementwise_pow_op.py | 0 .../test_elementwise_sub_op.py | 0 .../tests/{ => unittests}/test_exception.py | 0 .../{ => unittests}/test_executor_and_mul.py | 0 .../tests/{ => unittests}/test_expand_op.py | 0 .../{ => unittests}/test_feed_fetch_method.py | 0 .../tests/{ => unittests}/test_fetch_var.py | 0 .../test_fill_constant_batch_size_like_op.py | 0 .../{ => unittests}/test_fill_constant_op.py | 0 .../tests/{ => unittests}/test_fill_op.py | 0 .../test_fill_zeros_like_op.py | 0 .../test_framework_debug_str.py | 0 .../tests/{ => unittests}/test_ftrl_op.py | 0 .../tests/{ => unittests}/test_gather_op.py | 0 .../test_gaussian_random_op.py | 0 .../{ => unittests}/test_get_places_op.py | 0 .../tests/{ => unittests}/test_gru_op.py | 0 .../tests/{ => unittests}/test_gru_unit_op.py | 0 .../{ => unittests}/test_hinge_loss_op.py | 0 .../{ => unittests}/test_huber_loss_op.py | 0 .../{ => unittests}/test_im2sequence_op.py | 0 
.../test_image_classification_layer.py | 0 .../tests/{ => unittests}/test_infer_shape.py | 0 .../test_inference_model_io.py | 0 .../tests/{ => unittests}/test_initializer.py | 0 .../{ => unittests}/test_iou_similarity_op.py | 0 .../tests/{ => unittests}/test_is_empty_op.py | 0 .../tests/{ => unittests}/test_l1_norm_op.py | 0 .../{ => unittests}/test_label_smooth_op.py | 0 .../{ => unittests}/test_layer_norm_op.py | 0 .../tests/{ => unittests}/test_layers.py | 0 .../test_learning_rate_decay.py | 0 .../test_linear_chain_crf_op.py | 0 .../test_lod_array_length_op.py | 0 .../{ => unittests}/test_lod_rank_table.py | 0 .../{ => unittests}/test_lod_reset_op.py | 0 .../{ => unittests}/test_lod_tensor_array.py | 0 .../test_lod_tensor_array_ops.py | 0 .../tests/{ => unittests}/test_log_loss_op.py | 0 .../tests/{ => unittests}/test_logical_op.py | 0 .../{ => unittests}/test_lookup_table_op.py | 0 .../tests/{ => unittests}/test_lrn_op.py | 0 .../tests/{ => unittests}/test_lstm_op.py | 0 .../{ => unittests}/test_lstm_unit_op.py | 2 - .../tests/{ => unittests}/test_lstmp_op.py | 0 .../test_margin_rank_loss_op.py | 0 .../{ => unittests}/test_math_op_patch.py | 0 .../tests/{ => unittests}/test_matmul_op.py | 0 .../tests/{ => unittests}/test_maxout_op.py | 0 .../tests/{ => unittests}/test_mean_op.py | 0 .../test_memory_optimization_transpiler.py | 0 .../test_mine_hard_examples_op.py | 0 .../tests/{ => unittests}/test_minus_op.py | 0 .../test_modified_huber_loss_op.py | 2 - .../tests/{ => unittests}/test_momentum_op.py | 0 .../tests/{ => unittests}/test_mul_op.py | 0 .../{ => unittests}/test_multiclass_nms_op.py | 0 .../test_multihead_attention.py | 0 .../{ => unittests}/test_multiplex_op.py | 0 .../fluid/tests/{ => unittests}/test_nce.py | 2 - .../fluid/tests/{ => unittests}/test_net.py | 0 .../tests/{ => unittests}/test_norm_op.py | 0 .../test_normalization_wrapper.py | 0 .../tests/{ => unittests}/test_one_hot_op.py | 0 .../{ => unittests}/test_op_support_gpu.py | 0 .../tests/{ => unittests}/test_operator.py | 0 .../{ => unittests}/test_operator_desc.py | 0 .../tests/{ => unittests}/test_optimizer.py | 0 .../tests/{ => unittests}/test_pad_op.py | 0 .../tests/{ => unittests}/test_parallel_op.py | 7 +- .../tests/{ => unittests}/test_parameter.py | 0 .../tests/{ => unittests}/test_pool2d_op.py | 0 .../tests/{ => unittests}/test_pool3d_op.py | 0 .../tests/{ => unittests}/test_pool_max_op.py | 0 .../test_positive_negative_pair_op.py | 0 .../test_precision_recall_op.py | 0 .../tests/{ => unittests}/test_prelu_op.py | 0 .../tests/{ => unittests}/test_print_op.py | 0 .../{ => unittests}/test_prior_box_op.py | 0 .../tests/{ => unittests}/test_profiler.py | 0 .../tests/{ => unittests}/test_program.py | 0 .../tests/{ => unittests}/test_protobuf.py | 0 .../{ => unittests}/test_protobuf_descs.py | 0 .../test_proximal_adagrad_op.py | 0 .../{ => unittests}/test_proximal_gd_op.py | 0 .../{ => unittests}/test_rank_loss_op.py | 0 .../{ => unittests}/test_recurrent_op.py | 2 - .../tests/{ => unittests}/test_recv_op.py | 0 .../tests/{ => unittests}/test_reduce_op.py | 0 .../tests/{ => unittests}/test_registry.py | 2 +- .../tests/{ => unittests}/test_regularizer.py | 0 .../test_reorder_lod_tensor.py | 0 .../tests/{ => unittests}/test_reshape_op.py | 0 .../tests/{ => unittests}/test_rmsprop_op.py | 0 .../test_rnn_memory_helper_op.py | 0 .../tests/{ => unittests}/test_roi_pool_op.py | 0 .../tests/{ => unittests}/test_row_conv_op.py | 0 .../tests/{ => unittests}/test_scale_op.py | 0 .../tests/{ => unittests}/test_scatter_op.py | 
0 .../fluid/tests/{ => unittests}/test_scope.py | 0 .../{ => unittests}/test_selected_rows.py | 0 .../{ => unittests}/test_seq_concat_op.py | 1 - .../tests/{ => unittests}/test_seq_conv.py | 0 .../tests/{ => unittests}/test_seq_pool.py | 0 .../{ => unittests}/test_sequence_erase_op.py | 0 .../{ => unittests}/test_sequence_expand.py | 0 .../{ => unittests}/test_sequence_reshape.py | 0 .../{ => unittests}/test_sequence_slice_op.py | 0 .../test_sequence_softmax_op.py | 0 .../tests/{ => unittests}/test_sgd_op.py | 0 .../{ => unittests}/test_shrink_rnn_memory.py | 0 ...st_sigmoid_cross_entropy_with_logits_op.py | 0 .../tests/{ => unittests}/test_sign_op.py | 0 .../{ => unittests}/test_smooth_l1_loss_op.py | 0 .../tests/{ => unittests}/test_softmax_op.py | 0 .../test_softmax_with_cross_entropy_op.py | 0 .../test_split_and_merge_lod_tensor_op.py | 0 .../tests/{ => unittests}/test_split_op.py | 0 .../test_split_selected_rows_op.py | 0 .../test_split_var.py | 0 .../tests/{ => unittests}/test_spp_op.py | 0 .../test_squared_l2_distance_op.py | 0 .../test_squared_l2_norm_op.py | 0 .../tests/{ => unittests}/test_sum_op.py | 0 .../tests/{ => unittests}/test_switch.py | 0 .../{ => unittests}/test_target_assign_op.py | 0 .../tests/{ => unittests}/test_tensor.py | 0 .../tests/{ => unittests}/test_top_k_op.py | 0 .../{ => unittests}/test_transpose_op.py | 0 .../{ => unittests}/test_uniform_random_op.py | 0 .../tests/{ => unittests}/test_unpool_op.py | 0 .../tests/{ => unittests}/test_variable.py | 0 .../tests/{ => unittests}/test_warpctc_op.py | 0 .../test_weight_normalization.py | 0 .../tests/{ => unittests}/test_while_op.py | 0 193 files changed, 111 insertions(+), 26 deletions(-) create mode 100644 python/paddle/v2/fluid/tests/unittests/CMakeLists.txt create mode 100644 python/paddle/v2/fluid/tests/unittests/__init__.py rename python/paddle/v2/fluid/tests/{ => unittests}/decorators.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/op_test.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_accuracy_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_activation_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_adadelta_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_adagrad_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_adam_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_adamax_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_array_read_write_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_assign_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_assign_value_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_auc_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_batch_norm_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_beam_search_decode_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_beam_search_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_bilinear_tensor_product_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_bipartite_match_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_box_coder_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_calc_gradient.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_cast_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_chunk_eval_op.py (100%) rename 
python/paddle/v2/fluid/tests/{ => unittests}/test_clip_by_norm_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_clip_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_compare_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_concat_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_cond_op.py (96%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_conditional_block.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_const_value.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_conv2d_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_conv2d_transpose_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_conv3d_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_conv3d_transpose_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_conv_shift_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_cos_sim_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_create_op_doc_string.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_crf_decoding_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_crop_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_cross_entropy_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_ctc_align.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_cumsum_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_decayed_adagrad_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_default_scope_funcs.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_detection_map_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_detection_output_op.py (94%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_dropout_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_dyn_rnn.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_dynrnn_gradient_check.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_dynrnn_static_input.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_edit_distance_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_elementwise_add_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_elementwise_div_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_elementwise_max_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_elementwise_min_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_elementwise_mul_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_elementwise_pow_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_elementwise_sub_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_exception.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_executor_and_mul.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_expand_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_feed_fetch_method.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_fetch_var.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_fill_constant_batch_size_like_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_fill_constant_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_fill_op.py (100%) rename 
python/paddle/v2/fluid/tests/{ => unittests}/test_fill_zeros_like_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_framework_debug_str.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_ftrl_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_gather_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_gaussian_random_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_get_places_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_gru_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_gru_unit_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_hinge_loss_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_huber_loss_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_im2sequence_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_image_classification_layer.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_infer_shape.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_inference_model_io.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_initializer.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_iou_similarity_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_is_empty_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_l1_norm_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_label_smooth_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_layer_norm_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_layers.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_learning_rate_decay.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_linear_chain_crf_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_lod_array_length_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_lod_rank_table.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_lod_reset_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_lod_tensor_array.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_lod_tensor_array_ops.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_log_loss_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_logical_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_lookup_table_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_lrn_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_lstm_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_lstm_unit_op.py (95%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_lstmp_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_margin_rank_loss_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_math_op_patch.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_matmul_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_maxout_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_mean_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_memory_optimization_transpiler.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_mine_hard_examples_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_minus_op.py (100%) rename python/paddle/v2/fluid/tests/{ => 
unittests}/test_modified_huber_loss_op.py (96%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_momentum_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_mul_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_multiclass_nms_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_multihead_attention.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_multiplex_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_nce.py (97%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_net.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_norm_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_normalization_wrapper.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_one_hot_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_op_support_gpu.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_operator.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_operator_desc.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_optimizer.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_pad_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_parallel_op.py (96%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_parameter.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_pool2d_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_pool3d_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_pool_max_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_positive_negative_pair_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_precision_recall_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_prelu_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_print_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_prior_box_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_profiler.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_program.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_protobuf.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_protobuf_descs.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_proximal_adagrad_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_proximal_gd_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_rank_loss_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_recurrent_op.py (99%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_recv_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_reduce_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_registry.py (94%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_regularizer.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_reorder_lod_tensor.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_reshape_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_rmsprop_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_rnn_memory_helper_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_roi_pool_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_row_conv_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_scale_op.py (100%) rename 
python/paddle/v2/fluid/tests/{ => unittests}/test_scatter_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_scope.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_selected_rows.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_seq_concat_op.py (99%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_seq_conv.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_seq_pool.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_sequence_erase_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_sequence_expand.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_sequence_reshape.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_sequence_slice_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_sequence_softmax_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_sgd_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_shrink_rnn_memory.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_sigmoid_cross_entropy_with_logits_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_sign_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_smooth_l1_loss_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_softmax_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_softmax_with_cross_entropy_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_split_and_merge_lod_tensor_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_split_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_split_selected_rows_op.py (100%) rename python/paddle/v2/fluid/tests/{book_distribute => unittests}/test_split_var.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_spp_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_squared_l2_distance_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_squared_l2_norm_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_sum_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_switch.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_target_assign_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_tensor.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_top_k_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_transpose_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_uniform_random_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_unpool_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_variable.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_warpctc_op.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_weight_normalization.py (100%) rename python/paddle/v2/fluid/tests/{ => unittests}/test_while_op.py (100%) diff --git a/CMakeLists.txt b/CMakeLists.txt index ae04f9ff3f..fb91e3b369 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -60,6 +60,7 @@ option(USE_NNPACK "Compile PaddlePaddle with NNPACK library" OFF) option(WITH_DISTRIBUTE "Compile with grpc distributed support" OFF) option(USE_EIGEN_FOR_BLAS "Use matrix multiplication in Eigen" OFF) option(WITH_ARM_FP16 "Use half precision support on armv8.2-a cpu" OFF) +option(WITH_FAST_BUNDLE_TEST "Bundle tests that can be run in a single process together to reduce launch 
overhead" ON)

 # CMAKE_BUILD_TYPE
 if(NOT CMAKE_BUILD_TYPE)
diff --git a/python/paddle/v2/fluid/tests/CMakeLists.txt b/python/paddle/v2/fluid/tests/CMakeLists.txt
index 26a80abcb5..5ff7b1b027 100644
--- a/python/paddle/v2/fluid/tests/CMakeLists.txt
+++ b/python/paddle/v2/fluid/tests/CMakeLists.txt
@@ -1,16 +1,11 @@
 file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py")
 string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")

-if(NOT WITH_DISTRIBUTE)
-    list(REMOVE_ITEM TEST_OPS test_recv_op)
-endif(NOT WITH_DISTRIBUTE)
-
-list(REMOVE_ITEM TEST_OPS test_warpctc_op)
 foreach(src ${TEST_OPS})
-    py_test(${src} SRCS ${src}.py)
+    py_test(${src} SRCS ${src}.py)
 endforeach()
-py_test(test_warpctc_op SRCS test_warpctc_op.py ENVS FLAGS_warpctc_dir=${WARPCTC_LIB_DIR})

+add_subdirectory(unittests)
 add_subdirectory(book)
 add_subdirectory(book_distribute)
 add_subdirectory(book_memory_optimization)
diff --git a/python/paddle/v2/fluid/tests/unittests/CMakeLists.txt b/python/paddle/v2/fluid/tests/unittests/CMakeLists.txt
new file mode 100644
index 0000000000..9355f51311
--- /dev/null
+++ b/python/paddle/v2/fluid/tests/unittests/CMakeLists.txt
@@ -0,0 +1,89 @@
+file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py")
+string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
+
+if(NOT WITH_DISTRIBUTE)
+    list(REMOVE_ITEM TEST_OPS test_recv_op)
+endif(NOT WITH_DISTRIBUTE)
+
+list(REMOVE_ITEM TEST_OPS test_seq_concat_op) # FIXME(helin): https://github.com/PaddlePaddle/Paddle/issues/8290
+list(REMOVE_ITEM TEST_OPS test_modified_huber_loss_op) # FIXME(qijun) https://github.com/PaddlePaddle/Paddle/issues/5184
+list(REMOVE_ITEM TEST_OPS test_lstm_unit_op) # FIXME(qijun) https://github.com/PaddlePaddle/Paddle/issues/5185
+list(REMOVE_ITEM TEST_OPS test_nce) # FIXME(qijun) https://github.com/PaddlePaddle/Paddle/issues/7778
+list(REMOVE_ITEM TEST_OPS test_recurrent_op) # FIXME(qijun) https://github.com/PaddlePaddle/Paddle/issues/6152
+list(REMOVE_ITEM TEST_OPS test_cond_op) # FIXME(qijun): https://github.com/PaddlePaddle/Paddle/issues/5101#issuecomment-339814957
+list(REMOVE_ITEM TEST_OPS test_detection_output_op) # FIXME: detection_output_op will be rewritten. This unittest should be
+
+list(REMOVE_ITEM TEST_OPS op_test) # op_test is a helper python file, not a test
+list(REMOVE_ITEM TEST_OPS decorators) # decorators is a helper python file, not a test
+
+function(py_test_modules TARGET_NAME)
+  if(WITH_TESTING)
+    set(options "")
+    set(oneValueArgs "")
+    set(multiValueArgs MODULES DEPS ARGS ENVS)
+    cmake_parse_arguments(py_test_modules "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
+    add_test(NAME ${TARGET_NAME}
+             COMMAND env PYTHONPATH=${PADDLE_PYTHON_BUILD_DIR}/lib-python ${py_test_modules_ENVS}
+             ${PYTHON_EXECUTABLE} -u -m unittest --verbose ${py_test_modules_MODULES} ${py_test_modules_ARGS}
+             WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
+  endif()
+endfunction()
+
+# test time-consuming OPs in a separate process to exploit parallelism
+list(REMOVE_ITEM TEST_OPS test_warpctc_op)
+list(REMOVE_ITEM TEST_OPS test_dyn_rnn)
+list(REMOVE_ITEM TEST_OPS test_mul_op)
+
+# tests that need to be run in separate process.
+list(REMOVE_ITEM TEST_OPS test_multihead_attention) +list(REMOVE_ITEM TEST_OPS test_calc_gradient) +list(REMOVE_ITEM TEST_OPS test_while_op) +list(REMOVE_ITEM TEST_OPS test_lod_array_length_op) +list(REMOVE_ITEM TEST_OPS test_reorder_lod_tensor) +list(REMOVE_ITEM TEST_OPS test_profiler) +list(REMOVE_ITEM TEST_OPS test_normalization_wrapper) +list(REMOVE_ITEM TEST_OPS test_executor_and_mul) +list(REMOVE_ITEM TEST_OPS test_assign_value_op) +list(REMOVE_ITEM TEST_OPS test_array_read_write_op) +list(REMOVE_ITEM TEST_OPS test_lod_rank_table) +list(REMOVE_ITEM TEST_OPS test_weight_normalization) +list(REMOVE_ITEM TEST_OPS test_conditional_block) +list(REMOVE_ITEM TEST_OPS test_parameter) +list(REMOVE_ITEM TEST_OPS test_registry) +list(REMOVE_ITEM TEST_OPS test_fetch_var) +list(REMOVE_ITEM TEST_OPS test_parallel_op) +list(REMOVE_ITEM TEST_OPS test_dynrnn_static_input) + +# tests that can be bundled together in one python process for speed. +if(WITH_FAST_BUNDLE_TEST) + py_test_modules("test_all_ops" MODULES ${TEST_OPS}) +else() + foreach(TEST_OP ${TEST_OPS}) + py_test_modules(${TEST_OP} MODULES ${TEST_OP}) + endforeach(TEST_OP) +endif(WITH_FAST_BUNDLE_TEST) + +# tests with high overhead +py_test_modules(test_warpctc_op MODULES test_warpctc_op ENVS FLAGS_warpctc_dir=${WARPCTC_LIB_DIR}) +py_test_modules(test_train_dyn_rnn MODULES test_dyn_rnn) +py_test_modules(test_mul_op MODULES test_mul_op) + +# tests that need to be run in separate process. +py_test_modules(test_multihead_attention MODULES test_multihead_attention) +py_test_modules(test_calc_gradient MODULES test_calc_gradient) +py_test_modules(test_while_op MODULES test_while_op) +py_test_modules(test_lod_array_length_op MODULES test_lod_array_length_op) +py_test_modules(test_reorder_lod_tensor MODULES test_reorder_lod_tensor) +py_test_modules(test_profiler MODULES test_profiler) +py_test_modules(test_normalization_wrapper MODULES test_normalization_wrapper) +py_test_modules(test_executor_and_mul MODULES test_executor_and_mul) +py_test_modules(test_assign_value_op MODULES test_assign_value_op) +py_test_modules(test_array_read_write_op MODULES test_array_read_write_op) +py_test_modules(test_lod_rank_table MODULES test_lod_rank_table) +py_test_modules(test_weight_normalization MODULES test_weight_normalization) +py_test_modules(test_conditional_block MODULES test_conditional_block) +py_test_modules(test_parameter MODULES test_parameter) +py_test_modules(test_registry MODULES test_registry) +py_test_modules(test_fetch_var MODULES test_fetch_var) +py_test_modules(test_dynrnn_static_input MODULES test_dynrnn_static_input) +py_test_modules(test_parallel_op MODULES test_parallel_op) diff --git a/python/paddle/v2/fluid/tests/unittests/__init__.py b/python/paddle/v2/fluid/tests/unittests/__init__.py new file mode 100644 index 0000000000..b94a21a7e4 --- /dev/null +++ b/python/paddle/v2/fluid/tests/unittests/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/python/paddle/v2/fluid/tests/decorators.py b/python/paddle/v2/fluid/tests/unittests/decorators.py similarity index 100% rename from python/paddle/v2/fluid/tests/decorators.py rename to python/paddle/v2/fluid/tests/unittests/decorators.py diff --git a/python/paddle/v2/fluid/tests/op_test.py b/python/paddle/v2/fluid/tests/unittests/op_test.py similarity index 100% rename from python/paddle/v2/fluid/tests/op_test.py rename to python/paddle/v2/fluid/tests/unittests/op_test.py diff --git a/python/paddle/v2/fluid/tests/test_accuracy_op.py b/python/paddle/v2/fluid/tests/unittests/test_accuracy_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_accuracy_op.py rename to python/paddle/v2/fluid/tests/unittests/test_accuracy_op.py diff --git a/python/paddle/v2/fluid/tests/test_activation_op.py b/python/paddle/v2/fluid/tests/unittests/test_activation_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_activation_op.py rename to python/paddle/v2/fluid/tests/unittests/test_activation_op.py diff --git a/python/paddle/v2/fluid/tests/test_adadelta_op.py b/python/paddle/v2/fluid/tests/unittests/test_adadelta_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_adadelta_op.py rename to python/paddle/v2/fluid/tests/unittests/test_adadelta_op.py diff --git a/python/paddle/v2/fluid/tests/test_adagrad_op.py b/python/paddle/v2/fluid/tests/unittests/test_adagrad_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_adagrad_op.py rename to python/paddle/v2/fluid/tests/unittests/test_adagrad_op.py diff --git a/python/paddle/v2/fluid/tests/test_adam_op.py b/python/paddle/v2/fluid/tests/unittests/test_adam_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_adam_op.py rename to python/paddle/v2/fluid/tests/unittests/test_adam_op.py diff --git a/python/paddle/v2/fluid/tests/test_adamax_op.py b/python/paddle/v2/fluid/tests/unittests/test_adamax_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_adamax_op.py rename to python/paddle/v2/fluid/tests/unittests/test_adamax_op.py diff --git a/python/paddle/v2/fluid/tests/test_array_read_write_op.py b/python/paddle/v2/fluid/tests/unittests/test_array_read_write_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_array_read_write_op.py rename to python/paddle/v2/fluid/tests/unittests/test_array_read_write_op.py diff --git a/python/paddle/v2/fluid/tests/test_assign_op.py b/python/paddle/v2/fluid/tests/unittests/test_assign_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_assign_op.py rename to python/paddle/v2/fluid/tests/unittests/test_assign_op.py diff --git a/python/paddle/v2/fluid/tests/test_assign_value_op.py b/python/paddle/v2/fluid/tests/unittests/test_assign_value_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_assign_value_op.py rename to python/paddle/v2/fluid/tests/unittests/test_assign_value_op.py diff --git a/python/paddle/v2/fluid/tests/test_auc_op.py b/python/paddle/v2/fluid/tests/unittests/test_auc_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_auc_op.py rename to python/paddle/v2/fluid/tests/unittests/test_auc_op.py diff --git a/python/paddle/v2/fluid/tests/test_batch_norm_op.py b/python/paddle/v2/fluid/tests/unittests/test_batch_norm_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_batch_norm_op.py rename to python/paddle/v2/fluid/tests/unittests/test_batch_norm_op.py diff --git 
a/python/paddle/v2/fluid/tests/test_beam_search_decode_op.py b/python/paddle/v2/fluid/tests/unittests/test_beam_search_decode_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_beam_search_decode_op.py rename to python/paddle/v2/fluid/tests/unittests/test_beam_search_decode_op.py diff --git a/python/paddle/v2/fluid/tests/test_beam_search_op.py b/python/paddle/v2/fluid/tests/unittests/test_beam_search_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_beam_search_op.py rename to python/paddle/v2/fluid/tests/unittests/test_beam_search_op.py diff --git a/python/paddle/v2/fluid/tests/test_bilinear_tensor_product_op.py b/python/paddle/v2/fluid/tests/unittests/test_bilinear_tensor_product_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_bilinear_tensor_product_op.py rename to python/paddle/v2/fluid/tests/unittests/test_bilinear_tensor_product_op.py diff --git a/python/paddle/v2/fluid/tests/test_bipartite_match_op.py b/python/paddle/v2/fluid/tests/unittests/test_bipartite_match_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_bipartite_match_op.py rename to python/paddle/v2/fluid/tests/unittests/test_bipartite_match_op.py diff --git a/python/paddle/v2/fluid/tests/test_box_coder_op.py b/python/paddle/v2/fluid/tests/unittests/test_box_coder_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_box_coder_op.py rename to python/paddle/v2/fluid/tests/unittests/test_box_coder_op.py diff --git a/python/paddle/v2/fluid/tests/test_calc_gradient.py b/python/paddle/v2/fluid/tests/unittests/test_calc_gradient.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_calc_gradient.py rename to python/paddle/v2/fluid/tests/unittests/test_calc_gradient.py diff --git a/python/paddle/v2/fluid/tests/test_cast_op.py b/python/paddle/v2/fluid/tests/unittests/test_cast_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_cast_op.py rename to python/paddle/v2/fluid/tests/unittests/test_cast_op.py diff --git a/python/paddle/v2/fluid/tests/test_chunk_eval_op.py b/python/paddle/v2/fluid/tests/unittests/test_chunk_eval_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_chunk_eval_op.py rename to python/paddle/v2/fluid/tests/unittests/test_chunk_eval_op.py diff --git a/python/paddle/v2/fluid/tests/test_clip_by_norm_op.py b/python/paddle/v2/fluid/tests/unittests/test_clip_by_norm_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_clip_by_norm_op.py rename to python/paddle/v2/fluid/tests/unittests/test_clip_by_norm_op.py diff --git a/python/paddle/v2/fluid/tests/test_clip_op.py b/python/paddle/v2/fluid/tests/unittests/test_clip_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_clip_op.py rename to python/paddle/v2/fluid/tests/unittests/test_clip_op.py diff --git a/python/paddle/v2/fluid/tests/test_compare_op.py b/python/paddle/v2/fluid/tests/unittests/test_compare_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_compare_op.py rename to python/paddle/v2/fluid/tests/unittests/test_compare_op.py diff --git a/python/paddle/v2/fluid/tests/test_concat_op.py b/python/paddle/v2/fluid/tests/unittests/test_concat_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_concat_op.py rename to python/paddle/v2/fluid/tests/unittests/test_concat_op.py diff --git a/python/paddle/v2/fluid/tests/test_cond_op.py b/python/paddle/v2/fluid/tests/unittests/test_cond_op.py similarity index 
96% rename from python/paddle/v2/fluid/tests/test_cond_op.py rename to python/paddle/v2/fluid/tests/unittests/test_cond_op.py index 6f4380166b..4a1e806c4b 100644 --- a/python/paddle/v2/fluid/tests/test_cond_op.py +++ b/python/paddle/v2/fluid/tests/unittests/test_cond_op.py @@ -125,7 +125,4 @@ class TestCondOp(unittest.TestCase): if __name__ == "__main__": - exit( - 0 - ) # FIXME(qijun): https://github.com/PaddlePaddle/Paddle/issues/5101#issuecomment-339814957 unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_conditional_block.py b/python/paddle/v2/fluid/tests/unittests/test_conditional_block.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_conditional_block.py rename to python/paddle/v2/fluid/tests/unittests/test_conditional_block.py diff --git a/python/paddle/v2/fluid/tests/test_const_value.py b/python/paddle/v2/fluid/tests/unittests/test_const_value.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_const_value.py rename to python/paddle/v2/fluid/tests/unittests/test_const_value.py diff --git a/python/paddle/v2/fluid/tests/test_conv2d_op.py b/python/paddle/v2/fluid/tests/unittests/test_conv2d_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_conv2d_op.py rename to python/paddle/v2/fluid/tests/unittests/test_conv2d_op.py diff --git a/python/paddle/v2/fluid/tests/test_conv2d_transpose_op.py b/python/paddle/v2/fluid/tests/unittests/test_conv2d_transpose_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_conv2d_transpose_op.py rename to python/paddle/v2/fluid/tests/unittests/test_conv2d_transpose_op.py diff --git a/python/paddle/v2/fluid/tests/test_conv3d_op.py b/python/paddle/v2/fluid/tests/unittests/test_conv3d_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_conv3d_op.py rename to python/paddle/v2/fluid/tests/unittests/test_conv3d_op.py diff --git a/python/paddle/v2/fluid/tests/test_conv3d_transpose_op.py b/python/paddle/v2/fluid/tests/unittests/test_conv3d_transpose_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_conv3d_transpose_op.py rename to python/paddle/v2/fluid/tests/unittests/test_conv3d_transpose_op.py diff --git a/python/paddle/v2/fluid/tests/test_conv_shift_op.py b/python/paddle/v2/fluid/tests/unittests/test_conv_shift_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_conv_shift_op.py rename to python/paddle/v2/fluid/tests/unittests/test_conv_shift_op.py diff --git a/python/paddle/v2/fluid/tests/test_cos_sim_op.py b/python/paddle/v2/fluid/tests/unittests/test_cos_sim_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_cos_sim_op.py rename to python/paddle/v2/fluid/tests/unittests/test_cos_sim_op.py diff --git a/python/paddle/v2/fluid/tests/test_create_op_doc_string.py b/python/paddle/v2/fluid/tests/unittests/test_create_op_doc_string.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_create_op_doc_string.py rename to python/paddle/v2/fluid/tests/unittests/test_create_op_doc_string.py diff --git a/python/paddle/v2/fluid/tests/test_crf_decoding_op.py b/python/paddle/v2/fluid/tests/unittests/test_crf_decoding_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_crf_decoding_op.py rename to python/paddle/v2/fluid/tests/unittests/test_crf_decoding_op.py diff --git a/python/paddle/v2/fluid/tests/test_crop_op.py b/python/paddle/v2/fluid/tests/unittests/test_crop_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_crop_op.py 
rename to python/paddle/v2/fluid/tests/unittests/test_crop_op.py diff --git a/python/paddle/v2/fluid/tests/test_cross_entropy_op.py b/python/paddle/v2/fluid/tests/unittests/test_cross_entropy_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_cross_entropy_op.py rename to python/paddle/v2/fluid/tests/unittests/test_cross_entropy_op.py diff --git a/python/paddle/v2/fluid/tests/test_ctc_align.py b/python/paddle/v2/fluid/tests/unittests/test_ctc_align.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_ctc_align.py rename to python/paddle/v2/fluid/tests/unittests/test_ctc_align.py diff --git a/python/paddle/v2/fluid/tests/test_cumsum_op.py b/python/paddle/v2/fluid/tests/unittests/test_cumsum_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_cumsum_op.py rename to python/paddle/v2/fluid/tests/unittests/test_cumsum_op.py diff --git a/python/paddle/v2/fluid/tests/test_decayed_adagrad_op.py b/python/paddle/v2/fluid/tests/unittests/test_decayed_adagrad_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_decayed_adagrad_op.py rename to python/paddle/v2/fluid/tests/unittests/test_decayed_adagrad_op.py diff --git a/python/paddle/v2/fluid/tests/test_default_scope_funcs.py b/python/paddle/v2/fluid/tests/unittests/test_default_scope_funcs.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_default_scope_funcs.py rename to python/paddle/v2/fluid/tests/unittests/test_default_scope_funcs.py diff --git a/python/paddle/v2/fluid/tests/test_detection_map_op.py b/python/paddle/v2/fluid/tests/unittests/test_detection_map_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_detection_map_op.py rename to python/paddle/v2/fluid/tests/unittests/test_detection_map_op.py diff --git a/python/paddle/v2/fluid/tests/test_detection_output_op.py b/python/paddle/v2/fluid/tests/unittests/test_detection_output_op.py similarity index 94% rename from python/paddle/v2/fluid/tests/test_detection_output_op.py rename to python/paddle/v2/fluid/tests/unittests/test_detection_output_op.py index 0a132652f1..9468131914 100644 --- a/python/paddle/v2/fluid/tests/test_detection_output_op.py +++ b/python/paddle/v2/fluid/tests/unittests/test_detection_output_op.py @@ -68,6 +68,4 @@ class TestUnpoolOp(OpTest): if __name__ == '__main__': - # FIXME: detection_output_op will be rewritten. This unittest should be - # enabled after rewriting. 
- exit(0) # temporary disable this unittest + unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_dropout_op.py b/python/paddle/v2/fluid/tests/unittests/test_dropout_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_dropout_op.py rename to python/paddle/v2/fluid/tests/unittests/test_dropout_op.py diff --git a/python/paddle/v2/fluid/tests/test_dyn_rnn.py b/python/paddle/v2/fluid/tests/unittests/test_dyn_rnn.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_dyn_rnn.py rename to python/paddle/v2/fluid/tests/unittests/test_dyn_rnn.py diff --git a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py b/python/paddle/v2/fluid/tests/unittests/test_dynrnn_gradient_check.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py rename to python/paddle/v2/fluid/tests/unittests/test_dynrnn_gradient_check.py diff --git a/python/paddle/v2/fluid/tests/test_dynrnn_static_input.py b/python/paddle/v2/fluid/tests/unittests/test_dynrnn_static_input.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_dynrnn_static_input.py rename to python/paddle/v2/fluid/tests/unittests/test_dynrnn_static_input.py diff --git a/python/paddle/v2/fluid/tests/test_edit_distance_op.py b/python/paddle/v2/fluid/tests/unittests/test_edit_distance_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_edit_distance_op.py rename to python/paddle/v2/fluid/tests/unittests/test_edit_distance_op.py diff --git a/python/paddle/v2/fluid/tests/test_elementwise_add_op.py b/python/paddle/v2/fluid/tests/unittests/test_elementwise_add_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_elementwise_add_op.py rename to python/paddle/v2/fluid/tests/unittests/test_elementwise_add_op.py diff --git a/python/paddle/v2/fluid/tests/test_elementwise_div_op.py b/python/paddle/v2/fluid/tests/unittests/test_elementwise_div_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_elementwise_div_op.py rename to python/paddle/v2/fluid/tests/unittests/test_elementwise_div_op.py diff --git a/python/paddle/v2/fluid/tests/test_elementwise_max_op.py b/python/paddle/v2/fluid/tests/unittests/test_elementwise_max_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_elementwise_max_op.py rename to python/paddle/v2/fluid/tests/unittests/test_elementwise_max_op.py diff --git a/python/paddle/v2/fluid/tests/test_elementwise_min_op.py b/python/paddle/v2/fluid/tests/unittests/test_elementwise_min_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_elementwise_min_op.py rename to python/paddle/v2/fluid/tests/unittests/test_elementwise_min_op.py diff --git a/python/paddle/v2/fluid/tests/test_elementwise_mul_op.py b/python/paddle/v2/fluid/tests/unittests/test_elementwise_mul_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_elementwise_mul_op.py rename to python/paddle/v2/fluid/tests/unittests/test_elementwise_mul_op.py diff --git a/python/paddle/v2/fluid/tests/test_elementwise_pow_op.py b/python/paddle/v2/fluid/tests/unittests/test_elementwise_pow_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_elementwise_pow_op.py rename to python/paddle/v2/fluid/tests/unittests/test_elementwise_pow_op.py diff --git a/python/paddle/v2/fluid/tests/test_elementwise_sub_op.py b/python/paddle/v2/fluid/tests/unittests/test_elementwise_sub_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_elementwise_sub_op.py 
rename to python/paddle/v2/fluid/tests/unittests/test_elementwise_sub_op.py diff --git a/python/paddle/v2/fluid/tests/test_exception.py b/python/paddle/v2/fluid/tests/unittests/test_exception.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_exception.py rename to python/paddle/v2/fluid/tests/unittests/test_exception.py diff --git a/python/paddle/v2/fluid/tests/test_executor_and_mul.py b/python/paddle/v2/fluid/tests/unittests/test_executor_and_mul.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_executor_and_mul.py rename to python/paddle/v2/fluid/tests/unittests/test_executor_and_mul.py diff --git a/python/paddle/v2/fluid/tests/test_expand_op.py b/python/paddle/v2/fluid/tests/unittests/test_expand_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_expand_op.py rename to python/paddle/v2/fluid/tests/unittests/test_expand_op.py diff --git a/python/paddle/v2/fluid/tests/test_feed_fetch_method.py b/python/paddle/v2/fluid/tests/unittests/test_feed_fetch_method.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_feed_fetch_method.py rename to python/paddle/v2/fluid/tests/unittests/test_feed_fetch_method.py diff --git a/python/paddle/v2/fluid/tests/test_fetch_var.py b/python/paddle/v2/fluid/tests/unittests/test_fetch_var.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_fetch_var.py rename to python/paddle/v2/fluid/tests/unittests/test_fetch_var.py diff --git a/python/paddle/v2/fluid/tests/test_fill_constant_batch_size_like_op.py b/python/paddle/v2/fluid/tests/unittests/test_fill_constant_batch_size_like_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_fill_constant_batch_size_like_op.py rename to python/paddle/v2/fluid/tests/unittests/test_fill_constant_batch_size_like_op.py diff --git a/python/paddle/v2/fluid/tests/test_fill_constant_op.py b/python/paddle/v2/fluid/tests/unittests/test_fill_constant_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_fill_constant_op.py rename to python/paddle/v2/fluid/tests/unittests/test_fill_constant_op.py diff --git a/python/paddle/v2/fluid/tests/test_fill_op.py b/python/paddle/v2/fluid/tests/unittests/test_fill_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_fill_op.py rename to python/paddle/v2/fluid/tests/unittests/test_fill_op.py diff --git a/python/paddle/v2/fluid/tests/test_fill_zeros_like_op.py b/python/paddle/v2/fluid/tests/unittests/test_fill_zeros_like_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_fill_zeros_like_op.py rename to python/paddle/v2/fluid/tests/unittests/test_fill_zeros_like_op.py diff --git a/python/paddle/v2/fluid/tests/test_framework_debug_str.py b/python/paddle/v2/fluid/tests/unittests/test_framework_debug_str.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_framework_debug_str.py rename to python/paddle/v2/fluid/tests/unittests/test_framework_debug_str.py diff --git a/python/paddle/v2/fluid/tests/test_ftrl_op.py b/python/paddle/v2/fluid/tests/unittests/test_ftrl_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_ftrl_op.py rename to python/paddle/v2/fluid/tests/unittests/test_ftrl_op.py diff --git a/python/paddle/v2/fluid/tests/test_gather_op.py b/python/paddle/v2/fluid/tests/unittests/test_gather_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_gather_op.py rename to python/paddle/v2/fluid/tests/unittests/test_gather_op.py diff --git 
a/python/paddle/v2/fluid/tests/test_gaussian_random_op.py b/python/paddle/v2/fluid/tests/unittests/test_gaussian_random_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_gaussian_random_op.py rename to python/paddle/v2/fluid/tests/unittests/test_gaussian_random_op.py diff --git a/python/paddle/v2/fluid/tests/test_get_places_op.py b/python/paddle/v2/fluid/tests/unittests/test_get_places_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_get_places_op.py rename to python/paddle/v2/fluid/tests/unittests/test_get_places_op.py diff --git a/python/paddle/v2/fluid/tests/test_gru_op.py b/python/paddle/v2/fluid/tests/unittests/test_gru_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_gru_op.py rename to python/paddle/v2/fluid/tests/unittests/test_gru_op.py diff --git a/python/paddle/v2/fluid/tests/test_gru_unit_op.py b/python/paddle/v2/fluid/tests/unittests/test_gru_unit_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_gru_unit_op.py rename to python/paddle/v2/fluid/tests/unittests/test_gru_unit_op.py diff --git a/python/paddle/v2/fluid/tests/test_hinge_loss_op.py b/python/paddle/v2/fluid/tests/unittests/test_hinge_loss_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_hinge_loss_op.py rename to python/paddle/v2/fluid/tests/unittests/test_hinge_loss_op.py diff --git a/python/paddle/v2/fluid/tests/test_huber_loss_op.py b/python/paddle/v2/fluid/tests/unittests/test_huber_loss_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_huber_loss_op.py rename to python/paddle/v2/fluid/tests/unittests/test_huber_loss_op.py diff --git a/python/paddle/v2/fluid/tests/test_im2sequence_op.py b/python/paddle/v2/fluid/tests/unittests/test_im2sequence_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_im2sequence_op.py rename to python/paddle/v2/fluid/tests/unittests/test_im2sequence_op.py diff --git a/python/paddle/v2/fluid/tests/test_image_classification_layer.py b/python/paddle/v2/fluid/tests/unittests/test_image_classification_layer.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_image_classification_layer.py rename to python/paddle/v2/fluid/tests/unittests/test_image_classification_layer.py diff --git a/python/paddle/v2/fluid/tests/test_infer_shape.py b/python/paddle/v2/fluid/tests/unittests/test_infer_shape.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_infer_shape.py rename to python/paddle/v2/fluid/tests/unittests/test_infer_shape.py diff --git a/python/paddle/v2/fluid/tests/test_inference_model_io.py b/python/paddle/v2/fluid/tests/unittests/test_inference_model_io.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_inference_model_io.py rename to python/paddle/v2/fluid/tests/unittests/test_inference_model_io.py diff --git a/python/paddle/v2/fluid/tests/test_initializer.py b/python/paddle/v2/fluid/tests/unittests/test_initializer.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_initializer.py rename to python/paddle/v2/fluid/tests/unittests/test_initializer.py diff --git a/python/paddle/v2/fluid/tests/test_iou_similarity_op.py b/python/paddle/v2/fluid/tests/unittests/test_iou_similarity_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_iou_similarity_op.py rename to python/paddle/v2/fluid/tests/unittests/test_iou_similarity_op.py diff --git a/python/paddle/v2/fluid/tests/test_is_empty_op.py 
b/python/paddle/v2/fluid/tests/unittests/test_is_empty_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_is_empty_op.py rename to python/paddle/v2/fluid/tests/unittests/test_is_empty_op.py diff --git a/python/paddle/v2/fluid/tests/test_l1_norm_op.py b/python/paddle/v2/fluid/tests/unittests/test_l1_norm_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_l1_norm_op.py rename to python/paddle/v2/fluid/tests/unittests/test_l1_norm_op.py diff --git a/python/paddle/v2/fluid/tests/test_label_smooth_op.py b/python/paddle/v2/fluid/tests/unittests/test_label_smooth_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_label_smooth_op.py rename to python/paddle/v2/fluid/tests/unittests/test_label_smooth_op.py diff --git a/python/paddle/v2/fluid/tests/test_layer_norm_op.py b/python/paddle/v2/fluid/tests/unittests/test_layer_norm_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_layer_norm_op.py rename to python/paddle/v2/fluid/tests/unittests/test_layer_norm_op.py diff --git a/python/paddle/v2/fluid/tests/test_layers.py b/python/paddle/v2/fluid/tests/unittests/test_layers.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_layers.py rename to python/paddle/v2/fluid/tests/unittests/test_layers.py diff --git a/python/paddle/v2/fluid/tests/test_learning_rate_decay.py b/python/paddle/v2/fluid/tests/unittests/test_learning_rate_decay.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_learning_rate_decay.py rename to python/paddle/v2/fluid/tests/unittests/test_learning_rate_decay.py diff --git a/python/paddle/v2/fluid/tests/test_linear_chain_crf_op.py b/python/paddle/v2/fluid/tests/unittests/test_linear_chain_crf_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_linear_chain_crf_op.py rename to python/paddle/v2/fluid/tests/unittests/test_linear_chain_crf_op.py diff --git a/python/paddle/v2/fluid/tests/test_lod_array_length_op.py b/python/paddle/v2/fluid/tests/unittests/test_lod_array_length_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_lod_array_length_op.py rename to python/paddle/v2/fluid/tests/unittests/test_lod_array_length_op.py diff --git a/python/paddle/v2/fluid/tests/test_lod_rank_table.py b/python/paddle/v2/fluid/tests/unittests/test_lod_rank_table.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_lod_rank_table.py rename to python/paddle/v2/fluid/tests/unittests/test_lod_rank_table.py diff --git a/python/paddle/v2/fluid/tests/test_lod_reset_op.py b/python/paddle/v2/fluid/tests/unittests/test_lod_reset_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_lod_reset_op.py rename to python/paddle/v2/fluid/tests/unittests/test_lod_reset_op.py diff --git a/python/paddle/v2/fluid/tests/test_lod_tensor_array.py b/python/paddle/v2/fluid/tests/unittests/test_lod_tensor_array.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_lod_tensor_array.py rename to python/paddle/v2/fluid/tests/unittests/test_lod_tensor_array.py diff --git a/python/paddle/v2/fluid/tests/test_lod_tensor_array_ops.py b/python/paddle/v2/fluid/tests/unittests/test_lod_tensor_array_ops.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_lod_tensor_array_ops.py rename to python/paddle/v2/fluid/tests/unittests/test_lod_tensor_array_ops.py diff --git a/python/paddle/v2/fluid/tests/test_log_loss_op.py b/python/paddle/v2/fluid/tests/unittests/test_log_loss_op.py similarity index 100% 
rename from python/paddle/v2/fluid/tests/test_log_loss_op.py rename to python/paddle/v2/fluid/tests/unittests/test_log_loss_op.py diff --git a/python/paddle/v2/fluid/tests/test_logical_op.py b/python/paddle/v2/fluid/tests/unittests/test_logical_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_logical_op.py rename to python/paddle/v2/fluid/tests/unittests/test_logical_op.py diff --git a/python/paddle/v2/fluid/tests/test_lookup_table_op.py b/python/paddle/v2/fluid/tests/unittests/test_lookup_table_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_lookup_table_op.py rename to python/paddle/v2/fluid/tests/unittests/test_lookup_table_op.py diff --git a/python/paddle/v2/fluid/tests/test_lrn_op.py b/python/paddle/v2/fluid/tests/unittests/test_lrn_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_lrn_op.py rename to python/paddle/v2/fluid/tests/unittests/test_lrn_op.py diff --git a/python/paddle/v2/fluid/tests/test_lstm_op.py b/python/paddle/v2/fluid/tests/unittests/test_lstm_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_lstm_op.py rename to python/paddle/v2/fluid/tests/unittests/test_lstm_op.py diff --git a/python/paddle/v2/fluid/tests/test_lstm_unit_op.py b/python/paddle/v2/fluid/tests/unittests/test_lstm_unit_op.py similarity index 95% rename from python/paddle/v2/fluid/tests/test_lstm_unit_op.py rename to python/paddle/v2/fluid/tests/unittests/test_lstm_unit_op.py index af0c3db701..e343265874 100644 --- a/python/paddle/v2/fluid/tests/test_lstm_unit_op.py +++ b/python/paddle/v2/fluid/tests/unittests/test_lstm_unit_op.py @@ -49,6 +49,4 @@ class LstmUnitTest(OpTest): if __name__ == "__main__": - # FIXME(qijun) https://github.com/PaddlePaddle/Paddle/issues/5185 - exit(0) unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_lstmp_op.py b/python/paddle/v2/fluid/tests/unittests/test_lstmp_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_lstmp_op.py rename to python/paddle/v2/fluid/tests/unittests/test_lstmp_op.py diff --git a/python/paddle/v2/fluid/tests/test_margin_rank_loss_op.py b/python/paddle/v2/fluid/tests/unittests/test_margin_rank_loss_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_margin_rank_loss_op.py rename to python/paddle/v2/fluid/tests/unittests/test_margin_rank_loss_op.py diff --git a/python/paddle/v2/fluid/tests/test_math_op_patch.py b/python/paddle/v2/fluid/tests/unittests/test_math_op_patch.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_math_op_patch.py rename to python/paddle/v2/fluid/tests/unittests/test_math_op_patch.py diff --git a/python/paddle/v2/fluid/tests/test_matmul_op.py b/python/paddle/v2/fluid/tests/unittests/test_matmul_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_matmul_op.py rename to python/paddle/v2/fluid/tests/unittests/test_matmul_op.py diff --git a/python/paddle/v2/fluid/tests/test_maxout_op.py b/python/paddle/v2/fluid/tests/unittests/test_maxout_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_maxout_op.py rename to python/paddle/v2/fluid/tests/unittests/test_maxout_op.py diff --git a/python/paddle/v2/fluid/tests/test_mean_op.py b/python/paddle/v2/fluid/tests/unittests/test_mean_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_mean_op.py rename to python/paddle/v2/fluid/tests/unittests/test_mean_op.py diff --git a/python/paddle/v2/fluid/tests/test_memory_optimization_transpiler.py 
b/python/paddle/v2/fluid/tests/unittests/test_memory_optimization_transpiler.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_memory_optimization_transpiler.py rename to python/paddle/v2/fluid/tests/unittests/test_memory_optimization_transpiler.py diff --git a/python/paddle/v2/fluid/tests/test_mine_hard_examples_op.py b/python/paddle/v2/fluid/tests/unittests/test_mine_hard_examples_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_mine_hard_examples_op.py rename to python/paddle/v2/fluid/tests/unittests/test_mine_hard_examples_op.py diff --git a/python/paddle/v2/fluid/tests/test_minus_op.py b/python/paddle/v2/fluid/tests/unittests/test_minus_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_minus_op.py rename to python/paddle/v2/fluid/tests/unittests/test_minus_op.py diff --git a/python/paddle/v2/fluid/tests/test_modified_huber_loss_op.py b/python/paddle/v2/fluid/tests/unittests/test_modified_huber_loss_op.py similarity index 96% rename from python/paddle/v2/fluid/tests/test_modified_huber_loss_op.py rename to python/paddle/v2/fluid/tests/unittests/test_modified_huber_loss_op.py index def48c9261..62035efe8e 100644 --- a/python/paddle/v2/fluid/tests/test_modified_huber_loss_op.py +++ b/python/paddle/v2/fluid/tests/unittests/test_modified_huber_loss_op.py @@ -59,6 +59,4 @@ class TestModifiedHuberLossOp(OpTest): if __name__ == '__main__': - exit(0) - # FIXME(qijun) https://github.com/PaddlePaddle/Paddle/issues/5184 unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_momentum_op.py b/python/paddle/v2/fluid/tests/unittests/test_momentum_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_momentum_op.py rename to python/paddle/v2/fluid/tests/unittests/test_momentum_op.py diff --git a/python/paddle/v2/fluid/tests/test_mul_op.py b/python/paddle/v2/fluid/tests/unittests/test_mul_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_mul_op.py rename to python/paddle/v2/fluid/tests/unittests/test_mul_op.py diff --git a/python/paddle/v2/fluid/tests/test_multiclass_nms_op.py b/python/paddle/v2/fluid/tests/unittests/test_multiclass_nms_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_multiclass_nms_op.py rename to python/paddle/v2/fluid/tests/unittests/test_multiclass_nms_op.py diff --git a/python/paddle/v2/fluid/tests/test_multihead_attention.py b/python/paddle/v2/fluid/tests/unittests/test_multihead_attention.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_multihead_attention.py rename to python/paddle/v2/fluid/tests/unittests/test_multihead_attention.py diff --git a/python/paddle/v2/fluid/tests/test_multiplex_op.py b/python/paddle/v2/fluid/tests/unittests/test_multiplex_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_multiplex_op.py rename to python/paddle/v2/fluid/tests/unittests/test_multiplex_op.py diff --git a/python/paddle/v2/fluid/tests/test_nce.py b/python/paddle/v2/fluid/tests/unittests/test_nce.py similarity index 97% rename from python/paddle/v2/fluid/tests/test_nce.py rename to python/paddle/v2/fluid/tests/unittests/test_nce.py index 068081972d..76ecc8ba08 100644 --- a/python/paddle/v2/fluid/tests/test_nce.py +++ b/python/paddle/v2/fluid/tests/unittests/test_nce.py @@ -109,6 +109,4 @@ class TestNCECase1(TestNCE): if __name__ == '__main__': - # FIXME(qijun) https://github.com/PaddlePaddle/Paddle/issues/7778 - exit(0) unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_net.py 
b/python/paddle/v2/fluid/tests/unittests/test_net.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_net.py rename to python/paddle/v2/fluid/tests/unittests/test_net.py diff --git a/python/paddle/v2/fluid/tests/test_norm_op.py b/python/paddle/v2/fluid/tests/unittests/test_norm_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_norm_op.py rename to python/paddle/v2/fluid/tests/unittests/test_norm_op.py diff --git a/python/paddle/v2/fluid/tests/test_normalization_wrapper.py b/python/paddle/v2/fluid/tests/unittests/test_normalization_wrapper.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_normalization_wrapper.py rename to python/paddle/v2/fluid/tests/unittests/test_normalization_wrapper.py diff --git a/python/paddle/v2/fluid/tests/test_one_hot_op.py b/python/paddle/v2/fluid/tests/unittests/test_one_hot_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_one_hot_op.py rename to python/paddle/v2/fluid/tests/unittests/test_one_hot_op.py diff --git a/python/paddle/v2/fluid/tests/test_op_support_gpu.py b/python/paddle/v2/fluid/tests/unittests/test_op_support_gpu.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_op_support_gpu.py rename to python/paddle/v2/fluid/tests/unittests/test_op_support_gpu.py diff --git a/python/paddle/v2/fluid/tests/test_operator.py b/python/paddle/v2/fluid/tests/unittests/test_operator.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_operator.py rename to python/paddle/v2/fluid/tests/unittests/test_operator.py diff --git a/python/paddle/v2/fluid/tests/test_operator_desc.py b/python/paddle/v2/fluid/tests/unittests/test_operator_desc.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_operator_desc.py rename to python/paddle/v2/fluid/tests/unittests/test_operator_desc.py diff --git a/python/paddle/v2/fluid/tests/test_optimizer.py b/python/paddle/v2/fluid/tests/unittests/test_optimizer.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_optimizer.py rename to python/paddle/v2/fluid/tests/unittests/test_optimizer.py diff --git a/python/paddle/v2/fluid/tests/test_pad_op.py b/python/paddle/v2/fluid/tests/unittests/test_pad_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_pad_op.py rename to python/paddle/v2/fluid/tests/unittests/test_pad_op.py diff --git a/python/paddle/v2/fluid/tests/test_parallel_op.py b/python/paddle/v2/fluid/tests/unittests/test_parallel_op.py similarity index 96% rename from python/paddle/v2/fluid/tests/test_parallel_op.py rename to python/paddle/v2/fluid/tests/unittests/test_parallel_op.py index a0fc91f6de..0d377ae70c 100644 --- a/python/paddle/v2/fluid/tests/test_parallel_op.py +++ b/python/paddle/v2/fluid/tests/unittests/test_parallel_op.py @@ -138,9 +138,10 @@ class BaseParallelForTest(unittest.TestCase): def _impl_(a, b, fetch_id, item_id): item_str = ['CPU', 'ParallelCPU', 'GPU', 'ParallelGPU'] - flag = numpy.allclose(a, b, rtol=0.1) - self.assertTrue(flag, "The {0} are different in {1}".format( - fetch[fetch_id], item_str[item_id])) + flag = numpy.allclose(a, b, rtol=0.1, atol=1e-3) + self.assertTrue(flag, + "The {0} are different in {1}, {2} vs {3}".format( + fetch[fetch_id], item_str[item_id], a, b)) for i, items in enumerate(zip(*args)): self.assertGreater(len(items), 0) diff --git a/python/paddle/v2/fluid/tests/test_parameter.py b/python/paddle/v2/fluid/tests/unittests/test_parameter.py similarity index 100% rename from 
python/paddle/v2/fluid/tests/test_parameter.py rename to python/paddle/v2/fluid/tests/unittests/test_parameter.py diff --git a/python/paddle/v2/fluid/tests/test_pool2d_op.py b/python/paddle/v2/fluid/tests/unittests/test_pool2d_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_pool2d_op.py rename to python/paddle/v2/fluid/tests/unittests/test_pool2d_op.py diff --git a/python/paddle/v2/fluid/tests/test_pool3d_op.py b/python/paddle/v2/fluid/tests/unittests/test_pool3d_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_pool3d_op.py rename to python/paddle/v2/fluid/tests/unittests/test_pool3d_op.py diff --git a/python/paddle/v2/fluid/tests/test_pool_max_op.py b/python/paddle/v2/fluid/tests/unittests/test_pool_max_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_pool_max_op.py rename to python/paddle/v2/fluid/tests/unittests/test_pool_max_op.py diff --git a/python/paddle/v2/fluid/tests/test_positive_negative_pair_op.py b/python/paddle/v2/fluid/tests/unittests/test_positive_negative_pair_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_positive_negative_pair_op.py rename to python/paddle/v2/fluid/tests/unittests/test_positive_negative_pair_op.py diff --git a/python/paddle/v2/fluid/tests/test_precision_recall_op.py b/python/paddle/v2/fluid/tests/unittests/test_precision_recall_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_precision_recall_op.py rename to python/paddle/v2/fluid/tests/unittests/test_precision_recall_op.py diff --git a/python/paddle/v2/fluid/tests/test_prelu_op.py b/python/paddle/v2/fluid/tests/unittests/test_prelu_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_prelu_op.py rename to python/paddle/v2/fluid/tests/unittests/test_prelu_op.py diff --git a/python/paddle/v2/fluid/tests/test_print_op.py b/python/paddle/v2/fluid/tests/unittests/test_print_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_print_op.py rename to python/paddle/v2/fluid/tests/unittests/test_print_op.py diff --git a/python/paddle/v2/fluid/tests/test_prior_box_op.py b/python/paddle/v2/fluid/tests/unittests/test_prior_box_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_prior_box_op.py rename to python/paddle/v2/fluid/tests/unittests/test_prior_box_op.py diff --git a/python/paddle/v2/fluid/tests/test_profiler.py b/python/paddle/v2/fluid/tests/unittests/test_profiler.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_profiler.py rename to python/paddle/v2/fluid/tests/unittests/test_profiler.py diff --git a/python/paddle/v2/fluid/tests/test_program.py b/python/paddle/v2/fluid/tests/unittests/test_program.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_program.py rename to python/paddle/v2/fluid/tests/unittests/test_program.py diff --git a/python/paddle/v2/fluid/tests/test_protobuf.py b/python/paddle/v2/fluid/tests/unittests/test_protobuf.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_protobuf.py rename to python/paddle/v2/fluid/tests/unittests/test_protobuf.py diff --git a/python/paddle/v2/fluid/tests/test_protobuf_descs.py b/python/paddle/v2/fluid/tests/unittests/test_protobuf_descs.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_protobuf_descs.py rename to python/paddle/v2/fluid/tests/unittests/test_protobuf_descs.py diff --git a/python/paddle/v2/fluid/tests/test_proximal_adagrad_op.py 
b/python/paddle/v2/fluid/tests/unittests/test_proximal_adagrad_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_proximal_adagrad_op.py rename to python/paddle/v2/fluid/tests/unittests/test_proximal_adagrad_op.py diff --git a/python/paddle/v2/fluid/tests/test_proximal_gd_op.py b/python/paddle/v2/fluid/tests/unittests/test_proximal_gd_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_proximal_gd_op.py rename to python/paddle/v2/fluid/tests/unittests/test_proximal_gd_op.py diff --git a/python/paddle/v2/fluid/tests/test_rank_loss_op.py b/python/paddle/v2/fluid/tests/unittests/test_rank_loss_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_rank_loss_op.py rename to python/paddle/v2/fluid/tests/unittests/test_rank_loss_op.py diff --git a/python/paddle/v2/fluid/tests/test_recurrent_op.py b/python/paddle/v2/fluid/tests/unittests/test_recurrent_op.py similarity index 99% rename from python/paddle/v2/fluid/tests/test_recurrent_op.py rename to python/paddle/v2/fluid/tests/unittests/test_recurrent_op.py index e540ca43b6..177d8fc65f 100644 --- a/python/paddle/v2/fluid/tests/test_recurrent_op.py +++ b/python/paddle/v2/fluid/tests/unittests/test_recurrent_op.py @@ -468,6 +468,4 @@ class RecurrentOpNoMemBootTest(RecurrentOpTest1): if __name__ == '__main__': - # FIXME(qijun) https://github.com/PaddlePaddle/Paddle/issues/6152 - exit(0) unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_recv_op.py b/python/paddle/v2/fluid/tests/unittests/test_recv_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_recv_op.py rename to python/paddle/v2/fluid/tests/unittests/test_recv_op.py diff --git a/python/paddle/v2/fluid/tests/test_reduce_op.py b/python/paddle/v2/fluid/tests/unittests/test_reduce_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_reduce_op.py rename to python/paddle/v2/fluid/tests/unittests/test_reduce_op.py diff --git a/python/paddle/v2/fluid/tests/test_registry.py b/python/paddle/v2/fluid/tests/unittests/test_registry.py similarity index 94% rename from python/paddle/v2/fluid/tests/test_registry.py rename to python/paddle/v2/fluid/tests/unittests/test_registry.py index bf4dc64186..82527a6ec7 100644 --- a/python/paddle/v2/fluid/tests/test_registry.py +++ b/python/paddle/v2/fluid/tests/unittests/test_registry.py @@ -28,4 +28,4 @@ class TestRegistry(unittest.TestCase): exe = fluid.Executor(place) X = np.random.random((10, 10)).astype("float32") mean_out = exe.run(feed={"X": X}, fetch_list=[output]) - self.assertAlmostEqual(np.mean(X), mean_out[0]) + self.assertAlmostEqual(np.mean(X), mean_out[0], delta=1e-5) diff --git a/python/paddle/v2/fluid/tests/test_regularizer.py b/python/paddle/v2/fluid/tests/unittests/test_regularizer.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_regularizer.py rename to python/paddle/v2/fluid/tests/unittests/test_regularizer.py diff --git a/python/paddle/v2/fluid/tests/test_reorder_lod_tensor.py b/python/paddle/v2/fluid/tests/unittests/test_reorder_lod_tensor.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_reorder_lod_tensor.py rename to python/paddle/v2/fluid/tests/unittests/test_reorder_lod_tensor.py diff --git a/python/paddle/v2/fluid/tests/test_reshape_op.py b/python/paddle/v2/fluid/tests/unittests/test_reshape_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_reshape_op.py rename to python/paddle/v2/fluid/tests/unittests/test_reshape_op.py diff --git 
a/python/paddle/v2/fluid/tests/test_rmsprop_op.py b/python/paddle/v2/fluid/tests/unittests/test_rmsprop_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_rmsprop_op.py rename to python/paddle/v2/fluid/tests/unittests/test_rmsprop_op.py diff --git a/python/paddle/v2/fluid/tests/test_rnn_memory_helper_op.py b/python/paddle/v2/fluid/tests/unittests/test_rnn_memory_helper_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_rnn_memory_helper_op.py rename to python/paddle/v2/fluid/tests/unittests/test_rnn_memory_helper_op.py diff --git a/python/paddle/v2/fluid/tests/test_roi_pool_op.py b/python/paddle/v2/fluid/tests/unittests/test_roi_pool_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_roi_pool_op.py rename to python/paddle/v2/fluid/tests/unittests/test_roi_pool_op.py diff --git a/python/paddle/v2/fluid/tests/test_row_conv_op.py b/python/paddle/v2/fluid/tests/unittests/test_row_conv_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_row_conv_op.py rename to python/paddle/v2/fluid/tests/unittests/test_row_conv_op.py diff --git a/python/paddle/v2/fluid/tests/test_scale_op.py b/python/paddle/v2/fluid/tests/unittests/test_scale_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_scale_op.py rename to python/paddle/v2/fluid/tests/unittests/test_scale_op.py diff --git a/python/paddle/v2/fluid/tests/test_scatter_op.py b/python/paddle/v2/fluid/tests/unittests/test_scatter_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_scatter_op.py rename to python/paddle/v2/fluid/tests/unittests/test_scatter_op.py diff --git a/python/paddle/v2/fluid/tests/test_scope.py b/python/paddle/v2/fluid/tests/unittests/test_scope.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_scope.py rename to python/paddle/v2/fluid/tests/unittests/test_scope.py diff --git a/python/paddle/v2/fluid/tests/test_selected_rows.py b/python/paddle/v2/fluid/tests/unittests/test_selected_rows.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_selected_rows.py rename to python/paddle/v2/fluid/tests/unittests/test_selected_rows.py diff --git a/python/paddle/v2/fluid/tests/test_seq_concat_op.py b/python/paddle/v2/fluid/tests/unittests/test_seq_concat_op.py similarity index 99% rename from python/paddle/v2/fluid/tests/test_seq_concat_op.py rename to python/paddle/v2/fluid/tests/unittests/test_seq_concat_op.py index 1c9b61d8fd..10592d127f 100644 --- a/python/paddle/v2/fluid/tests/test_seq_concat_op.py +++ b/python/paddle/v2/fluid/tests/unittests/test_seq_concat_op.py @@ -16,7 +16,6 @@ import unittest import numpy as np import sys from op_test import OpTest -exit(0) def to_abs_lod(lod): diff --git a/python/paddle/v2/fluid/tests/test_seq_conv.py b/python/paddle/v2/fluid/tests/unittests/test_seq_conv.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_seq_conv.py rename to python/paddle/v2/fluid/tests/unittests/test_seq_conv.py diff --git a/python/paddle/v2/fluid/tests/test_seq_pool.py b/python/paddle/v2/fluid/tests/unittests/test_seq_pool.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_seq_pool.py rename to python/paddle/v2/fluid/tests/unittests/test_seq_pool.py diff --git a/python/paddle/v2/fluid/tests/test_sequence_erase_op.py b/python/paddle/v2/fluid/tests/unittests/test_sequence_erase_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_sequence_erase_op.py rename to 
python/paddle/v2/fluid/tests/unittests/test_sequence_erase_op.py diff --git a/python/paddle/v2/fluid/tests/test_sequence_expand.py b/python/paddle/v2/fluid/tests/unittests/test_sequence_expand.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_sequence_expand.py rename to python/paddle/v2/fluid/tests/unittests/test_sequence_expand.py diff --git a/python/paddle/v2/fluid/tests/test_sequence_reshape.py b/python/paddle/v2/fluid/tests/unittests/test_sequence_reshape.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_sequence_reshape.py rename to python/paddle/v2/fluid/tests/unittests/test_sequence_reshape.py diff --git a/python/paddle/v2/fluid/tests/test_sequence_slice_op.py b/python/paddle/v2/fluid/tests/unittests/test_sequence_slice_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_sequence_slice_op.py rename to python/paddle/v2/fluid/tests/unittests/test_sequence_slice_op.py diff --git a/python/paddle/v2/fluid/tests/test_sequence_softmax_op.py b/python/paddle/v2/fluid/tests/unittests/test_sequence_softmax_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_sequence_softmax_op.py rename to python/paddle/v2/fluid/tests/unittests/test_sequence_softmax_op.py diff --git a/python/paddle/v2/fluid/tests/test_sgd_op.py b/python/paddle/v2/fluid/tests/unittests/test_sgd_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_sgd_op.py rename to python/paddle/v2/fluid/tests/unittests/test_sgd_op.py diff --git a/python/paddle/v2/fluid/tests/test_shrink_rnn_memory.py b/python/paddle/v2/fluid/tests/unittests/test_shrink_rnn_memory.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_shrink_rnn_memory.py rename to python/paddle/v2/fluid/tests/unittests/test_shrink_rnn_memory.py diff --git a/python/paddle/v2/fluid/tests/test_sigmoid_cross_entropy_with_logits_op.py b/python/paddle/v2/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_sigmoid_cross_entropy_with_logits_op.py rename to python/paddle/v2/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py diff --git a/python/paddle/v2/fluid/tests/test_sign_op.py b/python/paddle/v2/fluid/tests/unittests/test_sign_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_sign_op.py rename to python/paddle/v2/fluid/tests/unittests/test_sign_op.py diff --git a/python/paddle/v2/fluid/tests/test_smooth_l1_loss_op.py b/python/paddle/v2/fluid/tests/unittests/test_smooth_l1_loss_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_smooth_l1_loss_op.py rename to python/paddle/v2/fluid/tests/unittests/test_smooth_l1_loss_op.py diff --git a/python/paddle/v2/fluid/tests/test_softmax_op.py b/python/paddle/v2/fluid/tests/unittests/test_softmax_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_softmax_op.py rename to python/paddle/v2/fluid/tests/unittests/test_softmax_op.py diff --git a/python/paddle/v2/fluid/tests/test_softmax_with_cross_entropy_op.py b/python/paddle/v2/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_softmax_with_cross_entropy_op.py rename to python/paddle/v2/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py diff --git a/python/paddle/v2/fluid/tests/test_split_and_merge_lod_tensor_op.py b/python/paddle/v2/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py similarity index 100% rename from 
python/paddle/v2/fluid/tests/test_split_and_merge_lod_tensor_op.py rename to python/paddle/v2/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py diff --git a/python/paddle/v2/fluid/tests/test_split_op.py b/python/paddle/v2/fluid/tests/unittests/test_split_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_split_op.py rename to python/paddle/v2/fluid/tests/unittests/test_split_op.py diff --git a/python/paddle/v2/fluid/tests/test_split_selected_rows_op.py b/python/paddle/v2/fluid/tests/unittests/test_split_selected_rows_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_split_selected_rows_op.py rename to python/paddle/v2/fluid/tests/unittests/test_split_selected_rows_op.py diff --git a/python/paddle/v2/fluid/tests/book_distribute/test_split_var.py b/python/paddle/v2/fluid/tests/unittests/test_split_var.py similarity index 100% rename from python/paddle/v2/fluid/tests/book_distribute/test_split_var.py rename to python/paddle/v2/fluid/tests/unittests/test_split_var.py diff --git a/python/paddle/v2/fluid/tests/test_spp_op.py b/python/paddle/v2/fluid/tests/unittests/test_spp_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_spp_op.py rename to python/paddle/v2/fluid/tests/unittests/test_spp_op.py diff --git a/python/paddle/v2/fluid/tests/test_squared_l2_distance_op.py b/python/paddle/v2/fluid/tests/unittests/test_squared_l2_distance_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_squared_l2_distance_op.py rename to python/paddle/v2/fluid/tests/unittests/test_squared_l2_distance_op.py diff --git a/python/paddle/v2/fluid/tests/test_squared_l2_norm_op.py b/python/paddle/v2/fluid/tests/unittests/test_squared_l2_norm_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_squared_l2_norm_op.py rename to python/paddle/v2/fluid/tests/unittests/test_squared_l2_norm_op.py diff --git a/python/paddle/v2/fluid/tests/test_sum_op.py b/python/paddle/v2/fluid/tests/unittests/test_sum_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_sum_op.py rename to python/paddle/v2/fluid/tests/unittests/test_sum_op.py diff --git a/python/paddle/v2/fluid/tests/test_switch.py b/python/paddle/v2/fluid/tests/unittests/test_switch.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_switch.py rename to python/paddle/v2/fluid/tests/unittests/test_switch.py diff --git a/python/paddle/v2/fluid/tests/test_target_assign_op.py b/python/paddle/v2/fluid/tests/unittests/test_target_assign_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_target_assign_op.py rename to python/paddle/v2/fluid/tests/unittests/test_target_assign_op.py diff --git a/python/paddle/v2/fluid/tests/test_tensor.py b/python/paddle/v2/fluid/tests/unittests/test_tensor.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_tensor.py rename to python/paddle/v2/fluid/tests/unittests/test_tensor.py diff --git a/python/paddle/v2/fluid/tests/test_top_k_op.py b/python/paddle/v2/fluid/tests/unittests/test_top_k_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_top_k_op.py rename to python/paddle/v2/fluid/tests/unittests/test_top_k_op.py diff --git a/python/paddle/v2/fluid/tests/test_transpose_op.py b/python/paddle/v2/fluid/tests/unittests/test_transpose_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/test_transpose_op.py rename to python/paddle/v2/fluid/tests/unittests/test_transpose_op.py diff --git 
a/python/paddle/v2/fluid/tests/test_uniform_random_op.py b/python/paddle/v2/fluid/tests/unittests/test_uniform_random_op.py
similarity index 100%
rename from python/paddle/v2/fluid/tests/test_uniform_random_op.py
rename to python/paddle/v2/fluid/tests/unittests/test_uniform_random_op.py
diff --git a/python/paddle/v2/fluid/tests/test_unpool_op.py b/python/paddle/v2/fluid/tests/unittests/test_unpool_op.py
similarity index 100%
rename from python/paddle/v2/fluid/tests/test_unpool_op.py
rename to python/paddle/v2/fluid/tests/unittests/test_unpool_op.py
diff --git a/python/paddle/v2/fluid/tests/test_variable.py b/python/paddle/v2/fluid/tests/unittests/test_variable.py
similarity index 100%
rename from python/paddle/v2/fluid/tests/test_variable.py
rename to python/paddle/v2/fluid/tests/unittests/test_variable.py
diff --git a/python/paddle/v2/fluid/tests/test_warpctc_op.py b/python/paddle/v2/fluid/tests/unittests/test_warpctc_op.py
similarity index 100%
rename from python/paddle/v2/fluid/tests/test_warpctc_op.py
rename to python/paddle/v2/fluid/tests/unittests/test_warpctc_op.py
diff --git a/python/paddle/v2/fluid/tests/test_weight_normalization.py b/python/paddle/v2/fluid/tests/unittests/test_weight_normalization.py
similarity index 100%
rename from python/paddle/v2/fluid/tests/test_weight_normalization.py
rename to python/paddle/v2/fluid/tests/unittests/test_weight_normalization.py
diff --git a/python/paddle/v2/fluid/tests/test_while_op.py b/python/paddle/v2/fluid/tests/unittests/test_while_op.py
similarity index 100%
rename from python/paddle/v2/fluid/tests/test_while_op.py
rename to python/paddle/v2/fluid/tests/unittests/test_while_op.py
--
GitLab

From 549c74a9378593d81222238f393b3831cb7f55f1 Mon Sep 17 00:00:00 2001
From: "Yang Yang(Tony)"
Date: Mon, 12 Feb 2018 17:44:55 -0800
Subject: [PATCH 100/217] Create parallel_do.md

---
 doc/design/parallel_do.md | 83 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 83 insertions(+)
 create mode 100644 doc/design/parallel_do.md

diff --git a/doc/design/parallel_do.md b/doc/design/parallel_do.md
new file mode 100644
index 0000000000..c41af8c413
--- /dev/null
+++ b/doc/design/parallel_do.md
@@ -0,0 +1,83 @@
+# Design Doc: Parallel_Do in PaddlePaddle

+In PaddlePaddle, we use the parallel_do primitive to represent multi-threaded data parallel processing.
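+
+To make the semantics concrete before the design details, here is a minimal
+pure-Python sketch of what multi-threaded data parallel processing means. It is
+only an illustration with made-up helper names, not the fluid API; the
+`forward` argument stands in for the parallel sub-block.
+
+```python
+from concurrent.futures import ThreadPoolExecutor
+
+
+def parallel_do_sketch(batch, devices, forward):
+    n = len(devices)
+    k = (len(batch) + n - 1) // n
+    # Split the mini-batch into one contiguous shard per device.
+    shards = [batch[i * k:(i + 1) * k] for i in range(n)]
+    # Run the same sub-network on every shard, one thread per device.
+    with ThreadPoolExecutor(max_workers=n) as pool:
+        partials = list(pool.map(forward, shards, devices))
+    # Merge the per-device outputs back into a single result.
+    return [y for part in partials for y in part]
+
+
+out = parallel_do_sketch(list(range(8)), ["gpu0", "gpu1"],
+                         lambda xs, dev: [2 * x for x in xs])
+assert out == [2 * x for x in range(8)]
+```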
+
+## Design overview
+
+The definition of a parallel_do op looks like the following:
+
+```c++
+AddInput(kInputs, "Inputs needed to be split onto different devices").AsDuplicable();
+AddInput(kParameters, "Parameters are duplicated over different devices")
+    .AsDuplicable();
+AddInput(kPlaces, "Devices used for parallel processing");
+AddOutput(kOutputs, "Outputs needed to be merged from different devices").AsDuplicable();
+AddOutput(kParallelScopes,
+          "Container for all local variables in forward pass.");
+AddAttr<framework::BlockDesc *>(kParallelBlock,
+                                "List of operators to be executed in parallel");
+```
+
+A vanilla implementation of parallel_do can be sketched as follows (`|` means a single thread and
+`||||` means multiple threads):
+
+```
+In the forward pass
+  | Split input onto different devices
+  | Copy parameters onto different devices
+  |||| Compute forward pass in parallel
+  | Merge output from different devices
+
+In the backward pass
+  | Split output@grad onto different devices
+  |||| Compute backward pass in parallel
+  | Accumulate param@grad from different devices to the first device
+  | Merge input@grad from different devices
+```
+
+This implementation allows us to write a mixed-device program like this:
+
+```python
+# get embedding feature on CPU
+feature = some_cpu_only_op(data)
+
+# parallel processing on multiple GPUs
+pd = ParallelDo(gpu_places)
+with pd.do():
+    read_input(feature)
+    prediction = my_net(feature)
+    write_output(prediction)
+prediction = pd()
+loss = cross_entropy(prediction, label)
+```
+
+## Performance Improvement
+
+There are several places where we can make this parallel_do faster.
+
+### forward: split input onto different devices
+
+If the input of the parallel_do is independent of any prior operators, we can avoid this step by
+prefetching the input onto the different devices in a separate background thread. The Python code
+looks like this:
+
+```python
+pd = ParallelDo(gpu_places)
+with pd.do():
+    feature = pre_fetch(gpu_places)
+    prediction = my_net(feature)
+    write_output(prediction)
+```
+
+### forward: Copy parameters onto different devices
+
+We can avoid this step by making each device have a copy of the parameter. This requires:
+
+1. `fluid.default_startup_program()` to be run on all devices
+1. In the backward pass, allreduce param@grad on different devices; this requires
+   1. `backward.py` to add `allreduce` operators at parallel_do_grad
+   1. `allreduce` operators to be called in async mode to achieve maximum throughput
+1. apply gradient related ops (i.e. clipping, normalization, decay, sgd) on different devices in parallel
+
+By doing so, we also avoided "backward: accumulate param@grad from different devices to the first device"
--
GitLab

From ad2dfef4168b31048b777ca5eb328a0368bd74ba Mon Sep 17 00:00:00 2001
From: "Yang Yang(Tony)"
Date: Mon, 12 Feb 2018 17:56:28 -0800
Subject: [PATCH 101/217] Update parallel_do.md

---
 doc/design/parallel_do.md | 76 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 75 insertions(+), 1 deletion(-)

diff --git a/doc/design/parallel_do.md b/doc/design/parallel_do.md
index c41af8c413..576d30329b 100644
--- a/doc/design/parallel_do.md
+++ b/doc/design/parallel_do.md
@@ -41,6 +41,7 @@ This implementation allows us to write a mixed-device program like this:
 # get embedding feature on CPU
 feature = some_cpu_only_op(data)
 
+gpu_places = get_place(use_gpu=True)
 # parallel processing on multiple GPUs
 pd = ParallelDo(gpu_places)
 with pd.do():
@@ -51,6 +52,38 @@ prediction = pd()
 loss = cross_entropy(prediction, label)
 ```
 
+And the ProgramDesc looks like the following:
+
+```
+# start_program will be run by executor(CPUPlace), all w1, w2 will be allocated on CPU
+start_program
+{
+  vars: w1, w2
+  ops: init(w1), init(w2)
+}
+
+main_program
+{
+block0 {
+  vars: data, places, w1, w2
+  ops: data, get_place, parallel_do(block1),
+       parallel_do_grad(block2),
+       sgd(w2, w2_grad),
+       sgd(w1, w1_grad)
+}
+block1 {
+  vars: data, h1, h2, loss
+  ops: fc, fc, softmax
+}
+block2 {
+  vars: data_grad, h1_grad, h2_grad, loss_grad, w1_grad, w2_grad
+  ops: softmax_grad,
+       fc_grad
+       fc_grad
+}
+}
+```
+
 ## Performance Improvement
 
 There are several places where we can make this parallel_do faster.
@@ -78,6 +111,47 @@ We can avoid this step by making each device have a copy of the parameter. This
    1. `allreduce` operators to be called in async mode to achieve maximum throughput
 1. apply gradient related ops (i.e. clipping, normalization, decay, sgd) on different devices in parallel
 
-By doing so, we also avoided "backward: accumulate param@grad from different devices to the first device"
+By doing so, we also avoided "backward: accumulate param@grad from different devices to the first device".
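+
+To see what the inserted `allreduce` operators compute, and why they remove
+the "accumulate param@grad on the first device" step, here is a small sketch.
+This is plain Python with a hypothetical in-place helper, not the real
+operator, which runs asynchronously on the devices.
+
+```python
+def all_reduce_sketch(grads_per_device):
+    # grads_per_device[d][p] is parameter p's gradient computed on device d.
+    # After the call every device holds the sum over all devices, so no
+    # single device has to gather and re-broadcast the gradients.
+    num_params = len(grads_per_device[0])
+    for p in range(num_params):
+        total = sum(g[p] for g in grads_per_device)
+        for g in grads_per_device:
+            g[p] = total
+    return grads_per_device
+
+
+grads = all_reduce_sketch([[1.0, 2.0], [3.0, 4.0]])  # 2 devices, 2 params
+assert grads[0] == grads[1] == [4.0, 6.0]
+```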
+
+And the ProgramDesc looks like the following:
+
+```
+# w1, w2 will be allocated on all GPUs
+start_program
+{
+block0 {
+  parallel_do(block1)
+}
+block1 {
+  vars: w1, w2
+  ops: init(w1), init(w2)
+}
+}
+
+main_program
+{
+block0 {
+  vars: data, places, w1, w2
+  ops: data, get_place, parallel_do(block1),
+       parallel_do_grad(block2),  # append_backward
+       parallel_do(block3)        # append_optimization
+
+}
+block1 {
+  vars: data, h1, h2, loss
+  ops: fc, fc, softmax
+}
+block2 {
+  vars: data_grad, h1_grad, h2_grad, loss_grad, w1_grad, w2_grad
+  ops: softmax_grad,
+       fc_grad, allreduce(places, scopes, w1_grad),
+       fc_grad, allreduce(places, scopes, w2_grad)
+}
+block3 {
+  vars: lr
+  ops: sgd(w2, w2_grad),
+       sgd(w1, w1_grad)
+}
+}
+```
--
GitLab

From ed5dc3d4ae705c879369774e1491aee6967426a2 Mon Sep 17 00:00:00 2001
From: fengjiayi
Date: Tue, 13 Feb 2018 10:49:45 +0800
Subject: [PATCH 102/217] remove 'friend lod_tensor in tensor'

---
 paddle/fluid/framework/tensor.h | 2 --
 1 file changed, 2 deletions(-)

diff --git a/paddle/fluid/framework/tensor.h b/paddle/fluid/framework/tensor.h
index f95af384eb..6f878541e6 100644
--- a/paddle/fluid/framework/tensor.h
+++ b/paddle/fluid/framework/tensor.h
@@ -130,8 +130,6 @@ class Tensor {
   inline void set_layout(const DataLayout layout) { layout_ = layout; }
 
  private:
-  friend class LoDTensor;
-
   /**
    * @note Placeholder hides type T, so it doesn't appear as a template
    *       parameter of Variable.
--
GitLab

From 8b01546fe44c5c42bc4a72958ed06d04178854f5 Mon Sep 17 00:00:00 2001
From: QI JUN
Date: Tue, 13 Feb 2018 11:48:20 +0800
Subject: [PATCH 103/217] fix compile warning (#8430)

---
 paddle/fluid/operators/detection_map_op.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/paddle/fluid/operators/detection_map_op.h b/paddle/fluid/operators/detection_map_op.h
index 39d17a7cb3..92e0510839 100644
--- a/paddle/fluid/operators/detection_map_op.h
+++ b/paddle/fluid/operators/detection_map_op.h
@@ -151,7 +151,7 @@ class DetectionMAPOpKernel : public framework::OpKernel<T> {
     for (int n = 0; n < batch_size; ++n) {
       std::map<int, std::vector<Box>> boxes;
-      for (int i = label_index[n]; i < label_index[n + 1]; ++i) {
+      for (size_t i = label_index[n]; i < label_index[n + 1]; ++i) {
         Box box(labels(i, 2), labels(i, 3), labels(i, 4), labels(i, 5));
         int label = labels(i, 0);
         auto is_difficult = labels(i, 1);
@@ -167,7 +167,7 @@ class DetectionMAPOpKernel : public framework::OpKernel<T> {
     auto detect_index = detect_lod[0];
     for (int n = 0; n < batch_size; ++n) {
       std::map<int, std::vector<std::pair<T, Box>>> boxes;
-      for (int i = detect_index[n]; i < detect_index[n + 1]; ++i) {
+      for (size_t i = detect_index[n]; i < detect_index[n + 1]; ++i) {
         Box box(detect(i, 2), detect(i, 3), detect(i, 4), detect(i, 5));
         int label = detect(i, 0);
         auto score = detect(i, 1);
@@ -269,8 +269,8 @@ class DetectionMAPOpKernel : public framework::OpKernel<T> {
       std::map<int, std::vector<std::pair<T, int>>>& pos) {
     const T* pos_data = pos_tensor.data<T>();
     auto pos_data_lod = pos_tensor.lod();
-    for (int i = 0; i < pos_data_lod.size(); ++i) {
-      for (int j = pos_data_lod[0][i]; j < pos_data_lod[0][i + 1]; ++j) {
+    for (size_t i = 0; i < pos_data_lod.size(); ++i) {
+      for (size_t j = pos_data_lod[0][i]; j < pos_data_lod[0][i + 1]; ++j) {
         T score = pos_data[j * 2];
         int flag = 1;
         if (pos_data[j * 2 + 1] < kEPS) flag = 0;
--
GitLab

From 8acad27e8d74ee17f7ded7c0d1f0dc0df1633637 Mon Sep 17 00:00:00 2001
From: typhoonzero
Date: Tue, 13 Feb 2018 14:11:07 +0800
Subject: [PATCH 104/217] refine code

---
 paddle/fluid/operators/listen_and_serv_op.cc | 15 +-
 paddle/fluid/operators/recv_op.cc | 4 +-
paddle/fluid/operators/send_op.cc | 4 +- .../paddle/v2/fluid/distribute_transpiler.py | 392 +++++++++--------- 4 files changed, 211 insertions(+), 204 deletions(-) diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index 9c33667847..4409df4995 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -75,8 +75,8 @@ class ListenAndServOp : public framework::OperatorBase { server_thread_->join(); } - void Run(const framework::Scope &scope, - const platform::Place &dev_place) const override { + void RunImpl(const framework::Scope &scope, + const platform::Place &dev_place) const override { platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto &dev_ctx = *pool.Get(dev_place); framework::Scope &recv_scope = scope.NewScope(); @@ -101,7 +101,6 @@ class ListenAndServOp : public framework::OperatorBase { // the gradients arrives, just add suffix 0~n and merge the gradient. rpc_service_->SetCond(0); size_t recv_var_cnt = 0; - size_t update_param_cnt = 0; int batch_barrier = 0; while (batch_barrier != fan_in) { const detail::MessageWithName &v = rpc_service_->Get(); @@ -128,29 +127,26 @@ class ListenAndServOp : public framework::OperatorBase { } } } - VLOG(3) << "recv " << recv_var_cnt << " parmeters for one barrier."; if (exit_flag) { rpc_service_->ShutDown(); } - VLOG(3) << "run optimize graph..."; try { executor.Run(*program, &recv_scope, block->ID(), /*global_block*/ false /*create_local_scope*/, false /*create_vars*/); } catch (std::exception &e) { LOG(ERROR) << "run sub program error " << e.what(); } - // Reset the received sparse variables, the sum operator would not // sum the input sparse variables which rows is empty at the next // mini-batch. - // TOOD(Yancey1989): move the reset action into an operator, we couldn't + // TODO(Yancey1989): move the reset action into an operator, we couldn't // have any hide logic in the operator. for (auto &var : sparse_vars) { var->GetMutable()->mutable_rows()->clear(); } rpc_service_->SetCond(1); - rpc_service_->WaitClientGet(update_param_cnt); - grads_counter_.clear(); + // FIXME(typhoonzero): use another condition to sync wait clients get. 
+ rpc_service_->WaitClientGet(ins.size()); sparse_vars.clear(); } // while(true) } @@ -158,7 +154,6 @@ class ListenAndServOp : public framework::OperatorBase { protected: std::shared_ptr rpc_service_; std::shared_ptr server_thread_; - mutable std::unordered_map grads_counter_; }; class ListenAndServOpMaker : public framework::OpProtoAndCheckerMaker { diff --git a/paddle/fluid/operators/recv_op.cc b/paddle/fluid/operators/recv_op.cc index c093f60cee..17b57b5d45 100644 --- a/paddle/fluid/operators/recv_op.cc +++ b/paddle/fluid/operators/recv_op.cc @@ -32,8 +32,8 @@ class RecvOp : public framework::OperatorBase { const framework::AttributeMap& attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope& scope, - const platform::Place& place) const override { + void RunImpl(const framework::Scope& scope, + const platform::Place& place) const override { auto outs = Outputs("Out"); std::vector epmap = Attr>("epmap"); diff --git a/paddle/fluid/operators/send_op.cc b/paddle/fluid/operators/send_op.cc index b241f738cb..39b6c0e8c5 100644 --- a/paddle/fluid/operators/send_op.cc +++ b/paddle/fluid/operators/send_op.cc @@ -48,8 +48,8 @@ class SendOp : public framework::OperatorBase { const framework::AttributeMap& attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const framework::Scope& scope, - const platform::Place& place) const override { + void RunImpl(const framework::Scope& scope, + const platform::Place& place) const override { auto ins = Inputs("X"); auto outs = Outputs("Out"); std::vector epmap = Attr>("epmap"); diff --git a/python/paddle/v2/fluid/distribute_transpiler.py b/python/paddle/v2/fluid/distribute_transpiler.py index 41630998cf..81317df983 100644 --- a/python/paddle/v2/fluid/distribute_transpiler.py +++ b/python/paddle/v2/fluid/distribute_transpiler.py @@ -147,6 +147,21 @@ class DistributeTranspiler: Use different methods to split trainable variables to different parameter servers. + Steps to transpile trainer: + 1. split variable to multiple blocks, aligned by product(dim[1:]) (width). + 2. rename splited grad variables to add trainer_id suffix ".trainer_%d". + 3. modify trainer program add split_op to each grad variable. + 4. append send_op to send splited variables to server and fetch + params(splited blocks or origin param) from server. + 5. append concat_op to merge splited blocks to update local weights. + + Steps to transpile pserver: + 1. create new program for parameter server. + 2. create params and grad variables that assigned to current server instance. + 3. create a sub-block in the server side program + 4. append ops that should run on current server instance. + 5. add listen_and_serv op + :param optimize_ops: op list of optimization, should be the return value of Optimizer.minimize :type optimize_ops: list @@ -154,7 +169,7 @@ class DistributeTranspiler: :type params_grads: list :param trainer_id: one unique id for each trainer in a job. :type trainer_id: int - :param program: program to optimize, default is default_main_program + :param program: program to transpile, default is default_main_program :type program: Program :param pservers: parameter server endpoints like "m1:6174,m2:6174" :type pservers: string @@ -174,27 +189,15 @@ class DistributeTranspiler: # like Kubernetes, we should port this to use etcd later when developing # fluid distributed training with fault-tolerance. self.trainer_id = trainer_id - - # steps to transpile: - # 1. split variable to multiple blocks, aligned by product(dim[1:]) (width). - # 2. 
modify trainer program add split_op to each Grad. - # 3. append send_op to trainer. - # 4. append concat_op to trainer to update local weights. - # 5. create new program for parameter server. - # 6. create parameter server program by split_method generated endpoint->VarBlock - # 7. update startup_program, rename variables to variables with trainer_id - pserver_endpoints = pservers.split(",") # step1 param_list = [pg[0] for pg in params_grads] grad_list = [pg[1] for pg in params_grads] - # TODO: add split selected rows support grad_blocks = split_dense_variable(grad_list, len(pserver_endpoints)) param_blocks = split_dense_variable(param_list, len(pserver_endpoints)) # step2 grad_var_mapping = self._append_split_op(program, grad_blocks) - # step3 send_inputs = [] send_outputs = [] @@ -222,12 +225,12 @@ class DistributeTranspiler: rpc_client_var = program.global_block().create_var( name="RPC_CLIENT_VAR", - psersistable=True, + persistable=True, dtype='float32', # dtype and shape is not used in fact shape=[0]) # create send_op - send_op = program.global_block().append_op( + program.global_block().append_op( type="send", inputs={"X": send_inputs}, outputs={"Out": send_outputs, @@ -239,23 +242,158 @@ class DistributeTranspiler: if len(splited_var) <= 1: continue orig_param = program.global_block().vars[varname] - concat = program.global_block().append_op( + program.global_block().append_op( type="concat", inputs={"X": splited_var}, outputs={"Out": [orig_param]}, attrs={"axis": 0}) - # step 7 - startup_prog = default_startup_program() - for varname in startup_prog.global_block().vars.keys(): - if varname in param_var_mapping and \ - len(param_var_mapping[varname]) == 1: - new_var_name = "%s.trainer_%d" % \ - (varname, self.trainer_id) - startup_prog.global_block().rename_var(varname, new_var_name) - - def _create_vars_from_blocklist(self, program, block_list): - # Create respective variables using the block_list + def get_trainer_program(self): + # remove optimize ops and add a send op to main_program + self.program.global_block().delete_ops(self.optimize_ops) + return self.program + + def get_pserver_program(self, endpoint): + """ + Get pserver side program using the endpoint. + NOTE: assume blocks of the same variable is not distributed + on the same pserver, only change param/grad varnames for + trainers to fetch. + """ + # step1 + pserver_program = Program() + # step2 + recv_inputs = [] + for v in self.param_grad_ep_mapping[endpoint]["params"]: + self._clone_var(pserver_program.global_block(), v) + for v in self.param_grad_ep_mapping[endpoint]["grads"]: + # create vars for each trainer in global scope, so + # we don't need to create them when grad arrives. + # change client side var name to origin name by + # removing ".trainer_%d" suffix + suff_idx = v.name.find(".trainer_") + if suff_idx >= 0: + orig_var_name = v.name[:suff_idx] + pserver_program.global_block().create_var( + name=orig_var_name, + persistable=True, + dtype=v.dtype, + shape=v.shape) + print("create origin var: ", orig_var_name) + for trainer_id in xrange(self.trainers): + var = pserver_program.global_block().create_var( + name="%s.trainer_%d" % (orig_var_name, trainer_id), + persistable=False, + dtype=v.dtype, + shape=v.shape) + recv_inputs.append(var) + print("create per trainer var: ", var.name) + # step3 + optimize_block = pserver_program.create_block(0) + # step 4 + # Create a union-find data struct from optimize ops, + # If two ops are connected, we could add these two ops + # into one set. 
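+        # A minimal union-find sketch for intuition; `MiniUnionFind` is a
+        # hypothetical illustration, not the helper _create_ufind returns:
+        #
+        #   class MiniUnionFind(object):
+        #       def __init__(self, n):
+        #           self.parent = list(range(n))
+        #
+        #       def find(self, i):
+        #           while self.parent[i] != i:
+        #               self.parent[i] = self.parent[self.parent[i]]
+        #               i = self.parent[i]
+        #           return i
+        #
+        #       def union(self, i, j):
+        #           self.parent[self.find(i)] = self.find(j)
+        #
+        #       def is_connected(self, i, j):
+        #           return self.find(i) == self.find(j)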
+ ufind = self._create_ufind(self.optimize_ops) + # step 4.2 + # Iterate through the ops and append optimize op which + # located on current pserver + opt_op_on_pserver = [] + for _, op in enumerate(self.optimize_ops): + if self._is_opt_op(op) and self._is_opt_op_on_pserver(endpoint, op): + opt_op_on_pserver.append(op) + # step 4.3 + # Iterate through the ops, and if an op and the optimize ops + # which located on current pserver are in one set, then + # append it into the sub program. + for _, op in enumerate(self.optimize_ops): + for _, opt_op in enumerate(opt_op_on_pserver): + if ufind.is_connected(op, opt_op): + if self._is_opt_op(op): + self._append_pserver_ops(optimize_block, op, endpoint) + else: + self._append_pserver_non_opt_ops(optimize_block, op) + break + # step5 append the listen_and_serv op + pserver_program.global_block().append_op( + type="listen_and_serv", + inputs={'X': recv_inputs}, + outputs={}, + attrs={ + "OptimizeBlock": optimize_block, + "endpoint": endpoint, + "Fanin": self.trainers + }) + pserver_program.sync_with_cpp() + return pserver_program + + def get_startup_program(self, endpoint, pserver_program): + """ + Get startup program for current parameter server. + Modify operator input variables if there are variables that + were split to several blocks. + """ + s_prog = Program() + orig_s_prog = framework.default_startup_program() + params = self.param_grad_ep_mapping[endpoint]["params"] + + def _get_splited_name_and_shape(varname): + for idx, splited_param in enumerate(params): + pname = splited_param.name + if same_or_split_var(pname, varname) and varname != pname: + return pname, splited_param.shape + return "", [] + + # 1. create vars in pserver program to startup program + pserver_vars = pserver_program.global_block().vars + created_var_map = dict() + for _, var in pserver_vars.iteritems(): + tmpvar = s_prog.global_block().create_var( + name=var.name, + persistable=var.persistable, + dtype=var.dtype, + shape=var.shape) + created_var_map[var.name] = tmpvar + + # 2. rename op outputs + for op in orig_s_prog.global_block().ops: + new_inputs = dict() + new_outputs = dict() + # do not append startup op if var is not on this pserver + op_on_pserver = False + for key in op.output_names: + newname, _ = _get_splited_name_and_shape(op.output(key)[0]) + if newname: + op_on_pserver = True + new_outputs[key] = created_var_map[newname] + elif op.output(key)[0] in pserver_vars: + op_on_pserver = True + new_outputs[key] = pserver_vars[op.output(key)[0]] + + # most startup program ops have no inputs + new_inputs = self._get_input_map_from_op(pserver_vars, op) + + if op_on_pserver: + if op.type in [ + "gaussian_random", "fill_constant", "uniform_random" + ]: + op.attrs["shape"] = new_outputs["Out"].shape + s_prog.global_block().append_op( + type=op.type, + inputs=new_inputs, + outputs=new_outputs, + attrs=op.attrs) + return s_prog + + # ====================== private transpiler functions ===================== + def _create_vars_from_blocklist(self, + program, + block_list, + add_trainer_suffix=False): + """ + NOTE: only grads need to be named for different trainers, use + add_trainer_suffix to rename the grad vars. 
+ """ block_map = dict() var_mapping = dict() for block_str in block_list: @@ -266,12 +404,15 @@ class DistributeTranspiler: for varname, splited in block_map.iteritems(): orig_var = program.global_block().var(varname) if len(splited) == 1: - # rename var to the trainer_id var - new_var_name = "%s.trainer_%d" % \ - (orig_var.name, self.trainer_id) - program.global_block().rename_var(varname, new_var_name) - var_mapping[varname] = \ - [program.global_block().var(new_var_name)] + if add_trainer_suffix: + new_var_name = "%s.trainer_%d" % \ + (orig_var.name, self.trainer_id) + program.global_block().rename_var(varname, new_var_name) + var_mapping[varname] = \ + [program.global_block().var(new_var_name)] + else: + var_mapping[varname] = \ + [program.global_block().var(orig_var.name)] continue var_mapping[varname] = [] @@ -286,10 +427,16 @@ class DistributeTranspiler: splited_shape = [rows] if len(orig_shape) >= 2: splited_shape.extend(orig_shape[1:]) + new_var_name = "" + if add_trainer_suffix: + new_var_name = "%s.block%d.trainer_%d" % \ + (varname, i, self.trainer_id) + else: + new_var_name = "%s.block%d" % \ + (varname, i) var = program.global_block().create_var( - name="%s.block%d.trainer_%d" % - (varname, i, self.trainer_id), - psersistable=False, + name=new_var_name, + persistable=False, dtype=orig_var.dtype, type=orig_var.type, shape=splited_shape) # flattend splited var @@ -305,13 +452,12 @@ class DistributeTranspiler: dtype=var.dtype, type=var.type, lod_level=var.lod_level, - # HACK: let all param in pserver be persistable so the child - # program in recv can get them persistable=True) def _append_split_op(self, program, gradblocks): # Split variables that need to be split and append respective ops - var_mapping = self._create_vars_from_blocklist(program, gradblocks) + var_mapping = self._create_vars_from_blocklist( + program, gradblocks, add_trainer_suffix=True) for varname, splited_vars in var_mapping.iteritems(): # variable that don't need to split have empty splited_vars if len(splited_vars) <= 1: @@ -341,24 +487,6 @@ class DistributeTranspiler: "[LOD_TENSOR, SELECTED_ROWS]") return var_mapping - def get_trainer_program(self): - # remove optimize ops and add a send op to main_program - self.program.global_block().delete_ops(self.optimize_ops) - return self.program - - def _create_var_for_trainers(self, block, var, trainers): - # For each trainer, create the necessary variables - var_list = [] - for i in xrange(trainers): - var_each = block.create_var( - name="%s.trainer_%d" % (var.name, i), - psersistable=var.persistable, - dtype=var.dtype, - type=var.type, - shape=var.shape) - var_list.append(var_each) - return var_list - def _get_optimizer_input_shape(self, op_type, varkey, orig_shape, param_shape): """ @@ -386,6 +514,13 @@ class DistributeTranspiler: pass return orig_shape + def _orig_varname(self, varname): + suff_idx = varname.find(".trainer_") + orig_var_name = "" + if suff_idx >= 0: + orig_var_name = varname[:suff_idx] + return orig_var_name + def _append_pserver_ops(self, optimize_block, opt_op, endpoint): program = optimize_block.program pserver_block = program.global_block() @@ -396,18 +531,23 @@ class DistributeTranspiler: if key == "Grad": grad_block = None for g in self.param_grad_ep_mapping[endpoint]["grads"]: - if same_or_split_var(g.name, opt_op.input(key)[0]): + if same_or_split_var( + self._orig_varname(g.name), opt_op.input(key)[0]): grad_block = g break if not grad_block: # do not append this op if current endpoint # is not dealing with this grad block return - 
merged_var = pserver_block.vars[grad_block.name] - # append merging ops if trainers > 1 + merged_var = \ + pserver_block.vars[self._orig_varname(grad_block.name)] if self.trainers > 1: - vars2merge = self._create_var_for_trainers( - pserver_block, grad_block, self.trainers) + vars2merge = [] + for i in xrange(self.trainers): + per_trainer_name = "%s.trainer_%d" % \ + (self._orig_varname(grad_block.name), i) + vars2merge.append(pserver_block.vars[per_trainer_name]) + optimize_block.append_op( type="sum", inputs={"X": vars2merge}, @@ -550,76 +690,6 @@ class DistributeTranspiler: return False return False - def get_pserver_program(self, endpoint): - """ - Get pserver side program using the endpoint - - NOTE: assume blocks of the same variable is not distributed - on the same pserver, only change param/grad varnames for - trainers to fetch. For each pserver endpoint, server side - program must be a sub-set of the original optimization program. - """ - # step5 - pserver_program = Program() - recv_inputs = [] - for v in self.param_grad_ep_mapping[endpoint]["params"]: - self._clone_var(pserver_program.global_block(), v) - for v in self.param_grad_ep_mapping[endpoint]["grads"]: - # create vars for each trainer in global scope, so - # we don't need to create them when grad arrives. - pserver_program.global_block().create_var( - name=v.name, persistable=True, dtype=v.dtype, shape=v.shape) - for trainer_id in xrange(self.trainers): - # change client side var name to origin name by - # removing ".trainer_%d" suffix - suff_idx = v.name.find(".trainer_") - if suff_idx >= 0: - orig_var_name = v.name[:suff_idx] - var = pserver_program.global_block().create_var( - name="%s.trainer_%d" % (orig_var_name, trainer_id), - persistable=True, - dtype=v.dtype, - shape=v.shape) - recv_inputs.append(var) - # step6 - optimize_block = pserver_program.create_block(0) - # step 6.1 - # Create a union-find data struct by optimize ops, - # If two ops are connected, we could add these two ops - # into one set. - ufind = self._create_ufind(self.optimize_ops) - # step 6.2 - # Iterate through the ops and append optimize op which - # located on current pserver - opt_op_on_pserver = [] - for _, op in enumerate(self.optimize_ops): - if self._is_opt_op(op) and self._is_opt_op_on_pserver(endpoint, op): - opt_op_on_pserver.append(op) - # step 6.3 - # Iterate through the ops, and if an op and the optimize ops - # which located on current pserver are in one set, then - # append it into the sub program. - for _, op in enumerate(self.optimize_ops): - for _, opt_op in enumerate(opt_op_on_pserver): - if ufind.is_connected(op, opt_op): - if self._is_opt_op(op): - self._append_pserver_ops(optimize_block, op, endpoint) - else: - self._append_pserver_non_opt_ops(optimize_block, op) - break - # Append the listen_and_serv op - pserver_program.global_block().append_op( - type="listen_and_serv", - inputs={'X': recv_inputs}, - outputs={}, - attrs={ - "OptimizeBlock": optimize_block, - "endpoint": endpoint, - "Fanin": self.trainers - }) - pserver_program.sync_with_cpp() - return pserver_program - def _get_input_map_from_op(self, varmap, op): iomap = dict() for key in op.input_names: @@ -643,61 +713,3 @@ class DistributeTranspiler: else: iomap[key] = vars return iomap - - def get_startup_program(self, endpoint, pserver_program): - """ - Get startup program for current parameter server. - Modify operator input variables if there are variables that - were split to several blocks. 
- """ - s_prog = Program() - orig_s_prog = framework.default_startup_program() - params = self.param_grad_ep_mapping[endpoint]["params"] - - def _get_splited_name_and_shape(varname): - for idx, splited_param in enumerate(params): - pname = splited_param.name - if same_or_split_var(pname, varname) and varname != pname: - return pname, splited_param.shape - return "", [] - - # 1. create vars in pserver program to startup program - pserver_vars = pserver_program.global_block().vars - created_var_map = dict() - for _, var in pserver_vars.iteritems(): - tmpvar = s_prog.global_block().create_var( - name=var.name, - persistable=var.persistable, - dtype=var.dtype, - shape=var.shape) - created_var_map[var.name] = tmpvar - - # 2. rename op outputs - for op in orig_s_prog.global_block().ops: - new_inputs = dict() - new_outputs = dict() - # do not append startup op if var is not on this pserver - op_on_pserver = False - for key in op.output_names: - newname, _ = _get_splited_name_and_shape(op.output(key)[0]) - if newname: - op_on_pserver = True - new_outputs[key] = created_var_map[newname] - elif op.output(key)[0] in pserver_vars: - op_on_pserver = True - new_outputs[key] = pserver_vars[op.output(key)[0]] - - # most startup program ops have no inputs - new_inputs = self._get_input_map_from_op(pserver_vars, op) - - if op_on_pserver: - if op.type in [ - "gaussian_random", "fill_constant", "uniform_random" - ]: - op.attrs["shape"] = new_outputs["Out"].shape - s_prog.global_block().append_op( - type=op.type, - inputs=new_inputs, - outputs=new_outputs, - attrs=op.attrs) - return s_prog -- GitLab From ebb2bcfe0a8098a2c7b5b8646a4ca82cfddc1c30 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Tue, 13 Feb 2018 14:25:54 +0800 Subject: [PATCH 105/217] remove comments --- paddle/fluid/framework/block_desc.cc | 7 ------- 1 file changed, 7 deletions(-) diff --git a/paddle/fluid/framework/block_desc.cc b/paddle/fluid/framework/block_desc.cc index 11d1fbec4b..fbedd6c825 100644 --- a/paddle/fluid/framework/block_desc.cc +++ b/paddle/fluid/framework/block_desc.cc @@ -51,14 +51,7 @@ VarDesc *BlockDesc::RenameVar(const std::string &old_name, auto *var = this->Var(old_name); VarDesc *new_var = new VarDesc(*(var->Proto())); new_var->SetName(new_name); - // new_var->SetShape(var->GetShape()); - // new_var->SetType(var->GetType()); - // new_var->SetDataType(var->GetDataType()); - // new_var->SetLoDLevel(var->GetLoDLevel()); - // new_var->SetPersistable(var->Persistable()); - vars_[new_name].reset(new_var); - // rename inputs and outputs for (const auto &op : ops_) { auto *it = op.get(); -- GitLab From dafc7e3643798a13e32f20ee6c6014cfa40bbe0c Mon Sep 17 00:00:00 2001 From: emailweixu Date: Tue, 13 Feb 2018 09:52:18 -0800 Subject: [PATCH 106/217] Check data format consistency in data_feeder (#8417) --- python/paddle/v2/fluid/data_feeder.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/python/paddle/v2/fluid/data_feeder.py b/python/paddle/v2/fluid/data_feeder.py index f9e2f3e6a2..070bcadd71 100644 --- a/python/paddle/v2/fluid/data_feeder.py +++ b/python/paddle/v2/fluid/data_feeder.py @@ -107,6 +107,9 @@ class DataFeeder(object): dtype=dtype)) for each_sample in iterable: + assert len(each_sample) == len(converter), ( + "The number of fields in data (%s) does not match " + + "len(feed_list) (%s)") % (len(each_sample), len(converter)) for each_converter, each_slot in six.zip(converter, each_sample): each_converter.feed(each_slot) ret_dict = {} -- GitLab From c62ef22da37924d5cc08635b490c86910c474fa6 Mon Sep 17 00:00:00 2001 
From: "Yang Yang(Tony)" Date: Tue, 13 Feb 2018 13:50:22 -0800 Subject: [PATCH 107/217] Update parallel_do.md --- doc/design/parallel_do.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/design/parallel_do.md b/doc/design/parallel_do.md index 576d30329b..5b90b50792 100644 --- a/doc/design/parallel_do.md +++ b/doc/design/parallel_do.md @@ -72,10 +72,12 @@ block0 { sgd(w1, w1_grad) } block1 { + parent_block: 0 vars: data, h1, h2, loss ops: fc, fc, softmax } block2 { + parent_block: 1 vars: data_grad, h1_grad, h2_grad, loss_gard, w1_grad, w2_grad ops: softmax_grad, fc_grad @@ -122,6 +124,7 @@ block0 { parallel_do(block1) } block1 { + parent_block: 0 vars: w1, w2 ops: init(w1), init(w2) } @@ -137,16 +140,19 @@ block0 { } block1 { + parent_block: 0 vars: data, h1, h2, loss ops: fc, fc, softmax } block2 { + parent_block: 1 vars: data_grad, h1_grad, h2_grad, loss_gard, w1_grad, w2_grad ops: softmax_grad, fc_grad, allreduce(places, scopes, w1_grad), fc_grad, allreduce(places, scopes, w2_grad) } block3 { + parent_block: 0 vars: lr ops: sgd(w2, w2_grad), sgd(w1, w1_grad) -- GitLab From 87f4311a88694284d95e81460a5adeda47e4367d Mon Sep 17 00:00:00 2001 From: "Yang Yang(Tony)" Date: Tue, 13 Feb 2018 14:05:33 -0800 Subject: [PATCH 108/217] compile with nccl2 (#8411) * compile with nccl2 * add ncclGroup; it is necessary in nccl2 * add back libnccl-dev --- CMakeLists.txt | 1 - paddle/fluid/platform/CMakeLists.txt | 2 +- paddle/fluid/platform/dynload/CMakeLists.txt | 2 +- paddle/fluid/platform/nccl_test.cu | 2 ++ paddle/scripts/docker/build.sh | 5 ++++- 5 files changed, 8 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index fb91e3b369..5db5c228be 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -142,7 +142,6 @@ include(external/boost) # download boost include(external/any) # download libn::any include(external/eigen) # download eigen3 include(external/pybind11) # download pybind11 -include(external/nccl) include(external/cares) include(external/grpc) diff --git a/paddle/fluid/platform/CMakeLists.txt b/paddle/fluid/platform/CMakeLists.txt index 32e768fdf4..0d0cee21d1 100644 --- a/paddle/fluid/platform/CMakeLists.txt +++ b/paddle/fluid/platform/CMakeLists.txt @@ -1,5 +1,5 @@ if(WITH_GPU) - cc_library(enforce SRCS enforce.cc DEPS nccl) + cc_library(enforce SRCS enforce.cc DEPS) else() cc_library(enforce SRCS enforce.cc) endif() diff --git a/paddle/fluid/platform/dynload/CMakeLists.txt b/paddle/fluid/platform/dynload/CMakeLists.txt index cf2081b434..264b4ebf2c 100644 --- a/paddle/fluid/platform/dynload/CMakeLists.txt +++ b/paddle/fluid/platform/dynload/CMakeLists.txt @@ -1,4 +1,4 @@ cc_library(dynamic_loader SRCS dynamic_loader.cc DEPS glog gflags enforce) nv_library(dynload_cuda SRCS cublas.cc cudnn.cc curand.cc nccl.cc - DEPS dynamic_loader nccl) + DEPS dynamic_loader) cc_library(dynload_warpctc SRCS warpctc.cc DEPS dynamic_loader warpctc) diff --git a/paddle/fluid/platform/nccl_test.cu b/paddle/fluid/platform/nccl_test.cu index 7123035363..212ea8517e 100644 --- a/paddle/fluid/platform/nccl_test.cu +++ b/paddle/fluid/platform/nccl_test.cu @@ -89,6 +89,7 @@ TEST(NCCL, all_reduce) { VLOG(1) << "Invoking ncclAllReduce"; + dynload::ncclGroupStart(); for (int i = 0; i < dev_count; ++i) { VLOG(1) << "Invoking ncclAllReduce with device " << i; SetDeviceId(i); @@ -97,6 +98,7 @@ TEST(NCCL, all_reduce) { ncclSum, comms[i], data[i]->dev_ctx.stream())); VLOG(1) << "Invoked ncclAllReduce for device " << i; } + dynload::ncclGroupEnd(); VLOG(1) << "Invoked ncclAllReduce"; diff 
--git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index 442a7ea883..56fa138786 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -34,6 +34,7 @@ function cmake_gen() { Configuring cmake in /paddle/build ... -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-Release} ${PYTHON_FLAGS} + -DWITH_DSO=ON -DWITH_DOC=OFF -DWITH_GPU=${WITH_GPU:-OFF} -DWITH_DISTRIBUTE=${WITH_DISTRIBUTE:-OFF} @@ -57,6 +58,7 @@ EOF cmake .. \ -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-Release} \ ${PYTHON_FLAGS} \ + -DWITH_DSO=ON \ -DWITH_DOC=OFF \ -DWITH_GPU=${WITH_GPU:-OFF} \ -DWITH_DISTRIBUTE=${WITH_DISTRIBUTE:-OFF} \ @@ -171,7 +173,7 @@ EOF if [[ ${WITH_GPU} == "ON" ]]; then NCCL_DEPS="apt-get install -y libnccl-dev &&" else - NCCL_DEPS="" + NCCL_DEPS="" fi cat >> /paddle/build/Dockerfile < Date: Tue, 13 Feb 2018 22:09:05 +0000 Subject: [PATCH 109/217] pass compile --- paddle/fluid/framework/executor.cc | 2 +- paddle/fluid/operators/nccl_op.cc | 2 +- python/paddle/v2/fluid/backward.py | 27 +++++++++++++++------------ 3 files changed, 17 insertions(+), 14 deletions(-) diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index 1d7eccbc65..92b32b04d6 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -55,7 +55,7 @@ static void CreateTensor(Variable* var, proto::VarType::Type var_type) { var->GetMutable(); } else if (var_type == proto::VarType::READER) { var->GetMutable(); - } else if (var_type == proto::VarDesc::NCCL_COM) { + } else if (var_type == proto::VarType::NCCL_COM) { // GetMutable will be called in ncclInit } else { PADDLE_THROW( diff --git a/paddle/fluid/operators/nccl_op.cc b/paddle/fluid/operators/nccl_op.cc index f61b5003bd..0994bba782 100644 --- a/paddle/fluid/operators/nccl_op.cc +++ b/paddle/fluid/operators/nccl_op.cc @@ -65,7 +65,7 @@ class NCCLInitOpVarTypeInference : public framework::VarTypeInference { framework::BlockDesc *block) const override { auto out_var_name = op_desc.Output("Communicator").front(); auto &out_var = block->FindRecursiveOrCreateVar(out_var_name); - auto var_type = framework::proto::VarDesc::NCCL_COM; + auto var_type = framework::proto::VarType::NCCL_COM; out_var.SetType(var_type); } }; diff --git a/python/paddle/v2/fluid/backward.py b/python/paddle/v2/fluid/backward.py index cf32c6683b..682df3301b 100644 --- a/python/paddle/v2/fluid/backward.py +++ b/python/paddle/v2/fluid/backward.py @@ -269,7 +269,7 @@ def _append_backward_ops_(block, target_block, no_grad_dict, grad_to_var, - callback=None): + callbacks=None): """ Create all grad ops, and insert them into given block @@ -285,14 +285,13 @@ def _append_backward_ops_(block, val(str): corresponding forward variable name callback(callable object): a callable object used to decorate new generated grad ops """ - if callback is None: - - def empty_callback(block, context): - pass - - callback = empty_callback - elif not hasattr(callback, '__call__'): - raise ValueError("'callback' must be a callable object.") + if callbacks is None: + callbacks = [] + else: + assert (isinstance(callbacks, list)) + for cb in callbacks: + if not hasattr(cb, '__call__'): + raise ValueError("'callback' must be a callable object.") # grad_op_descs holds created grad_op, and will be appended to target_block grad_op_descs = [] @@ -303,9 +302,12 @@ def _append_backward_ops_(block, if op.has_attr("sub_block"): sub_block = program.block(op.block_attr("sub_block")) grad_sub_block = program.create_block(parent_idx=sub_block.idx) + if callbacks is 
None: + callbacks = [_callback_lookup_(op)] + else: + callbacks.append(_callback_lookup_(op)) _append_backward_ops_(sub_block, sub_block.ops, grad_sub_block, - no_grad_dict, grad_to_var, - _callback_lookup_(op)) + no_grad_dict, grad_to_var, callbacks) grad_sub_block_list.append(grad_sub_block.desc) # Getting op's corresponding grad_op @@ -325,7 +327,8 @@ def _append_backward_ops_(block, new_op_desc = target_block.desc.append_op() new_op_desc.copy_from(op_desc) grad_to_var["__current_op_desc__"] = new_op_desc - callback(block=target_block, context=grad_to_var) + for cb in callbacks: + cb(block=target_block, context=grad_to_var) def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map): -- GitLab From 00dff47cfa9a97db3b119b75fc43067f5e8dae38 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Tue, 13 Feb 2018 22:52:02 +0000 Subject: [PATCH 110/217] temp --- temp | 1 + 1 file changed, 1 insertion(+) create mode 100644 temp diff --git a/temp b/temp new file mode 100644 index 0000000000..9daeafb986 --- /dev/null +++ b/temp @@ -0,0 +1 @@ +test -- GitLab From 0717ff8b900ce2a249c3b8178269bd7a0f8a142a Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Wed, 14 Feb 2018 00:52:53 +0000 Subject: [PATCH 111/217] make boost library hosted on our server --- cmake/external/boost.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/external/boost.cmake b/cmake/external/boost.cmake index 9e135b2c0e..906bed2f04 100644 --- a/cmake/external/boost.cmake +++ b/cmake/external/boost.cmake @@ -17,7 +17,7 @@ include(ExternalProject) set(BOOST_PROJECT "extern_boost") set(BOOST_VER "1.41.0") set(BOOST_TAR "boost_1_41_0") -set(BOOST_URL "http://sourceforge.net/projects/boost/files/boost/${BOOST_VER}/${BOOST_TAR}.tar.gz") +set(BOOST_URL "http://paddlepaddledeps.s3-website-us-west-1.amazonaws.com/${BOOST_TAR}.tar.gz") set(BOOST_SOURCES_DIR ${THIRD_PARTY_PATH}/boost) set(BOOST_DOWNLOAD_DIR "${BOOST_SOURCES_DIR}/src/${BOOST_PROJECT}") set(BOOST_INCLUDE_DIR "${BOOST_DOWNLOAD_DIR}/${BOOST_TAR}" CACHE PATH "boost include directory." 
FORCE) -- GitLab From 481bd3c2de5f3cf9fea7f167b4df663fd568fb95 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Wed, 14 Feb 2018 00:54:42 +0000 Subject: [PATCH 112/217] clean up --- temp | 1 - 1 file changed, 1 deletion(-) delete mode 100644 temp diff --git a/temp b/temp deleted file mode 100644 index 9daeafb986..0000000000 --- a/temp +++ /dev/null @@ -1 +0,0 @@ -test -- GitLab From 16a8def1cda1a46ed772d5e37e5f679822f55436 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Wed, 14 Feb 2018 01:50:39 +0000 Subject: [PATCH 113/217] fix style --- doc/design/parallel_do.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/doc/design/parallel_do.md b/doc/design/parallel_do.md index 5b90b50792..d51b1014d4 100644 --- a/doc/design/parallel_do.md +++ b/doc/design/parallel_do.md @@ -159,5 +159,3 @@ block3 { } } ``` - - -- GitLab From cfffb1a36251e7d06535dac6db220131e36fe9f8 Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Wed, 14 Feb 2018 10:32:46 -0800 Subject: [PATCH 114/217] Update tensor_util.h (#8422) * Update tensor_util.h * Update with moved TensorDesc * Fix tensur_utils.cu * Update * Update * Update * Update * Make tensor_util.cu a symbolic link --- .../fluid/framework/data_device_transform.cc | 2 +- .../framework/data_device_transform_test.cu | 4 +- paddle/fluid/framework/executor.cc | 6 +- paddle/fluid/framework/lod_tensor.cc | 12 +- paddle/fluid/framework/lod_tensor.h | 4 +- paddle/fluid/framework/mixed_vector.h | 12 +- paddle/fluid/framework/reader.cc | 2 +- paddle/fluid/framework/selected_rows.cc | 4 +- paddle/fluid/framework/tensor_util.cc | 198 ++++++++++++- paddle/fluid/framework/tensor_util.cu | 120 +------- paddle/fluid/framework/tensor_util.h | 261 ++---------------- paddle/fluid/framework/tensor_util_test.cc | 65 ++--- paddle/fluid/framework/tensor_util_test.cu | 8 +- paddle/fluid/framework/threadpool.h | 2 +- paddle/fluid/operators/array_operator.h | 2 +- .../fluid/operators/array_to_lod_tensor_op.cc | 4 +- paddle/fluid/operators/assign_op.cc | 4 +- paddle/fluid/operators/assign_value_op.h | 2 +- .../fluid/operators/beam_search_decode_op.h | 4 +- paddle/fluid/operators/detection_output_op.h | 34 +-- paddle/fluid/operators/expand_op.h | 3 +- paddle/fluid/operators/feed_op.cc | 2 +- paddle/fluid/operators/fetch_op.cc | 2 +- paddle/fluid/operators/fill_op.cc | 2 +- paddle/fluid/operators/layer_norm_op.h | 4 +- paddle/fluid/operators/load_combine_op.cc | 2 +- paddle/fluid/operators/load_op.cc | 2 +- paddle/fluid/operators/lod_reset_op.h | 4 +- .../fluid/operators/lod_tensor_to_array_op.cc | 6 +- paddle/fluid/operators/math/context_project.h | 6 +- paddle/fluid/operators/math/im2col_test.cc | 14 +- .../operators/math/math_function_test.cu | 36 +-- .../math/selected_rows_functor_test.cu | 8 +- .../fluid/operators/math/sequence_padding.cu | 4 +- .../operators/math/sequence_padding_test.cc | 4 +- paddle/fluid/operators/math/vol2col_test.cc | 8 +- paddle/fluid/operators/merge_lod_tensor_op.cc | 7 +- .../fluid/operators/mine_hard_examples_op.cc | 3 +- paddle/fluid/operators/multiplex_op.cu | 4 +- paddle/fluid/operators/nccl_op_test.cu.cc | 2 +- paddle/fluid/operators/parallel_do_op.cc | 6 +- paddle/fluid/operators/print_op.cc | 2 +- paddle/fluid/operators/recurrent_op.cc | 8 +- .../reorder_lod_tensor_by_rank_op.cc | 2 +- paddle/fluid/operators/reshape_op.h | 4 +- paddle/fluid/operators/sequence_reshape_op.h | 4 +- paddle/fluid/operators/sequence_slice_op.h | 16 +- .../fluid/operators/shrink_rnn_memory_op.cc | 2 +- paddle/fluid/operators/split_lod_tensor_op.cc | 9 +- paddle/fluid/operators/sum_op.h 
| 4 +- .../operators/tensor_array_read_write_op.cc | 4 +- paddle/fluid/operators/warpctc_op.h | 5 +- paddle/fluid/pybind/tensor_py.h | 6 +- 53 files changed, 411 insertions(+), 534 deletions(-) mode change 100644 => 120000 paddle/fluid/framework/tensor_util.cu diff --git a/paddle/fluid/framework/data_device_transform.cc b/paddle/fluid/framework/data_device_transform.cc index 728a2fb6f3..85dbb39e6f 100644 --- a/paddle/fluid/framework/data_device_transform.cc +++ b/paddle/fluid/framework/data_device_transform.cc @@ -37,7 +37,7 @@ void TransDataDevice(const Tensor& in, const platform::Place& dst_place, << " dst_place: " << dst_place; auto* dev_ctx = GetDeviceContext(in.place(), dst_place); dev_ctx->Wait(); - Copy(in, dst_place, *dev_ctx, out); + TensorCopy(in, dst_place, *dev_ctx, out); dev_ctx->Wait(); } diff --git a/paddle/fluid/framework/data_device_transform_test.cu b/paddle/fluid/framework/data_device_transform_test.cu index c9ba071175..db6687985d 100644 --- a/paddle/fluid/framework/data_device_transform_test.cu +++ b/paddle/fluid/framework/data_device_transform_test.cu @@ -157,8 +157,8 @@ TEST(Operator, CPUtoGPU) { auto dev_ctx = pool.Get(cuda_place); paddle::framework::Tensor output_tensor; - Copy(output2->Get(), paddle::platform::CPUPlace(), *dev_ctx, - &output_tensor); + TensorCopy(output2->Get(), paddle::platform::CPUPlace(), *dev_ctx, + &output_tensor); dev_ctx->Wait(); float* output2_ptr = output_tensor.data(); diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index ebfd54fdc5..23eeb276c0 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -73,8 +73,10 @@ static void CheckTensorNANOrInf(const std::string& name, tensor.type().hash_code() != typeid(double).hash_code()) { return; } - PADDLE_ENFORCE(!framework::HasInf(tensor), "Tensor %s has Inf", name); - PADDLE_ENFORCE(!framework::HasNAN(tensor), "Tensor %s has NAN", name); + PADDLE_ENFORCE(!framework::TensorContainsInf(tensor), + "Tensor %s contains Inf", name); + PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor), + "Tensor %s contains NAN", name); } void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id, diff --git a/paddle/fluid/framework/lod_tensor.cc b/paddle/fluid/framework/lod_tensor.cc index 89768bcfd5..4cf14c8da5 100644 --- a/paddle/fluid/framework/lod_tensor.cc +++ b/paddle/fluid/framework/lod_tensor.cc @@ -46,7 +46,7 @@ std::ostream &operator<<(std::ostream &os, const LoDTensor &t) { if (!platform::is_cpu_place(t.place())) { LoDTensor tt; - framework::Copy(t, platform::CPUPlace(), &tt); + framework::TensorCopy(t, platform::CPUPlace(), &tt); platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto &dev_ctx = *pool.Get(t.place()); dev_ctx.Wait(); @@ -255,7 +255,7 @@ void SerializeToStream(std::ostream &os, const LoDTensor &tensor, } } // the 3st field, Tensor - SerializeToStream(os, static_cast(tensor), dev_ctx); + TensorToStream(os, static_cast(tensor), dev_ctx); } void DeserializeFromStream(std::istream &is, LoDTensor *tensor, @@ -282,7 +282,7 @@ void DeserializeFromStream(std::istream &is, LoDTensor *tensor, } } // the 3st filed, Tensor - DeserializeFromStream(is, static_cast(tensor), dev_ctx); + TensorFromStream(is, static_cast(tensor), dev_ctx); } std::vector LoDTensor::SplitLoDTensor( @@ -308,14 +308,14 @@ std::vector LoDTensor::SplitLoDTensor( if (lod().empty()) { auto src = Slice(begin, end); auto &dst_place = places[i]; - framework::Copy(src, dst_place, &dst); + framework::TensorCopy(src, 
dst_place, &dst); } else { auto lod_and_offset = GetSubLoDAndAbsoluteOffset(lod(), begin, end, 0); auto &offset = lod_and_offset.second; auto src = Slice(offset.first, offset.second); auto &dst_place = places[i]; - framework::Copy(src, dst_place, &dst); + framework::TensorCopy(src, dst_place, &dst); LoD my_lod; for (auto &l : lod_and_offset.first) { @@ -369,7 +369,7 @@ void LoDTensor::MergeLoDTensor( for (auto *src : lod_tensors) { int end = begin + src->dims()[0]; auto dst = Slice(begin, end); - framework::Copy(*src, dst_place, &dst); + framework::TensorCopy(*src, dst_place, &dst); begin = end; } } diff --git a/paddle/fluid/framework/lod_tensor.h b/paddle/fluid/framework/lod_tensor.h index 948389afb6..94d5a6e9fd 100644 --- a/paddle/fluid/framework/lod_tensor.h +++ b/paddle/fluid/framework/lod_tensor.h @@ -175,8 +175,8 @@ LoDTensor LodExpand(const LoDTensor& source, const LoD& lod, size_t level, for (size_t ins = 0; ins < num_instances; ins++) { for (size_t elem = lod_level[ins]; elem < lod_level[ins + 1]; elem++) { auto slice = tensor.Slice(elem, elem + 1); - Copy(source.Slice(ins, ins + 1), platform::CPUPlace(), - platform::CPUDeviceContext(), &slice); + TensorCopy(source.Slice(ins, ins + 1), platform::CPUPlace(), + platform::CPUDeviceContext(), &slice); } } return tensor; diff --git a/paddle/fluid/framework/mixed_vector.h b/paddle/fluid/framework/mixed_vector.h index c1a89a1261..6a6fa53871 100644 --- a/paddle/fluid/framework/mixed_vector.h +++ b/paddle/fluid/framework/mixed_vector.h @@ -291,7 +291,7 @@ class Vector { void CopyToCPU() const { // COPY GPU Data To CPU - Copy(cuda_vec_, platform::CPUPlace(), &cpu_vec_); + TensorCopy(cuda_vec_, platform::CPUPlace(), &cpu_vec_); WaitPlace(cuda_vec_.place()); } @@ -305,13 +305,14 @@ class Vector { void ImmutableCUDA(platform::Place place) const { if (IsDirty()) { if (IsInCPU()) { - Copy(cpu_vec_, boost::get(place), &cuda_vec_); + TensorCopy(cpu_vec_, boost::get(place), + &cuda_vec_); WaitPlace(place); UnsetFlag(kDirty); SetFlag(kDataInCUDA); } else if (IsInCUDA() && !(place == cuda_vec_.place())) { framework::Tensor tmp; - Copy(cuda_vec_, boost::get(place), &tmp); + TensorCopy(cuda_vec_, boost::get(place), &tmp); WaitPlace(cuda_vec_.place()); cuda_vec_.ShareDataWith(tmp); // Still dirty @@ -322,13 +323,14 @@ class Vector { } else { if (!IsInCUDA()) { // Even data is not dirty. However, data is not in CUDA. Copy data. 
- Copy(cpu_vec_, boost::get(place), &cuda_vec_); + TensorCopy(cpu_vec_, boost::get(place), + &cuda_vec_); WaitPlace(place); SetFlag(kDataInCUDA); } else if (!(place == cuda_vec_.place())) { framework::Tensor tmp; WaitPlace(cuda_vec_.place()); - Copy(cuda_vec_, boost::get(place), &tmp); + TensorCopy(cuda_vec_, boost::get(place), &tmp); WaitPlace(cuda_vec_.place()); WaitPlace(place); cuda_vec_.ShareDataWith(tmp); diff --git a/paddle/fluid/framework/reader.cc b/paddle/fluid/framework/reader.cc index 1ef0c48211..dc1caa72a4 100644 --- a/paddle/fluid/framework/reader.cc +++ b/paddle/fluid/framework/reader.cc @@ -105,7 +105,7 @@ void BatchReader::ReadNext(std::vector* out) { } } Tensor dst = out_tensor.Slice(dst_offset, dst_offset + ins_shape[0]); - Copy(buffer_[i][j], platform::CPUPlace(), &dst); + TensorCopy(buffer_[i][j], platform::CPUPlace(), &dst); dst_offset += ins_shape[0]; } out_tensor.set_lod(batch_lod); diff --git a/paddle/fluid/framework/selected_rows.cc b/paddle/fluid/framework/selected_rows.cc index 08c319002d..504344e937 100644 --- a/paddle/fluid/framework/selected_rows.cc +++ b/paddle/fluid/framework/selected_rows.cc @@ -34,7 +34,7 @@ void SerializeToStream(std::ostream& os, const SelectedRows& selected_rows, os.write(reinterpret_cast(&height), sizeof(height)); } // the 4st field, Tensor data - SerializeToStream(os, selected_rows.value(), dev_ctx); + TensorToStream(os, selected_rows.value(), dev_ctx); } void DeserializeFromStream(std::istream& is, SelectedRows* selected_rows, @@ -62,7 +62,7 @@ void DeserializeFromStream(std::istream& is, SelectedRows* selected_rows, selected_rows->set_height(height); } // the 4st field, tensor which contains the data - DeserializeFromStream(is, selected_rows->mutable_value(), dev_ctx); + TensorFromStream(is, selected_rows->mutable_value(), dev_ctx); } } // namespace framework diff --git a/paddle/fluid/framework/tensor_util.cc b/paddle/fluid/framework/tensor_util.cc index 537fb4614c..9b465b85b0 100644 --- a/paddle/fluid/framework/tensor_util.cc +++ b/paddle/fluid/framework/tensor_util.cc @@ -16,6 +16,76 @@ namespace paddle { namespace framework { + +void TensorCopy(const Tensor& src, const platform::Place& dst_place, + const platform::DeviceContext& ctx, Tensor* dst) { + VLOG(3) << "TensorCopy " << src.dims() << " from " << src.place() << " to " + << dst_place; + src.check_memory_size(); + + dst->Resize(src.dims()); + dst->set_layout(src.layout()); + auto src_place = src.place(); + auto src_ptr = src.data(); + + auto dst_ptr = dst->mutable_data(dst_place, src.type()); + + auto size = src.numel() * SizeOfType(src.type()); + + if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) { + memory::Copy(boost::get(dst_place), dst_ptr, + boost::get(src_place), src_ptr, size); + } +#ifdef PADDLE_WITH_CUDA + else if (platform::is_gpu_place(src_place) && // NOLINT + platform::is_cpu_place(dst_place)) { + auto src_gpu_place = boost::get(src_place); + auto dst_cpu_place = boost::get(dst_place); + auto ctx_place = ctx.GetPlace(); + PADDLE_ENFORCE(platform::is_gpu_place(ctx_place)); + auto ctx_gpu_place = boost::get(ctx_place); + PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place); + memory::Copy( + dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size, + reinterpret_cast(ctx).stream()); + } else if (platform::is_cpu_place(src_place) && + platform::is_gpu_place(dst_place)) { + auto src_cpu_place = boost::get(src_place); + auto dst_gpu_place = boost::get(dst_place); + auto ctx_place = ctx.GetPlace(); + 
PADDLE_ENFORCE(platform::is_gpu_place(ctx_place)); + auto ctx_gpu_place = boost::get(ctx_place); + PADDLE_ENFORCE_EQ(dst_gpu_place, ctx_gpu_place); + memory::Copy( + dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size, + reinterpret_cast(ctx).stream()); + } else if (platform::is_gpu_place(src_place) && + platform::is_gpu_place(dst_place)) { + auto src_gpu_place = boost::get(src_place); + auto dst_gpu_place = boost::get(dst_place); + auto ctx_place = ctx.GetPlace(); + PADDLE_ENFORCE(platform::is_gpu_place(ctx_place)); + auto ctx_gpu_place = boost::get(ctx_place); + PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place); + memory::Copy( + dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, + reinterpret_cast(ctx).stream()); + } +#endif +} + +void TensorCopy(const Tensor& src, const platform::Place& dst_place, + Tensor* dst) { + platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); + const platform::DeviceContext* dev_ctx; + if (platform::is_gpu_place(src.place())) { + dev_ctx = pool.Get(src.place()); + } else { + dev_ctx = pool.Get(dst_place); + } + TensorCopy(src, dst_place, *dev_ctx, dst); +} + template struct AnyDTypeVisitor { Predicate predicate_; @@ -69,7 +139,7 @@ struct AnyVisitor : public boost::static_visitor { tmp.mutable_data(cpu); auto gpuctx = platform::DeviceContextPool::Instance().Get(gpu); gpuctx->Wait(); - Copy(out, cpu, *gpuctx, &tmp); + TensorCopy(out, cpu, *gpuctx, &tmp); gpuctx->Wait(); return GetResult(tmp, cpu); } @@ -87,7 +157,7 @@ inline bool Any(const framework::Tensor& tensor, Predicate predicate) { return platform::VisitPlace(place, visitor); } -struct HasNANPredicate { +struct ContainsNANPredicate { template auto operator()(const T& eigen_vec) const -> decltype(std::declval().isnan()) { @@ -96,12 +166,12 @@ struct HasNANPredicate { } }; -bool HasNAN(const framework::Tensor& tensor) { - HasNANPredicate predicate; +bool TensorContainsNAN(const framework::Tensor& tensor) { + ContainsNANPredicate predicate; return Any(tensor, predicate); } -struct HasInfPredicate { +struct ContainsInfPredicate { template auto operator()(const T& eigen_vec) const -> decltype(std::declval().isinf()) { @@ -110,10 +180,124 @@ struct HasInfPredicate { } }; -bool HasInf(const framework::Tensor& tensor) { - HasInfPredicate predicate; +bool TensorContainsInf(const framework::Tensor& tensor) { + ContainsInfPredicate predicate; return Any(tensor, predicate); } +void TensorToStream(std::ostream& os, const Tensor& tensor, + const platform::DeviceContext& dev_ctx) { + // TODO(typhoonzero): serialize to ostream + { // the 1st field, uint32_t version + constexpr uint32_t version = 0; + os.write(reinterpret_cast(&version), sizeof(version)); + } + { // the 2nd field, tensor description + // int32_t size + // void* protobuf message + proto::VarType::TensorDesc desc; + desc.set_data_type(framework::ToDataType(tensor.type())); + auto dims = framework::vectorize(tensor.dims()); + auto* pb_dims = desc.mutable_dims(); + pb_dims->Resize(static_cast(dims.size()), 0); + std::copy(dims.begin(), dims.end(), pb_dims->begin()); + int32_t size = desc.ByteSize(); + os.write(reinterpret_cast(&size), sizeof(size)); + auto out = desc.SerializeAsString(); + os.write(out.data(), size); + } + { // the 3rd field, tensor data + uint64_t size = tensor.memory_size(); + auto* data_ptr = tensor.data(); + PADDLE_ENFORCE(size < std::numeric_limits::max(), + "Index overflow when writing tensor"); + if (platform::is_gpu_place(tensor.place())) { +#ifdef PADDLE_WITH_CUDA + constexpr size_t kBufSize = 1024 * 1024 * 
+
+void TensorToStream(std::ostream& os, const Tensor& tensor,
+                    const platform::DeviceContext& dev_ctx) {
+  // TODO(typhoonzero): serialize to ostream
+  {  // the 1st field, uint32_t version
+    constexpr uint32_t version = 0;
+    os.write(reinterpret_cast<const char*>(&version), sizeof(version));
+  }
+  {  // the 2nd field, tensor description
+     // int32_t  size
+     // void*    protobuf message
+    proto::VarType::TensorDesc desc;
+    desc.set_data_type(framework::ToDataType(tensor.type()));
+    auto dims = framework::vectorize(tensor.dims());
+    auto* pb_dims = desc.mutable_dims();
+    pb_dims->Resize(static_cast<int>(dims.size()), 0);
+    std::copy(dims.begin(), dims.end(), pb_dims->begin());
+    int32_t size = desc.ByteSize();
+    os.write(reinterpret_cast<const char*>(&size), sizeof(size));
+    auto out = desc.SerializeAsString();
+    os.write(out.data(), size);
+  }
+  {  // the 3rd field, tensor data
+    uint64_t size = tensor.memory_size();
+    auto* data_ptr = tensor.data<void>();
+    PADDLE_ENFORCE(size < std::numeric_limits<std::streamsize>::max(),
+                   "Index overflow when writing tensor");
+    if (platform::is_gpu_place(tensor.place())) {
+#ifdef PADDLE_WITH_CUDA
+      constexpr size_t kBufSize = 1024 * 1024 * 64;  // 64MB
+      std::unique_ptr<char[]> buf(new char[kBufSize]);
+      auto& gpu_dev_ctx =
+          static_cast<const platform::CUDADeviceContext&>(dev_ctx);
+      platform::CPUPlace cpu;
+      uintptr_t data = reinterpret_cast<uintptr_t>(data_ptr);
+      while (size != 0) {
+        size_t size_to_write = std::min(kBufSize, static_cast<size_t>(size));
+        memory::Copy(cpu, buf.get(),
+                     boost::get<platform::CUDAPlace>(tensor.place()),
+                     reinterpret_cast<const void*>(data), size_to_write,
+                     gpu_dev_ctx.stream());
+        gpu_dev_ctx.Wait();
+        os.write(buf.get(), size_to_write);
+        data += size_to_write;
+        size -= size_to_write;
+      }
+#else
+      PADDLE_THROW("Unexpected branch");
+#endif
+    } else {
+      os.write(static_cast<const char*>(data_ptr),
+               static_cast<std::streamsize>(size));
+    }
+  }
+}
+
+struct DeserializedDataFunctor {
+  DeserializedDataFunctor(void** buf, Tensor* tensor,
+                          const platform::Place& place)
+      : buf_(buf), tensor_(tensor), place_(place) {}
+
+  template <typename T>
+  void operator()() {
+    *buf_ = tensor_->mutable_data<T>(place_);
+  }
+
+  void** buf_;
+  Tensor* tensor_;
+  platform::Place place_;
+};
+
+void TensorFromStream(std::istream& is, Tensor* tensor,
+                      const platform::DeviceContext& dev_ctx) {
+  uint32_t version;
+  is.read(reinterpret_cast<char*>(&version), sizeof(version));
+  PADDLE_ENFORCE_EQ(version, 0U, "Only version 0 is supported");
+  proto::VarType::TensorDesc desc;
+  {  // int32_t size
+     // proto buffer
+    int32_t size;
+    is.read(reinterpret_cast<char*>(&size), sizeof(size));
+    std::unique_ptr<char[]> buf(new char[size]);
+    is.read(reinterpret_cast<char*>(buf.get()), size);
+    PADDLE_ENFORCE(desc.ParseFromArray(buf.get(), size),
+                   "Cannot parse tensor desc");
+  }
+  {  // read tensor
+    std::vector<int64_t> dims;
+    dims.reserve(static_cast<size_t>(desc.dims().size()));
+    std::copy(desc.dims().begin(), desc.dims().end(), std::back_inserter(dims));
+    tensor->Resize(framework::make_ddim(dims));
+    void* buf;
+    auto ctx = platform::CPUDeviceContext();
+    if (platform::is_gpu_place(dev_ctx.GetPlace())) {
+#ifdef PADDLE_WITH_CUDA
+      Tensor cpu_tensor;
+      cpu_tensor.Resize(framework::make_ddim(dims));
+      framework::VisitDataType(
+          desc.data_type(),
+          DeserializedDataFunctor(&buf, &cpu_tensor, ctx.GetPlace()));
+      is.read(static_cast<char*>(buf), cpu_tensor.memory_size());
+      auto dst_place = dev_ctx.GetPlace();
+      framework::TensorCopy(cpu_tensor, dst_place, dev_ctx, tensor);
+#else
+      PADDLE_THROW("Unexpected branch");
+#endif
+    } else {
+      framework::VisitDataType(
+          desc.data_type(),
+          DeserializedDataFunctor(&buf, tensor, ctx.GetPlace()));
+      is.read(static_cast<char*>(buf), tensor->memory_size());
+    }
+  }
+}
+
 }  // namespace framework
 }  // namespace paddle
diff --git a/paddle/fluid/framework/tensor_util.cu b/paddle/fluid/framework/tensor_util.cu
deleted file mode 100644
index 537fb4614c..0000000000
--- a/paddle/fluid/framework/tensor_util.cu
+++ /dev/null
@@ -1,119 +0,0 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
*/ - -#include "paddle/fluid/framework/tensor_util.h" - -namespace paddle { -namespace framework { -template -struct AnyDTypeVisitor { - Predicate predicate_; - const Tensor& tensor_; - const DevCtx& ctx_; - Tensor* out_; - - AnyDTypeVisitor(Predicate predicate, const Tensor& tensor, const DevCtx& ctx, - Tensor* out) - : predicate_(predicate), tensor_(tensor), ctx_(ctx), out_(out) {} - - template - void operator()() const { - auto t = EigenVector::Flatten(tensor_); - auto o = EigenScalar::From(*out_); - // return any of predicate_(t) is true. - o.device(*ctx_.eigen_device()) = predicate_(t).any(); - } -}; - -template -inline void AnyImpl(Predicate predicate, const framework::Tensor& tensor, - const DevCtx& ctx, framework::Tensor* out) { - VisitDataType(ToDataType(tensor.type()), AnyDTypeVisitor( - predicate, tensor, ctx, out)); -} - -template -struct AnyVisitor : public boost::static_visitor { - const framework::Tensor& tensor_; - Predicate predicate_; - - AnyVisitor(const framework::Tensor& tensor, Predicate predicate) - : tensor_(tensor), predicate_(std::move(predicate)) {} - - template - bool operator()(const Place& place) const { - framework::Tensor out; - out.Resize({1}); - out.mutable_data(place); - auto* ctx = platform::DeviceContextPool::Instance().GetByPlace(place); - AnyImpl(predicate_, tensor_, *ctx, &out); - return this->GetResult(out, place); - } - - bool GetResult(const framework::Tensor& out, - const platform::CUDAPlace& gpu) const { - platform::CPUPlace cpu; - framework::Tensor tmp; - tmp.Resize({1}); - tmp.mutable_data(cpu); - auto gpuctx = platform::DeviceContextPool::Instance().Get(gpu); - gpuctx->Wait(); - Copy(out, cpu, *gpuctx, &tmp); - gpuctx->Wait(); - return GetResult(tmp, cpu); - } - - bool GetResult(const framework::Tensor& out, - const platform::CPUPlace& cpu) const { - return *out.data(); - } -}; - -template -inline bool Any(const framework::Tensor& tensor, Predicate predicate) { - AnyVisitor visitor(tensor, predicate); - auto place = tensor.place(); - return platform::VisitPlace(place, visitor); -} - -struct HasNANPredicate { - template - auto operator()(const T& eigen_vec) const - -> decltype(std::declval().isnan()) { - // Cast eigen_vector to vector of bool. true if is inf. - return eigen_vec.isnan(); - } -}; - -bool HasNAN(const framework::Tensor& tensor) { - HasNANPredicate predicate; - return Any(tensor, predicate); -} - -struct HasInfPredicate { - template - auto operator()(const T& eigen_vec) const - -> decltype(std::declval().isinf()) { - // Cast eigen_vector to vector of bool. true if is inf. - return eigen_vec.isinf(); - } -}; - -bool HasInf(const framework::Tensor& tensor) { - HasInfPredicate predicate; - return Any(tensor, predicate); -} - -} // namespace framework -} // namespace paddle diff --git a/paddle/fluid/framework/tensor_util.cu b/paddle/fluid/framework/tensor_util.cu new file mode 120000 index 0000000000..edd88c4e54 --- /dev/null +++ b/paddle/fluid/framework/tensor_util.cu @@ -0,0 +1 @@ +tensor_util.cc \ No newline at end of file diff --git a/paddle/fluid/framework/tensor_util.h b/paddle/fluid/framework/tensor_util.h index f0464d4807..38b6d1c5c4 100644 --- a/paddle/fluid/framework/tensor_util.h +++ b/paddle/fluid/framework/tensor_util.h @@ -22,106 +22,38 @@ limitations under the License. */ namespace paddle { namespace framework { -/** - * @brief Copy the content of external tensor to a new place. - * - * @param[in] src The external tensor. - * @param[in] dst_place The dst place. 
- * @param[in] ctx        The device context contains device resources.
- *
- * @note    Copy supports CPU <-> GPU, GPU <-> GPU.
- */
-inline void Copy(const Tensor& src, const platform::Place& dst_place,
-                 const platform::DeviceContext& ctx, Tensor* dst) {
-  VLOG(3) << "Copy " << src.dims() << " from " << src.place() << " to "
-          << dst_place;
-  src.check_memory_size();
+void TensorCopy(const Tensor& src, const platform::Place& dst_place,
+                const platform::DeviceContext& ctx, Tensor* dst);
+void TensorCopy(const Tensor& src, const platform::Place& dst_place,
+                Tensor* dst);
 
-  dst->Resize(src.dims());
-  dst->set_layout(src.layout());
-  auto src_place = src.place();
-  auto src_ptr = src.data<void>();
+template <typename T>
+void TensorFromVector(const std::vector<T>& src,
+                      const platform::DeviceContext& ctx, Tensor* dst);
+template <typename T>
+void TensorFromVector(const std::vector<T>& src, Tensor* dst);
 
-  auto dst_ptr = dst->mutable_data(dst_place, src.type());
+template <typename T>
+void TensorToVector(const Tensor& src, const platform::DeviceContext& ctx,
+                    std::vector<T>* dst);
+template <typename T>
+void TensorToVector(const Tensor& src, std::vector<T>* dst);
 
-  auto size = src.numel() * SizeOfType(src.type());
+bool TensorContainsNAN(const framework::Tensor& tensor);
+bool TensorContainsInf(const framework::Tensor& tensor);
 
-  if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) {
-    memory::Copy(boost::get<platform::CPUPlace>(dst_place), dst_ptr,
-                 boost::get<platform::CPUPlace>(src_place), src_ptr, size);
-  }
-#ifdef PADDLE_WITH_CUDA
-  else if (platform::is_gpu_place(src_place) &&  // NOLINT
-           platform::is_cpu_place(dst_place)) {
-    auto src_gpu_place = boost::get<platform::CUDAPlace>(src_place);
-    auto dst_cpu_place = boost::get<platform::CPUPlace>(dst_place);
-    auto ctx_place = ctx.GetPlace();
-    PADDLE_ENFORCE(platform::is_gpu_place(ctx_place));
-    auto ctx_gpu_place = boost::get<platform::CUDAPlace>(ctx_place);
-    PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place);
-    memory::Copy(
-        dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size,
-        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream());
-  } else if (platform::is_cpu_place(src_place) &&
-             platform::is_gpu_place(dst_place)) {
-    auto src_cpu_place = boost::get<platform::CPUPlace>(src_place);
-    auto dst_gpu_place = boost::get<platform::CUDAPlace>(dst_place);
-    auto ctx_place = ctx.GetPlace();
-    PADDLE_ENFORCE(platform::is_gpu_place(ctx_place));
-    auto ctx_gpu_place = boost::get<platform::CUDAPlace>(ctx_place);
-    PADDLE_ENFORCE_EQ(dst_gpu_place, ctx_gpu_place);
-    memory::Copy(
-        dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size,
-        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream());
-  } else if (platform::is_gpu_place(src_place) &&
-             platform::is_gpu_place(dst_place)) {
-    auto src_gpu_place = boost::get<platform::CUDAPlace>(src_place);
-    auto dst_gpu_place = boost::get<platform::CUDAPlace>(dst_place);
-    auto ctx_place = ctx.GetPlace();
-    PADDLE_ENFORCE(platform::is_gpu_place(ctx_place));
-    auto ctx_gpu_place = boost::get<platform::CUDAPlace>(ctx_place);
-    PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place);
-    memory::Copy(
-        dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size,
-        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream());
-  }
-#endif
-}
+void TensorToStream(std::ostream& os, const Tensor& tensor,
+                    const platform::DeviceContext& dev_ctx);
+void TensorFromStream(std::istream& is, Tensor* tensor,
+                      const platform::DeviceContext& dev_ctx);
 
-/**
- * @brief   Wrapper on
- *     Copy(const Tensor& src, const platform::Place& dst_place,
- *          const platform::DeviceContext& ctx, Tensor* dst);
- *
- * @param[in] src        The external tensor.
- * @param[in] dst_place  The dst place.
- *
- * @note    Copy supports CPU <-> GPU, GPU <-> GPU.
- */ -inline void Copy(const Tensor& src, const platform::Place& dst_place, - Tensor* dst) { - platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); - const platform::DeviceContext* dev_ctx; - if (platform::is_gpu_place(src.place())) { - dev_ctx = pool.Get(src.place()); - } else { - dev_ctx = pool.Get(dst_place); - } - Copy(src, dst_place, *dev_ctx, dst); -} +// +// The implementation of template functions. +// -/** - * @brief Copy the content of an external vector to a tensor. - * - * @param[in] src The external tensor. - * @param[in] ctx The device context contains device resources. - * - * * @note CopyFromVector will resize dst to an 1D tensor with the same - * size as src. - */ template -inline void CopyFromVector(const std::vector& src, - const platform::DeviceContext& ctx, Tensor* dst) { +void TensorFromVector(const std::vector& src, + const platform::DeviceContext& ctx, Tensor* dst) { auto dst_place = ctx.GetPlace(); auto src_ptr = static_cast(src.data()); platform::CPUPlace src_place; @@ -143,11 +75,8 @@ inline void CopyFromVector(const std::vector& src, #endif } -/** - * @brief CopyFromVector CPU vector -> CPU Tensor - */ template -inline void CopyFromVector(const std::vector& src, Tensor* dst) { +void TensorFromVector(const std::vector& src, Tensor* dst) { platform::CPUPlace dst_place = platform::CPUPlace(); auto src_ptr = static_cast(src.data()); platform::CPUPlace src_place; @@ -158,18 +87,9 @@ inline void CopyFromVector(const std::vector& src, Tensor* dst) { memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size); } -/** - * @brief Copy the content of a tensor to a vector - * - * @param[in] src The external tensor. - * @param[in] ctx The device context contains device resources. - * - * * @note CopyFromVector assumes that the tensor has been resized - * before invoking. - */ template -inline void CopyToVector(const Tensor& src, const platform::DeviceContext& ctx, - std::vector* dst) { +void TensorToVector(const Tensor& src, const platform::DeviceContext& ctx, + std::vector* dst) { auto src_ptr = static_cast(src.data()); auto size = src.numel() * sizeof(T); @@ -191,11 +111,8 @@ inline void CopyToVector(const Tensor& src, const platform::DeviceContext& ctx, #endif } -/** - * @brief CopyToVector CPUTensor <-> CPU Vector - */ template -inline void CopyToVector(const Tensor& src, std::vector* dst) { +void TensorToVector(const Tensor& src, std::vector* dst) { auto src_ptr = static_cast(src.data()); auto size = src.numel() * sizeof(T); @@ -209,125 +126,5 @@ inline void CopyToVector(const Tensor& src, std::vector* dst) { src_ptr, size); } -// Returns true if a tensor contains NAN, i.e., Not A Number. -bool HasNAN(const framework::Tensor& tensor); - -// Returns true if a tensor contains Inf, i.e., Infinity. 
-bool HasInf(const framework::Tensor& tensor); - -inline void SerializeToStream(std::ostream& os, const Tensor& tensor, - const platform::DeviceContext& dev_ctx) { - // TODO(typhoonzero): serialize to ostream - { // the 1st field, uint32_t version - constexpr uint32_t version = 0; - os.write(reinterpret_cast(&version), sizeof(version)); - } - { // the 2nd field, tensor description - // int32_t size - // void* protobuf message - proto::VarType::TensorDesc desc; - desc.set_data_type(framework::ToDataType(tensor.type())); - auto dims = framework::vectorize(tensor.dims()); - auto* pb_dims = desc.mutable_dims(); - pb_dims->Resize(static_cast(dims.size()), 0); - std::copy(dims.begin(), dims.end(), pb_dims->begin()); - int32_t size = desc.ByteSize(); - os.write(reinterpret_cast(&size), sizeof(size)); - auto out = desc.SerializeAsString(); - os.write(out.data(), size); - } - { // the 3rd field, tensor data - uint64_t size = tensor.memory_size(); - auto* data_ptr = tensor.data(); - PADDLE_ENFORCE(size < std::numeric_limits::max(), - "Index overflow when writing tensor"); - if (platform::is_gpu_place(tensor.place())) { -#ifdef PADDLE_WITH_CUDA - constexpr size_t kBufSize = 1024 * 1024 * 64; // 64MB - std::unique_ptr buf(new char[kBufSize]); - auto& gpu_dev_ctx = - static_cast(dev_ctx); - platform::CPUPlace cpu; - uintptr_t data = reinterpret_cast(data_ptr); - while (size != 0) { - size_t size_to_write = std::min(kBufSize, static_cast(size)); - memory::Copy(cpu, buf.get(), - boost::get(tensor.place()), - reinterpret_cast(data), size_to_write, - gpu_dev_ctx.stream()); - gpu_dev_ctx.Wait(); - os.write(buf.get(), size_to_write); - data += size_to_write; - size -= size_to_write; - } -#else - PADDLE_THROW("Unexpected branch"); -#endif - } else { - os.write(static_cast(data_ptr), - static_cast(size)); - } - } -} - -struct DeserializedDataFunctor { - DeserializedDataFunctor(void** buf, Tensor* tensor, - const platform::Place& place) - : buf_(buf), tensor_(tensor), place_(place) {} - - template - void operator()() { - *buf_ = tensor_->mutable_data(place_); - } - - void** buf_; - Tensor* tensor_; - platform::Place place_; -}; - -inline void DeserializeFromStream(std::istream& is, Tensor* tensor, - const platform::DeviceContext& dev_ctx) { - uint32_t version; - is.read(reinterpret_cast(&version), sizeof(version)); - PADDLE_ENFORCE_EQ(version, 0U, "Only version 0 is supported"); - proto::VarType::TensorDesc desc; - { // int32_t size - // proto buffer - int32_t size; - is.read(reinterpret_cast(&size), sizeof(size)); - std::unique_ptr buf(new char[size]); - is.read(reinterpret_cast(buf.get()), size); - PADDLE_ENFORCE(desc.ParseFromArray(buf.get(), size), - "Cannot parse tensor desc"); - } - { // read tensor - std::vector dims; - dims.reserve(static_cast(desc.dims().size())); - std::copy(desc.dims().begin(), desc.dims().end(), std::back_inserter(dims)); - tensor->Resize(framework::make_ddim(dims)); - void* buf; - auto ctx = platform::CPUDeviceContext(); - if (platform::is_gpu_place(dev_ctx.GetPlace())) { -#ifdef PADDLE_WITH_CUDA - Tensor cpu_tensor; - cpu_tensor.Resize(framework::make_ddim(dims)); - framework::VisitDataType( - desc.data_type(), - DeserializedDataFunctor(&buf, &cpu_tensor, ctx.GetPlace())); - is.read(static_cast(buf), cpu_tensor.memory_size()); - auto dst_place = dev_ctx.GetPlace(); - framework::Copy(cpu_tensor, dst_place, dev_ctx, tensor); -#else - PADDLE_THROW("Unexpected branch"); -#endif - } else { - framework::VisitDataType( - desc.data_type(), - DeserializedDataFunctor(&buf, tensor, 
ctx.GetPlace())); - is.read(static_cast(buf), tensor->memory_size()); - } - } -} - } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/tensor_util_test.cc b/paddle/fluid/framework/tensor_util_test.cc index dcdbf9d395..8aebfcb3b6 100644 --- a/paddle/fluid/framework/tensor_util_test.cc +++ b/paddle/fluid/framework/tensor_util_test.cc @@ -20,7 +20,7 @@ namespace paddle { namespace framework { -TEST(Copy, Tensor) { +TEST(TensorCopy, Tensor) { Tensor src_tensor; Tensor dst_tensor; platform::CPUDeviceContext cpu_ctx((platform::CPUPlace())); @@ -33,7 +33,7 @@ TEST(Copy, Tensor) { src_tensor.set_layout(DataLayout::kAnyLayout); auto cpu_place = new platform::CPUPlace(); - Copy(src_tensor, *cpu_place, &dst_tensor); + TensorCopy(src_tensor, *cpu_place, &dst_tensor); const int* dst_ptr = dst_tensor.data(); ASSERT_NE(src_ptr, dst_ptr); @@ -44,7 +44,7 @@ TEST(Copy, Tensor) { EXPECT_TRUE(dst_tensor.layout() == src_tensor.layout()); Tensor slice_tensor = src_tensor.Slice(1, 2); - Copy(slice_tensor, *cpu_place, &dst_tensor); + TensorCopy(slice_tensor, *cpu_place, &dst_tensor); const int* slice_ptr = slice_tensor.data(); dst_ptr = dst_tensor.data(); ASSERT_NE(dst_ptr, slice_ptr); @@ -68,11 +68,11 @@ TEST(Copy, Tensor) { // CPU Tensor to GPU Tensor auto gpu_place = new platform::CUDAPlace(0); platform::CUDADeviceContext gpu_ctx(*gpu_place); - Copy(src_tensor, *gpu_place, gpu_ctx, &gpu_tensor); + TensorCopy(src_tensor, *gpu_place, gpu_ctx, &gpu_tensor); // GPU Tensor to CPU Tensor auto cpu_place = new platform::CPUPlace(); - Copy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor); + TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor); // Sync before Compare Tensors gpu_ctx.Wait(); @@ -85,10 +85,10 @@ TEST(Copy, Tensor) { Tensor slice_tensor = src_tensor.Slice(1, 2); // CPU Slice Tensor to GPU Tensor - Copy(slice_tensor, *gpu_place, gpu_ctx, &gpu_tensor); + TensorCopy(slice_tensor, *gpu_place, gpu_ctx, &gpu_tensor); // GPU Tensor to CPU Tensor - Copy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor); + TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor); // Sync before Compare Slice Tensors gpu_ctx.Wait(); @@ -104,7 +104,7 @@ TEST(Copy, Tensor) { #endif } -TEST(CopyFromVector, Tensor) { +TEST(TensorFromVector, Tensor) { using namespace paddle::framework; using namespace paddle::platform; { @@ -114,7 +114,7 @@ TEST(CopyFromVector, Tensor) { // Copy to CPU Tensor cpu_tensor.Resize(make_ddim({3, 3})); auto cpu_place = new paddle::platform::CPUPlace(); - CopyFromVector(src_vec, &cpu_tensor); + TensorFromVector(src_vec, &cpu_tensor); // Compare Tensors const int* cpu_ptr = cpu_tensor.data(); @@ -126,7 +126,7 @@ TEST(CopyFromVector, Tensor) { src_vec.erase(src_vec.begin(), src_vec.begin() + 5); cpu_tensor.Resize(make_ddim({2, 2})); - CopyFromVector(src_vec, &cpu_tensor); + TensorFromVector(src_vec, &cpu_tensor); cpu_ptr = cpu_tensor.data(); src_ptr = src_vec.data(); ASSERT_NE(src_ptr, cpu_ptr); @@ -148,15 +148,15 @@ TEST(CopyFromVector, Tensor) { cpu_tensor.Resize(make_ddim({3, 3})); auto cpu_place = new paddle::platform::CPUPlace(); CPUDeviceContext cpu_ctx(*cpu_place); - CopyFromVector(src_vec, cpu_ctx, &cpu_tensor); + TensorFromVector(src_vec, cpu_ctx, &cpu_tensor); // Copy to GPUTensor gpu_tensor.Resize(make_ddim({3, 3})); auto gpu_place = new paddle::platform::CUDAPlace(); CUDADeviceContext gpu_ctx(*gpu_place); - CopyFromVector(src_vec, gpu_ctx, &gpu_tensor); + TensorFromVector(src_vec, gpu_ctx, &gpu_tensor); // Copy from GPU to CPU tensor for comparison - Copy(gpu_tensor, 
*cpu_place, gpu_ctx, &dst_tensor); + TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor); // Sync before Compare Tensors gpu_ctx.Wait(); @@ -173,10 +173,10 @@ TEST(CopyFromVector, Tensor) { src_vec.erase(src_vec.begin(), src_vec.begin() + 5); cpu_tensor.Resize(make_ddim({2, 2})); - CopyFromVector(src_vec, cpu_ctx, &cpu_tensor); + TensorFromVector(src_vec, cpu_ctx, &cpu_tensor); gpu_tensor.Resize(make_ddim({2, 2})); - CopyFromVector(src_vec, gpu_ctx, &gpu_tensor); - Copy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor); + TensorFromVector(src_vec, gpu_ctx, &gpu_tensor); + TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor); // Sync before Compare Tensors gpu_ctx.Wait(); @@ -196,7 +196,7 @@ TEST(CopyFromVector, Tensor) { #endif } -TEST(CopyToVector, Tensor) { +TEST(TensorToVector, Tensor) { using namespace paddle::framework; using namespace paddle::platform; { @@ -208,7 +208,7 @@ TEST(CopyToVector, Tensor) { CPUPlace place; std::vector dst; - CopyToVector(src, &dst); + TensorToVector(src, &dst); for (int i = 0; i < 3 * 3; ++i) { EXPECT_EQ(src_ptr[i], dst[i]); @@ -220,10 +220,10 @@ TEST(CopyToVector, Tensor) { Tensor gpu_tensor; CUDAPlace place; CUDADeviceContext gpu_ctx(place); - CopyFromVector(src_vec, gpu_ctx, &gpu_tensor); + TensorFromVector(src_vec, gpu_ctx, &gpu_tensor); std::vector dst; - CopyToVector(gpu_tensor, gpu_ctx, &dst); + TensorToVector(gpu_tensor, gpu_ctx, &dst); for (int i = 0; i < 3 * 3; ++i) { EXPECT_EQ(src_vec[i], dst[i]); @@ -232,7 +232,7 @@ TEST(CopyToVector, Tensor) { #endif } -TEST(HasNAN, CPU) { +TEST(TensorContainsNAN, CPU) { using namespace paddle::framework; using namespace paddle::platform; Tensor src; @@ -240,11 +240,12 @@ TEST(HasNAN, CPU) { buf[0] = 0.0; buf[1] = NAN; buf[2] = 0.0; - - ASSERT_TRUE(HasNAN(src)); + ASSERT_TRUE(TensorContainsNAN(src)); + buf[1] = 0.0; + ASSERT_FALSE(TensorContainsNAN(src)); } -TEST(HasInf, CPU) { +TEST(TensorContainsInf, CPU) { using namespace paddle::framework; using namespace paddle::platform; Tensor src; @@ -252,10 +253,12 @@ TEST(HasInf, CPU) { buf[0] = 1.0; buf[1] = INFINITY; buf[2] = 0.0; - ASSERT_TRUE(HasInf(src)); + ASSERT_TRUE(TensorContainsInf(src)); + buf[1] = 1.0; + ASSERT_FALSE(TensorContainsInf(src)); } -TEST(Tensor, SerializeAndDeserialize) { +TEST(Tensor, FromAndToStream) { framework::Tensor src_tensor; int array[6] = {1, 2, 3, 4, 5, 6}; src_tensor.Resize({2, 3}); @@ -268,10 +271,10 @@ TEST(Tensor, SerializeAndDeserialize) { auto place = new platform::CPUPlace(); platform::CPUDeviceContext cpu_ctx(*place); std::ostringstream oss; - SerializeToStream(oss, src_tensor, cpu_ctx); + TensorToStream(oss, src_tensor, cpu_ctx); std::istringstream iss(oss.str()); - DeserializeFromStream(iss, &dst_tensor, cpu_ctx); + TensorFromStream(iss, &dst_tensor, cpu_ctx); int* dst_ptr = dst_tensor.mutable_data(platform::CPUPlace()); for (int i = 0; i < 5; ++i) { ASSERT_EQ(dst_ptr[i], array[i]); @@ -288,13 +291,13 @@ TEST(Tensor, SerializeAndDeserialize) { auto gpu_place = new platform::CUDAPlace(); platform::CUDADeviceContext gpu_ctx(*gpu_place); - Copy(src_tensor, *gpu_place, gpu_ctx, &gpu_tensor); + TensorCopy(src_tensor, *gpu_place, gpu_ctx, &gpu_tensor); std::ostringstream oss; - SerializeToStream(oss, gpu_tensor, gpu_ctx); + TensorToStream(oss, gpu_tensor, gpu_ctx); std::istringstream iss(oss.str()); - DeserializeFromStream(iss, &dst_tensor, gpu_ctx); + TensorFromStream(iss, &dst_tensor, gpu_ctx); int* dst_ptr = dst_tensor.mutable_data(platform::CPUPlace()); for (int i = 0; i < 6; ++i) { diff --git 
a/paddle/fluid/framework/tensor_util_test.cu b/paddle/fluid/framework/tensor_util_test.cu index 1982b642bc..d630ec44a2 100644 --- a/paddle/fluid/framework/tensor_util_test.cu +++ b/paddle/fluid/framework/tensor_util_test.cu @@ -31,7 +31,7 @@ static __global__ void FillInf(float* buf) { buf[2] = 0.5; } -TEST(HasNAN, GPU) { +TEST(TensorContainsNAN, GPU) { Tensor tensor; platform::CUDAPlace gpu(0); auto& pool = platform::DeviceContextPool::Instance(); @@ -39,10 +39,10 @@ TEST(HasNAN, GPU) { float* buf = tensor.mutable_data({3}, gpu); FillNAN<<<1, 1, 0, cuda_ctx->stream()>>>(buf); cuda_ctx->Wait(); - ASSERT_TRUE(HasNAN(tensor)); + ASSERT_TRUE(TensorContainsNAN(tensor)); } -TEST(HasInf, GPU) { +TEST(TensorContainsInf, GPU) { Tensor tensor; platform::CUDAPlace gpu(0); auto& pool = platform::DeviceContextPool::Instance(); @@ -50,7 +50,7 @@ TEST(HasInf, GPU) { float* buf = tensor.mutable_data({3}, gpu); FillInf<<<1, 1, 0, cuda_ctx->stream()>>>(buf); cuda_ctx->Wait(); - ASSERT_TRUE(HasInf(tensor)); + ASSERT_TRUE(TensorContainsInf(tensor)); } } // namespace framework diff --git a/paddle/fluid/framework/threadpool.h b/paddle/fluid/framework/threadpool.h index 606a93e13b..3adc260caf 100644 --- a/paddle/fluid/framework/threadpool.h +++ b/paddle/fluid/framework/threadpool.h @@ -64,7 +64,6 @@ class ThreadPool { Task task([fn]() -> std::unique_ptr { try { fn(); - return nullptr; } catch (platform::EnforceNotMet ex) { return std::unique_ptr( new platform::EnforceNotMet(ex)); @@ -73,6 +72,7 @@ class ThreadPool { << "Unexpected exception is catched in thread pool. All " "throwable exception in Fluid should be an EnforceNotMet."; } + return nullptr; }); std::future> f = task.get_future(); tasks_.push(std::move(task)); diff --git a/paddle/fluid/operators/array_operator.h b/paddle/fluid/operators/array_operator.h index d0fc153347..dbcc7abb09 100644 --- a/paddle/fluid/operators/array_operator.h +++ b/paddle/fluid/operators/array_operator.h @@ -42,7 +42,7 @@ class ArrayOp : public framework::OperatorBase { if (platform::is_gpu_place(i_tensor.place())) { // FIXME: Avoid copy from GPU to CPU framework::Tensor t; - framework::Copy(i_tensor, platform::CPUPlace(), dev_ctx, &t); + framework::TensorCopy(i_tensor, platform::CPUPlace(), dev_ctx, &t); dev_ctx.Wait(); offset = static_cast(*t.data()); } else { diff --git a/paddle/fluid/operators/array_to_lod_tensor_op.cc b/paddle/fluid/operators/array_to_lod_tensor_op.cc index f59bfad6cc..5db2e4540e 100644 --- a/paddle/fluid/operators/array_to_lod_tensor_op.cc +++ b/paddle/fluid/operators/array_to_lod_tensor_op.cc @@ -112,8 +112,8 @@ class ArrayToLoDTensorOp : public framework::OperatorBase { platform::DeviceContextPool::Instance(); auto &dev_ctx = *pool.Get(place); - framework::Copy(x[x_idx].Slice(start_offset, end_offset), place, - dev_ctx, &slice); + framework::TensorCopy(x[x_idx].Slice(start_offset, end_offset), place, + dev_ctx, &slice); out_offset += len; } } diff --git a/paddle/fluid/operators/assign_op.cc b/paddle/fluid/operators/assign_op.cc index e21dc6d77f..39ae3c0040 100644 --- a/paddle/fluid/operators/assign_op.cc +++ b/paddle/fluid/operators/assign_op.cc @@ -45,7 +45,7 @@ class AssignFunctor { out_rows.set_height(rows.height()); auto &t = rows.value(); auto *m = out_rows.mutable_value(); - framework::Copy(t, t.place(), dev_ctx_, m); + framework::TensorCopy(t, t.place(), dev_ctx_, m); } template @@ -57,7 +57,7 @@ class AssignFunctor { void copy_tensor(const framework::LoDTensor &lod_tensor, framework::LoDTensor *out) const { auto &out_tensor = *out; - 
Copy(lod_tensor, lod_tensor.place(), dev_ctx_, &out_tensor); + TensorCopy(lod_tensor, lod_tensor.place(), dev_ctx_, &out_tensor); out_tensor.set_lod(lod_tensor.lod()); } diff --git a/paddle/fluid/operators/assign_value_op.h b/paddle/fluid/operators/assign_value_op.h index 90c9496a3c..d51b215a08 100644 --- a/paddle/fluid/operators/assign_value_op.h +++ b/paddle/fluid/operators/assign_value_op.h @@ -41,7 +41,7 @@ class AssignValueKernel : public framework::OpKernel { break; } auto values = ctx.Attr>(value_name); - framework::CopyFromVector(values, ctx.device_context(), out); + framework::TensorFromVector(values, ctx.device_context(), out); out->Resize(framework::make_ddim(shape)); } }; diff --git a/paddle/fluid/operators/beam_search_decode_op.h b/paddle/fluid/operators/beam_search_decode_op.h index 40147ce1eb..3cc6ed3105 100644 --- a/paddle/fluid/operators/beam_search_decode_op.h +++ b/paddle/fluid/operators/beam_search_decode_op.h @@ -232,12 +232,12 @@ void BeamSearchDecoder::ConvertSentenceVectorToLodTensor( id_tensor->set_lod(lod); id_tensor->Resize({static_cast(id_data.size())}); id_tensor->mutable_data(paddle::platform::CPUPlace()); - framework::CopyFromVector(id_data, cpu_ctx, id_tensor); + framework::TensorFromVector(id_data, cpu_ctx, id_tensor); score_tensor->set_lod(lod); score_tensor->Resize({static_cast(score_data.size())}); score_tensor->mutable_data(paddle::platform::CPUPlace()); - framework::CopyFromVector(score_data, cpu_ctx, score_tensor); + framework::TensorFromVector(score_data, cpu_ctx, score_tensor); } template diff --git a/paddle/fluid/operators/detection_output_op.h b/paddle/fluid/operators/detection_output_op.h index 0aa5fc010d..af9081c934 100644 --- a/paddle/fluid/operators/detection_output_op.h +++ b/paddle/fluid/operators/detection_output_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -Indicesou may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + Indicesou may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ #pragma once #include "paddle/fluid/framework/op_registry.h" @@ -98,16 +98,16 @@ class DetectionOutputKernel : public framework::OpKernel { T* conf_data = conf_tensor.data(); if (platform::is_gpu_place(context.GetPlace())) { loc_cpu.mutable_data(loc_tensor.dims(), platform::CPUPlace()); - framework::Copy(loc_tensor, platform::CPUPlace(), - context.device_context(), &loc_cpu); + framework::TensorCopy(loc_tensor, platform::CPUPlace(), + context.device_context(), &loc_cpu); loc_data = loc_cpu.data(); conf_cpu.mutable_data(conf_tensor.dims(), platform::CPUPlace()); - framework::Copy(conf_tensor, platform::CPUPlace(), - context.device_context(), &conf_cpu); + framework::TensorCopy(conf_tensor, platform::CPUPlace(), + context.device_context(), &conf_cpu); conf_data = conf_cpu.data(); priorbox_cpu.mutable_data(in_priorbox->dims(), platform::CPUPlace()); - framework::Copy(*in_priorbox, platform::CPUPlace(), - context.device_context(), &priorbox_cpu); + framework::TensorCopy(*in_priorbox, platform::CPUPlace(), + context.device_context(), &priorbox_cpu); priorbox_data = priorbox_cpu.data(); } // get decode bboxes @@ -158,8 +158,8 @@ class DetectionOutputKernel : public framework::OpKernel { batch_size, all_indices, all_decoded_bboxes, out_data); if (platform::is_gpu_place(context.GetPlace())) { - framework::Copy(out_cpu, platform::CUDAPlace(), context.device_context(), - out); + framework::TensorCopy(out_cpu, platform::CUDAPlace(), + context.device_context(), out); } } }; diff --git a/paddle/fluid/operators/expand_op.h b/paddle/fluid/operators/expand_op.h index 953d75adae..2c2d5c7c42 100644 --- a/paddle/fluid/operators/expand_op.h +++ b/paddle/fluid/operators/expand_op.h @@ -126,7 +126,8 @@ class ExpandGradKernel : public framework::OpKernel { auto* in0 = context.Input(framework::GradVarName("Out")); auto* out0 = context.Output(framework::GradVarName("X")); out0->mutable_data(context.GetPlace()); - framework::Copy(*in0, context.GetPlace(), context.device_context(), out0); + framework::TensorCopy(*in0, context.GetPlace(), context.device_context(), + out0); } else { switch (dims) { REP_EXPAND_GRAD_TEMPLATE(72) diff --git a/paddle/fluid/operators/feed_op.cc b/paddle/fluid/operators/feed_op.cc index 438d975429..90c31877f6 100644 --- a/paddle/fluid/operators/feed_op.cc +++ b/paddle/fluid/operators/feed_op.cc @@ -57,7 +57,7 @@ class FeedOp : public framework::OperatorBase { if (platform::is_same_place(feed_item.place(), place)) { out_item->ShareDataWith(feed_item); } else { - framework::Copy(feed_item, place, dev_ctx, out_item); + framework::TensorCopy(feed_item, place, dev_ctx, out_item); } out_item->set_lod(feed_item.lod()); } diff --git a/paddle/fluid/operators/fetch_op.cc b/paddle/fluid/operators/fetch_op.cc index 2684e64634..d66f01d1b7 100644 --- a/paddle/fluid/operators/fetch_op.cc +++ b/paddle/fluid/operators/fetch_op.cc @@ -56,7 +56,7 @@ class FetchOp : public framework::OperatorBase { platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto &dev_ctx = *pool.Get(src_item.place()); - Copy(src_item, platform::CPUPlace(), dev_ctx, &dst_item); + TensorCopy(src_item, platform::CPUPlace(), dev_ctx, &dst_item); dev_ctx.Wait(); dst_item.set_lod(src_item.lod()); diff --git a/paddle/fluid/operators/fill_op.cc b/paddle/fluid/operators/fill_op.cc index c505c739d4..3b4b409231 100644 --- a/paddle/fluid/operators/fill_op.cc +++ b/paddle/fluid/operators/fill_op.cc @@ -74,7 +74,7 @@ class FillOp : public framework::OperatorBase { platform::DeviceContextPool &pool = 
platform::DeviceContextPool::Instance(); auto &dev_ctx = *pool.Get(place); - framework::Copy(tensor, place, dev_ctx, &out); + framework::TensorCopy(tensor, place, dev_ctx, &out); } } }; diff --git a/paddle/fluid/operators/layer_norm_op.h b/paddle/fluid/operators/layer_norm_op.h index 84f5a40aac..605b5c258c 100644 --- a/paddle/fluid/operators/layer_norm_op.h +++ b/paddle/fluid/operators/layer_norm_op.h @@ -196,7 +196,7 @@ class LayerNormGradKernel : public framework::OpKernel { // dy_dx ElementwiseComputeEx, DeviceContext, T>( ctx, &d_y, scale, /*axis*/ 1, MulFunctor(), &temp); - framework::Copy(temp, ctx.GetPlace(), ctx.device_context(), d_x); + framework::TensorCopy(temp, ctx.GetPlace(), ctx.device_context(), d_x); // dy_dmean_dx row_mean(dev_ctx, temp, &temp_vec); @@ -208,7 +208,7 @@ class LayerNormGradKernel : public framework::OpKernel { ctx, &temp, &temp_norm, /*axis*/ 0, MulFunctor(), &temp); } else { // dy_dx - framework::Copy(d_y, ctx.GetPlace(), ctx.device_context(), d_x); + framework::TensorCopy(d_y, ctx.GetPlace(), ctx.device_context(), d_x); // dy_dmean_dx row_mean(dev_ctx, d_y, &temp_vec); diff --git a/paddle/fluid/operators/load_combine_op.cc b/paddle/fluid/operators/load_combine_op.cc index ba8fc4a683..e5353144e9 100644 --- a/paddle/fluid/operators/load_combine_op.cc +++ b/paddle/fluid/operators/load_combine_op.cc @@ -69,7 +69,7 @@ class LoadCombineOp : public framework::OperatorBase { out_var->Clear(); tensor = out_var->GetMutable(); tensor->set_lod(cpu_tensor.lod()); - Copy(cpu_tensor, place, dev_ctx, tensor); + TensorCopy(cpu_tensor, place, dev_ctx, tensor); } } } diff --git a/paddle/fluid/operators/load_op.cc b/paddle/fluid/operators/load_op.cc index d72b7a7eb9..05f809ac56 100644 --- a/paddle/fluid/operators/load_op.cc +++ b/paddle/fluid/operators/load_op.cc @@ -55,7 +55,7 @@ class LoadOp : public framework::OperatorBase { out_var->Clear(); tensor = out_var->GetMutable(); tensor->set_lod(cpu_tensor.lod()); - Copy(cpu_tensor, place, dev_ctx, tensor); + TensorCopy(cpu_tensor, place, dev_ctx, tensor); } } }; diff --git a/paddle/fluid/operators/lod_reset_op.h b/paddle/fluid/operators/lod_reset_op.h index e612bc2d36..8186d4f826 100644 --- a/paddle/fluid/operators/lod_reset_op.h +++ b/paddle/fluid/operators/lod_reset_op.h @@ -33,8 +33,8 @@ class LoDResetKernel : public framework::OpKernel { auto* lod = lod_t->data(); if (platform::is_gpu_place(ctx.GetPlace())) { framework::Tensor lod_cpu; - framework::Copy(*lod_t, platform::CPUPlace(), ctx.device_context(), - &lod_cpu); + framework::TensorCopy(*lod_t, platform::CPUPlace(), + ctx.device_context(), &lod_cpu); lod = lod_cpu.data(); } level0 = std::vector(lod, lod + lod_t->numel()); diff --git a/paddle/fluid/operators/lod_tensor_to_array_op.cc b/paddle/fluid/operators/lod_tensor_to_array_op.cc index b5e778a581..543495ce4e 100644 --- a/paddle/fluid/operators/lod_tensor_to_array_op.cc +++ b/paddle/fluid/operators/lod_tensor_to_array_op.cc @@ -94,9 +94,9 @@ class LoDTensorToArrayOp : public framework::OperatorBase { platform::DeviceContextPool::Instance(); auto &dev_ctx = *pool.Get(place); - framework::Copy(x.Slice(static_cast(each_range.begin), - static_cast(each_range.end)), - x.place(), dev_ctx, &slice); + framework::TensorCopy(x.Slice(static_cast(each_range.begin), + static_cast(each_range.end)), + x.place(), dev_ctx, &slice); offset += len; } } diff --git a/paddle/fluid/operators/math/context_project.h b/paddle/fluid/operators/math/context_project.h index 83f6ae45fc..4da94383af 100644 --- 
a/paddle/fluid/operators/math/context_project.h +++ b/paddle/fluid/operators/math/context_project.h @@ -149,7 +149,8 @@ class ContextProjectFunctor { Tensor out_t_sub = out_t.Slice(k * context_length, k * context_length + padding_size); Tensor w_sub = padding_data.Slice(k, k + padding_size); - framework::Copy(w_sub, context.GetPlace(), context, &out_t_sub); + framework::TensorCopy(w_sub, context.GetPlace(), context, + &out_t_sub); } } if (down_pad > 0) { // add down pad @@ -179,7 +180,8 @@ class ContextProjectFunctor { (down_pad_begin_row + t) * context_length); Tensor w_sub = padding_data.Slice( up_pad + padding_idx, up_pad + padding_idx + padding_size); - framework::Copy(w_sub, context.GetPlace(), context, &out_t_sub); + framework::TensorCopy(w_sub, context.GetPlace(), context, + &out_t_sub); } } out_t.Resize({sequence_height, context_length * sequence_width}); diff --git a/paddle/fluid/operators/math/im2col_test.cc b/paddle/fluid/operators/math/im2col_test.cc index 3051925315..b3978536bc 100644 --- a/paddle/fluid/operators/math/im2col_test.cc +++ b/paddle/fluid/operators/math/im2col_test.cc @@ -62,7 +62,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - Copy(input_tmp, *place, *context, &input); + TensorCopy(input_tmp, *place, *context, &input); } output_cfo.mutable_data( {1, filter_size, filter_size, output_height, output_width}, *place); @@ -87,7 +87,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { out_cfo_ptr = output_cfo.data(); } else { - Copy(output_cfo, paddle::platform::CPUPlace(), *context, &output_tmp); + TensorCopy(output_cfo, paddle::platform::CPUPlace(), *context, &output_tmp); out_cfo_ptr = output_tmp.data(); } for (int i = 0; i < 6; ++i) { @@ -98,7 +98,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { out_ocf_ptr = output_ocf.data(); } else { - Copy(output_ocf, paddle::platform::CPUPlace(), *context, &output_tmp); + TensorCopy(output_ocf, paddle::platform::CPUPlace(), *context, &output_tmp); out_ocf_ptr = output_tmp.data(); } @@ -119,7 +119,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - Copy(input_tmp, *place, *context, &input); + TensorCopy(input_tmp, *place, *context, &input); } col2im(*context, output_cfo, dilation, stride, padding, &input); @@ -128,7 +128,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { in_ptr = input.data(); } else { - Copy(input, paddle::platform::CPUPlace(), *context, &input_tmp); + TensorCopy(input, paddle::platform::CPUPlace(), *context, &input_tmp); in_ptr = input_tmp.data(); } for (int i = 0; i < 6; ++i) { @@ -140,7 +140,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - Copy(input_tmp, *place, *context, &input); + TensorCopy(input_tmp, *place, *context, &input); } col2im_ocf(*context, output_ocf, dilation, stride, padding, &input); @@ -148,7 +148,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { in_ptr = input.data(); } else { - Copy(input, paddle::platform::CPUPlace(), *context, &input_tmp); + TensorCopy(input, paddle::platform::CPUPlace(), *context, &input_tmp); in_ptr = input_tmp.data(); } for (int i = 0; i < 6; ++i) { diff --git a/paddle/fluid/operators/math/math_function_test.cu b/paddle/fluid/operators/math/math_function_test.cu index f333c6c98e..207d6a87bc 100644 --- a/paddle/fluid/operators/math/math_function_test.cu +++ b/paddle/fluid/operators/math/math_function_test.cu @@ -29,15 +29,15 @@ 
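Every hunk in this test file is the same mechanical rename, so the underlying pattern is worth stating once: stage host data onto the device, run the routine under test, copy the result back, and synchronize before reading. A condensed sketch of that round trip under the same API (all names here are illustrative, not from the patch):

    paddle::framework::Tensor cpu_in, gpu_buf, cpu_out;
    float* in = cpu_in.mutable_data<float>(paddle::framework::make_ddim({2, 2}),
                                           paddle::platform::CPUPlace());
    for (int i = 0; i < 4; ++i) in[i] = static_cast<float>(i);

    paddle::platform::CUDAPlace gpu(0);
    paddle::platform::CUDADeviceContext ctx(gpu);
    paddle::framework::TensorCopy(cpu_in, gpu, ctx, &gpu_buf);   // host -> device
    // ... launch the math function under test on gpu_buf ...
    paddle::framework::TensorCopy(gpu_buf, paddle::platform::CPUPlace(), ctx,
                                  &cpu_out);                     // device -> host
    ctx.Wait();  // copies run on the context's stream; sync before reading
    const float* out = cpu_out.data<float>();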
TEST(math_function, notrans_mul_trans) { auto* gpu_place = new paddle::platform::CUDAPlace(0); paddle::platform::CUDADeviceContext context(*gpu_place); - paddle::framework::Copy(input1, *gpu_place, context, &input1_gpu); - paddle::framework::Copy(input1, *gpu_place, context, &input2_gpu); + paddle::framework::TensorCopy(input1, *gpu_place, context, &input1_gpu); + paddle::framework::TensorCopy(input1, *gpu_place, context, &input2_gpu); out_gpu.mutable_data({2, 2}, *gpu_place); paddle::operators::math::matmul( context, input1_gpu, false, input2_gpu, true, 1, &out_gpu, 0); - paddle::framework::Copy(out_gpu, *cpu_place, context, &out); + paddle::framework::TensorCopy(out_gpu, *cpu_place, context, &out); float* out_ptr = out.data(); context.Wait(); @@ -63,15 +63,15 @@ TEST(math_function, trans_mul_notrans) { auto* gpu_place = new paddle::platform::CUDAPlace(0); paddle::platform::CUDADeviceContext context(*gpu_place); - paddle::framework::Copy(input1, *gpu_place, context, &input1_gpu); - paddle::framework::Copy(input1, *gpu_place, context, &input2_gpu); + paddle::framework::TensorCopy(input1, *gpu_place, context, &input1_gpu); + paddle::framework::TensorCopy(input1, *gpu_place, context, &input2_gpu); out_gpu.mutable_data({3, 3}, *gpu_place); paddle::operators::math::matmul( context, input1_gpu, true, input2_gpu, false, 1, &out_gpu, 0); - paddle::framework::Copy(out_gpu, *cpu_place, context, &out); + paddle::framework::TensorCopy(out_gpu, *cpu_place, context, &out); float* out_ptr = out.data(); context.Wait(); @@ -112,9 +112,9 @@ TEST(math_function, gemm_notrans_cublas) { auto* gpu_place = new paddle::platform::CUDAPlace(0); paddle::platform::CUDADeviceContext context(*gpu_place); - paddle::framework::Copy(input1, *gpu_place, context, &input1_gpu); - paddle::framework::Copy(input2, *gpu_place, context, &input2_gpu); - paddle::framework::Copy(input3, *gpu_place, context, &input3_gpu); + paddle::framework::TensorCopy(input1, *gpu_place, context, &input1_gpu); + paddle::framework::TensorCopy(input2, *gpu_place, context, &input2_gpu); + paddle::framework::TensorCopy(input3, *gpu_place, context, &input3_gpu); float* a = input1_gpu.data(); float* b = input2_gpu.data(); float* c = input3_gpu.mutable_data(*gpu_place); @@ -122,7 +122,7 @@ TEST(math_function, gemm_notrans_cublas) { paddle::operators::math::gemm( context, false, false, m, n, k, 1, a, 3, b + 1, 4, 1, c + 1, 4); - paddle::framework::Copy(input3_gpu, *cpu_place, context, &input3); + paddle::framework::TensorCopy(input3_gpu, *cpu_place, context, &input3); // numpy code: // a = np.arange(6).reshape(2, 3) @@ -167,9 +167,9 @@ TEST(math_function, gemm_trans_cublas) { auto* gpu_place = new paddle::platform::CUDAPlace(0); paddle::platform::CUDADeviceContext context(*gpu_place); - paddle::framework::Copy(input1, *gpu_place, context, &input1_gpu); - paddle::framework::Copy(input2, *gpu_place, context, &input2_gpu); - paddle::framework::Copy(input3, *gpu_place, context, &input3_gpu); + paddle::framework::TensorCopy(input1, *gpu_place, context, &input1_gpu); + paddle::framework::TensorCopy(input2, *gpu_place, context, &input2_gpu); + paddle::framework::TensorCopy(input3, *gpu_place, context, &input3_gpu); float* a = input1_gpu.data(); float* b = input2_gpu.data(); float* c = input3_gpu.mutable_data(*gpu_place); @@ -177,7 +177,7 @@ TEST(math_function, gemm_trans_cublas) { paddle::operators::math::gemm( context, false, true, m, n, k, 1, a, 3, b + 3, 3, 1, c + 1, 4); - paddle::framework::Copy(input3_gpu, *cpu_place, context, &input3); + 
paddle::framework::TensorCopy(input3_gpu, *cpu_place, context, &input3); context.Wait(); EXPECT_EQ(input3_ptr[0], 0); @@ -218,15 +218,15 @@ void GemvTest(int m, int n, bool trans) { } paddle::platform::CUDADeviceContext context(*gpu_place); - paddle::framework::Copy(mat_a, *gpu_place, context, &g_mat_a); - paddle::framework::Copy(vec_b, *gpu_place, context, &g_vec_b); + paddle::framework::TensorCopy(mat_a, *gpu_place, context, &g_mat_a); + paddle::framework::TensorCopy(vec_b, *gpu_place, context, &g_vec_b); paddle::operators::math::gemv( context, trans, static_cast(m), static_cast(n), 1., g_data_a, g_data_b, 0., g_data_c); - paddle::framework::Copy(g_vec_c, paddle::platform::CPUPlace(), context, - &vec_c); + paddle::framework::TensorCopy(g_vec_c, paddle::platform::CPUPlace(), context, + &vec_c); if (!trans) { for (int i = 0; i < m; ++i) { diff --git a/paddle/fluid/operators/math/selected_rows_functor_test.cu b/paddle/fluid/operators/math/selected_rows_functor_test.cu index cefe239bd2..942d9b13fc 100644 --- a/paddle/fluid/operators/math/selected_rows_functor_test.cu +++ b/paddle/fluid/operators/math/selected_rows_functor_test.cu @@ -67,7 +67,7 @@ TEST(selected_rows_functor, gpu_add) { EXPECT_EQ(out_rows[6], 9); Tensor out_cpu; - Copy(*out_value, cpu_place, ctx, &out_cpu); + TensorCopy(*out_value, cpu_place, ctx, &out_cpu); ctx.Wait(); auto* out_cpu_data = out_cpu.data(); @@ -94,7 +94,7 @@ TEST(selected_rows_functor, gpu_add) { add_tensor_functor(ctx, *output, *tensor1, tensor2.get()); Tensor tensor2_cpu; - Copy(*tensor2, cpu_place, ctx, &tensor2_cpu); + TensorCopy(*tensor2, cpu_place, ctx, &tensor2_cpu); ctx.Wait(); auto* tensor2_cpu_data = tensor2_cpu.data(); @@ -167,7 +167,7 @@ TEST(selected_rows_functor, gpu_add_to) { EXPECT_EQ(out_rows[6], 9); Tensor out_cpu; - Copy(*out_value, cpu_place, ctx, &out_cpu); + TensorCopy(*out_value, cpu_place, ctx, &out_cpu); ctx.Wait(); auto* out_cpu_data = out_cpu.data(); @@ -191,7 +191,7 @@ TEST(selected_rows_functor, gpu_add_to) { add_to_tensor_functor(ctx, *output, tensor1.get()); Tensor tensor1_cpu; - Copy(*tensor1, cpu_place, ctx, &tensor1_cpu); + TensorCopy(*tensor1, cpu_place, ctx, &tensor1_cpu); ctx.Wait(); auto* tensor1_cpu_data = tensor1_cpu.data(); diff --git a/paddle/fluid/operators/math/sequence_padding.cu b/paddle/fluid/operators/math/sequence_padding.cu index 9eb52f6fd9..c044e6fc32 100644 --- a/paddle/fluid/operators/math/sequence_padding.cu +++ b/paddle/fluid/operators/math/sequence_padding.cu @@ -97,7 +97,7 @@ class PaddingLoDTensorFunctor { "width of sequence in LoDTensor seq."); if (!norm_by_times && num_sequences == 1UL) { - Copy(seq, context.GetPlace(), context, &padding); + TensorCopy(seq, context.GetPlace(), context, &padding); padding.Resize(padding_dims); return; } @@ -172,7 +172,7 @@ class UnpaddingLoDTensorFunctor { "width of sequence in LoDTensor seq."); if (!norm_by_times && num_sequences == 1UL) { - Copy(padding, context.GetPlace(), context, &seq); + TensorCopy(padding, context.GetPlace(), context, &seq); seq.Resize(seq_dims); return; } diff --git a/paddle/fluid/operators/math/sequence_padding_test.cc b/paddle/fluid/operators/math/sequence_padding_test.cc index e1177fb0d7..bece46e753 100644 --- a/paddle/fluid/operators/math/sequence_padding_test.cc +++ b/paddle/fluid/operators/math/sequence_padding_test.cc @@ -40,7 +40,7 @@ void TestSequencePadding(const paddle::framework::LoD& lod, if (paddle::platform::is_cpu_place(*place)) { seq = cpu_seq; } else { - Copy(cpu_seq, *place, *context, &seq); + TensorCopy(cpu_seq, *place, 
*context, &seq); seq.set_lod(lod); } @@ -63,7 +63,7 @@ void TestSequencePadding(const paddle::framework::LoD& lod, if (paddle::platform::is_cpu_place(*place)) { cpu_seq_back = seq_back; } else { - Copy(seq_back, paddle::platform::CPUPlace(), *context, &cpu_seq_back); + TensorCopy(seq_back, paddle::platform::CPUPlace(), *context, &cpu_seq_back); cpu_seq_back.set_lod(lod); } diff --git a/paddle/fluid/operators/math/vol2col_test.cc b/paddle/fluid/operators/math/vol2col_test.cc index 751d3ef19a..eb91f862e3 100644 --- a/paddle/fluid/operators/math/vol2col_test.cc +++ b/paddle/fluid/operators/math/vol2col_test.cc @@ -71,7 +71,7 @@ void testVol2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - Copy(input_tmp, *place, *context, &input); + paddle::framework::TensorCopy(input_tmp, *place, *context, &input); } output.mutable_data({1, filter_size, filter_size, filter_size, output_depth, output_height, output_width}, @@ -85,7 +85,7 @@ void testVol2col() { if (paddle::platform::is_cpu_place(*place)) { out_cfo_ptr = output.data(); } else { - Copy(output, paddle::platform::CPUPlace(), *context, &output_tmp); + TensorCopy(output, paddle::platform::CPUPlace(), *context, &output_tmp); out_cfo_ptr = output_tmp.data(); } @@ -99,7 +99,7 @@ void testVol2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - Copy(input_tmp, *place, *context, &input); + TensorCopy(input_tmp, *place, *context, &input); } paddle::operators::math::Col2VolFunctor col2vol; @@ -109,7 +109,7 @@ void testVol2col() { if (paddle::platform::is_cpu_place(*place)) { in_ptr = input.data(); } else { - Copy(input, paddle::platform::CPUPlace(), *context, &input_tmp); + TensorCopy(input, paddle::platform::CPUPlace(), *context, &input_tmp); in_ptr = input_tmp.data(); } diff --git a/paddle/fluid/operators/merge_lod_tensor_op.cc b/paddle/fluid/operators/merge_lod_tensor_op.cc index 42ebc8e471..4ebf20cbba 100644 --- a/paddle/fluid/operators/merge_lod_tensor_op.cc +++ b/paddle/fluid/operators/merge_lod_tensor_op.cc @@ -51,7 +51,8 @@ class MergeLoDTensorOp : public framework::OperatorBase { cpu_mask->ShareDataWith(mask); } else if (platform::is_gpu_place(mask.place())) { #ifdef PADDLE_WITH_CUDA - framework::Copy(mask, platform::CPUPlace(), dev_ctx, cpu_mask.get()); + framework::TensorCopy(mask, platform::CPUPlace(), dev_ctx, + cpu_mask.get()); #else PADDLE_THROW("Not supported GPU, Please compile WITH_GPU option"); #endif @@ -106,8 +107,8 @@ class MergeLoDTensorOp : public framework::OperatorBase { continue; } auto slice = out->Slice(out_offset, out_offset + len); - framework::Copy(input->Slice(start_offset, end_offset), place, dev_ctx, - &slice); + framework::TensorCopy(input->Slice(start_offset, end_offset), place, + dev_ctx, &slice); out_offset += len; (*in_idx) += 1; } diff --git a/paddle/fluid/operators/mine_hard_examples_op.cc b/paddle/fluid/operators/mine_hard_examples_op.cc index 2128979fae..b7e9f4e224 100644 --- a/paddle/fluid/operators/mine_hard_examples_op.cc +++ b/paddle/fluid/operators/mine_hard_examples_op.cc @@ -67,7 +67,8 @@ class MineHardExamplesKernel : public framework::OpKernel { auto out_match_indices = ctx.Output("UpdatedMatchIndices"); - framework::Copy(*in_matched_indices, ctx.GetPlace(), out_match_indices); + framework::TensorCopy(*in_matched_indices, ctx.GetPlace(), + out_match_indices); int batch_size = in_matched_indices->dims()[0]; int prior_num = in_matched_indices->dims()[1]; diff --git a/paddle/fluid/operators/multiplex_op.cu 
b/paddle/fluid/operators/multiplex_op.cu index cb89eeecfb..45a2550793 100644 --- a/paddle/fluid/operators/multiplex_op.cu +++ b/paddle/fluid/operators/multiplex_op.cu @@ -33,7 +33,7 @@ class MultiplexGPUKernel : public framework::OpKernel { auto cols = ins[0]->numel() / rows; // copy index to cpu Tensor index_t_cpu; - Copy(*ids, platform::CPUPlace(), ctx.device_context(), &index_t_cpu); + TensorCopy(*ids, platform::CPUPlace(), ctx.device_context(), &index_t_cpu); auto* index = index_t_cpu.data(); auto stream = ctx.cuda_device_context().stream(); platform::CUDAPlace place = boost::get(ctx.GetPlace()); @@ -69,7 +69,7 @@ class MultiplexGradGPUKernel : public framework::OpKernel { auto cols = ins[0]->numel() / rows; // copy index to cpu Tensor index_t_cpu; - Copy(*ids, platform::CPUPlace(), ctx.device_context(), &index_t_cpu); + TensorCopy(*ids, platform::CPUPlace(), ctx.device_context(), &index_t_cpu); auto* index = index_t_cpu.data(); auto stream = ctx.cuda_device_context().stream(); diff --git a/paddle/fluid/operators/nccl_op_test.cu.cc b/paddle/fluid/operators/nccl_op_test.cu.cc index 24e30f54a1..b4021a5dac 100644 --- a/paddle/fluid/operators/nccl_op_test.cu.cc +++ b/paddle/fluid/operators/nccl_op_test.cu.cc @@ -98,7 +98,7 @@ class NCCLTester : public ::testing::Test { send_tensor->mutable_data(kDims, place); std::vector send_vector(f::product(kDims), gpu_id); - paddle::framework::CopyFromVector(send_vector, *ctx, send_tensor); + paddle::framework::TensorFromVector(send_vector, *ctx, send_tensor); ctx->Wait(); VLOG(1) << "Send Tensor filled with elements " << send_tensor->numel(); } diff --git a/paddle/fluid/operators/parallel_do_op.cc b/paddle/fluid/operators/parallel_do_op.cc index 88c83ee213..b21f9937ef 100644 --- a/paddle/fluid/operators/parallel_do_op.cc +++ b/paddle/fluid/operators/parallel_do_op.cc @@ -78,7 +78,7 @@ inline void CopyOrShare(const framework::Variable &src, dst->GetMutable()->ShareDataWith(src.Get()); dst->GetMutable()->set_lod(src.Get().lod()); } else { - Copy(src.Get(), dst_place, dst->GetMutable()); + TensorCopy(src.Get(), dst_place, dst->GetMutable()); } } else if (src.IsType()) { auto &src_sr = src.Get(); @@ -88,7 +88,7 @@ inline void CopyOrShare(const framework::Variable &src, dst_sr->mutable_value()->ShareDataWith(src_sr.value()); dst_sr->set_rows(src_sr.rows()); } else { - Copy(src_sr.value(), dst_place, dst_sr->mutable_value()); + TensorCopy(src_sr.value(), dst_place, dst_sr->mutable_value()); } } else { PADDLE_THROW("Expect LoDTensor/SelectedRows, get %s", src.Type().name()); @@ -146,7 +146,7 @@ class ParallelDoOp : public framework::OperatorBase { auto &place = places[i]; auto *sub_scope = sub_scopes[i]; auto *dst = sub_scope->Var(param)->GetMutable(); - framework::Copy(src, place, dst); + framework::TensorCopy(src, place, dst); } } WaitOnPlaces(places); diff --git a/paddle/fluid/operators/print_op.cc b/paddle/fluid/operators/print_op.cc index 7fa2b060af..fc09b4aa1d 100644 --- a/paddle/fluid/operators/print_op.cc +++ b/paddle/fluid/operators/print_op.cc @@ -179,7 +179,7 @@ class TensorPrintOp : public framework::OperatorBase { } else { // copy data to cpu to print platform::CPUPlace place; - framework::Copy(in_tensor, place, &printed_tensor); + framework::TensorCopy(in_tensor, place, &printed_tensor); } Formater formater; diff --git a/paddle/fluid/operators/recurrent_op.cc b/paddle/fluid/operators/recurrent_op.cc index 8435d6bcf0..00241e7682 100644 --- a/paddle/fluid/operators/recurrent_op.cc +++ b/paddle/fluid/operators/recurrent_op.cc @@ -291,7 +291,7 @@ 
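The RecurrentOp hunks below copy one time step into a Slice() of a pre-allocated output. A slice shares memory with its parent tensor, so the TensorCopy lands directly in the larger buffer. A minimal sketch of that pattern (names illustrative, CPU-only for brevity):

    paddle::framework::Tensor whole, step_out;
    whole.mutable_data<float>(paddle::framework::make_ddim({4, 8}),
                              paddle::platform::CPUPlace());
    float* s = step_out.mutable_data<float>(paddle::framework::make_ddim({1, 8}),
                                            paddle::platform::CPUPlace());
    for (int i = 0; i < 8; ++i) s[i] = 1.0f;

    paddle::platform::CPUDeviceContext cpu_ctx;
    auto dst = whole.Slice(1, 2);  // view over row 1; shares whole's memory
    paddle::framework::TensorCopy(step_out, paddle::platform::CPUPlace(),
                                  cpu_ctx, &dst);  // deep copy into the view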
class RecurrentOp : public RecurrentBase { auto dst_out = dst_tensor->Slice(seq_offset, seq_offset + 1); // Explicit copy output since the local RNN scope can be destroyed // early. - framework::Copy(src_tensor, place, dev_ctx, &dst_out); + framework::TensorCopy(src_tensor, place, dev_ctx, &dst_out); }); scopes.Next(); @@ -378,7 +378,7 @@ class RecurrentGradOp : public RecurrentBase { auto *cur_grad_var = cur_scope.Var(cur_grad); auto cur_grad_tensor = cur_grad_var->GetMutable(); - framework::Copy(ex_tensor, place, dev_ctx, cur_grad_tensor); + framework::TensorCopy(ex_tensor, place, dev_ctx, cur_grad_tensor); } } @@ -452,7 +452,7 @@ class RecurrentGradOp : public RecurrentBase { } auto dst = outside->Slice(seq_offset, seq_offset + 1); - framework::Copy(inside, place, dev_ctx, &dst); + framework::TensorCopy(inside, place, dev_ctx, &dst); }); VLOG(5) << "Link outside gradient finished "; @@ -465,7 +465,7 @@ class RecurrentGradOp : public RecurrentBase { framework::LoDTensor *outside) { outside->Resize(inside.dims()); outside->mutable_data(place, inside.type()); - framework::Copy(inside, place, dev_ctx, outside); + framework::TensorCopy(inside, place, dev_ctx, outside); }); VLOG(5) << "Link initialize state gradient finished "; } diff --git a/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc b/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc index b0df932f43..5c3e1f5678 100644 --- a/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc +++ b/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc @@ -170,7 +170,7 @@ class ReorderLoDTensorByRankTableBase : public framework::OperatorBase { platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto &dev_ctx = *pool.Get(place); - framework::Copy(x_sliced, out_sliced.place(), dev_ctx, &out_sliced); + framework::TensorCopy(x_sliced, out_sliced.place(), dev_ctx, &out_sliced); out_offset += len; return out_offset; } diff --git a/paddle/fluid/operators/reshape_op.h b/paddle/fluid/operators/reshape_op.h index c01100ef4d..1357bce4b7 100644 --- a/paddle/fluid/operators/reshape_op.h +++ b/paddle/fluid/operators/reshape_op.h @@ -28,7 +28,7 @@ class ReshapeKernel : public framework::OpKernel { auto* in = ctx.Input("X"); auto out_dims = out->dims(); out->mutable_data(ctx.GetPlace()); - framework::Copy(*in, ctx.GetPlace(), ctx.device_context(), out); + framework::TensorCopy(*in, ctx.GetPlace(), ctx.device_context(), out); out->Resize(out_dims); } }; @@ -42,7 +42,7 @@ class ReshapeGradKernel : public framework::OpKernel { d_x->mutable_data(ctx.GetPlace()); auto in_dims = d_x->dims(); - framework::Copy(*d_out, ctx.GetPlace(), ctx.device_context(), d_x); + framework::TensorCopy(*d_out, ctx.GetPlace(), ctx.device_context(), d_x); d_x->Resize(in_dims); } }; diff --git a/paddle/fluid/operators/sequence_reshape_op.h b/paddle/fluid/operators/sequence_reshape_op.h index f0b5be0218..2893808ee9 100644 --- a/paddle/fluid/operators/sequence_reshape_op.h +++ b/paddle/fluid/operators/sequence_reshape_op.h @@ -61,7 +61,7 @@ class SequenceReshapeKernel : public framework::OpKernel { } } - framework::Copy(*in, context.GetPlace(), out); + framework::TensorCopy(*in, context.GetPlace(), out); out->Resize({static_cast(out->lod()[0].back()), out_width}); } }; @@ -77,7 +77,7 @@ class SequenceReshapeGradKernel : public framework::OpKernel { context.Output(framework::GradVarName("X")); xg_tensor_ptr->mutable_data(context.GetPlace()); - framework::Copy(*outg_tensor_ptr, context.GetPlace(), xg_tensor_ptr); + framework::TensorCopy(*outg_tensor_ptr, 
context.GetPlace(), xg_tensor_ptr); xg_tensor_ptr->Resize(x_tensor_ptr->dims()); } }; diff --git a/paddle/fluid/operators/sequence_slice_op.h b/paddle/fluid/operators/sequence_slice_op.h index 4f6d70483e..b9c565cac9 100644 --- a/paddle/fluid/operators/sequence_slice_op.h +++ b/paddle/fluid/operators/sequence_slice_op.h @@ -66,13 +66,13 @@ class SequenceSliceOpKernel : public framework::OpKernel { if (platform::is_gpu_place(ctx.GetPlace())) { offset_cpu.mutable_data(offset->dims(), platform::CPUPlace()); - framework::Copy(*offset, platform::CPUPlace(), ctx.device_context(), - &offset_cpu); + framework::TensorCopy(*offset, platform::CPUPlace(), ctx.device_context(), + &offset_cpu); offset_data = offset_cpu.data(); length_cpu.mutable_data(length->dims(), platform::CPUPlace()); - framework::Copy(*length, platform::CPUPlace(), ctx.device_context(), - &length_cpu); + framework::TensorCopy(*length, platform::CPUPlace(), ctx.device_context(), + &length_cpu); length_data = length_cpu.data(); } @@ -127,13 +127,13 @@ class SequenceSliceGradOpKernel : public framework::OpKernel { if (platform::is_gpu_place(ctx.GetPlace())) { offset_cpu.mutable_data(offset->dims(), platform::CPUPlace()); - framework::Copy(*offset, platform::CPUPlace(), ctx.device_context(), - &offset_cpu); + framework::TensorCopy(*offset, platform::CPUPlace(), ctx.device_context(), + &offset_cpu); offset_data = offset_cpu.data(); length_cpu.mutable_data(length->dims(), platform::CPUPlace()); - framework::Copy(*length, platform::CPUPlace(), ctx.device_context(), - &length_cpu); + framework::TensorCopy(*length, platform::CPUPlace(), ctx.device_context(), + &length_cpu); length_data = length_cpu.data(); } diff --git a/paddle/fluid/operators/shrink_rnn_memory_op.cc b/paddle/fluid/operators/shrink_rnn_memory_op.cc index 183982f90f..a1871a8e7f 100644 --- a/paddle/fluid/operators/shrink_rnn_memory_op.cc +++ b/paddle/fluid/operators/shrink_rnn_memory_op.cc @@ -133,7 +133,7 @@ class ShrinkRNNMemoryGradOp : public ArrayOp { auto &dout_tensor = dout_var->Get(); auto height = dout_tensor.dims()[0]; auto slice = dx_tensor.Slice(0, static_cast(height)); - framework::Copy(dout_tensor, dout_tensor.place(), dev_ctx, &slice); + framework::TensorCopy(dout_tensor, dout_tensor.place(), dev_ctx, &slice); if (dx_tensor.dims()[0] > height) { auto rest_tensor = dx_tensor.Slice( static_cast(height), static_cast(dx_tensor.dims()[0])); diff --git a/paddle/fluid/operators/split_lod_tensor_op.cc b/paddle/fluid/operators/split_lod_tensor_op.cc index 1c5d647600..3222cce239 100644 --- a/paddle/fluid/operators/split_lod_tensor_op.cc +++ b/paddle/fluid/operators/split_lod_tensor_op.cc @@ -55,7 +55,8 @@ class SplitLoDTensorOp : public framework::OperatorBase { cpu_mask->ShareDataWith(mask); } else if (platform::is_gpu_place(mask.place())) { #ifdef PADDLE_WITH_CUDA - framework::Copy(mask, platform::CPUPlace(), dev_ctx, cpu_mask.get()); + framework::TensorCopy(mask, platform::CPUPlace(), dev_ctx, + cpu_mask.get()); #else PADDLE_THROW("Not supported GPU, Please compile WITH_GPU option"); #endif @@ -113,9 +114,9 @@ class SplitLoDTensorOp : public framework::OperatorBase { // out[offset: offset+len] = x[each_range.begin: each_range.end] auto slice = out->Slice(static_cast(offset), static_cast(offset + len)); - framework::Copy(x.Slice(static_cast(each_range.begin), - static_cast(each_range.end)), - x.place(), dev_ctx, &slice); + framework::TensorCopy(x.Slice(static_cast(each_range.begin), + static_cast(each_range.end)), + x.place(), dev_ctx, &slice); offset += len; } } diff 
--git a/paddle/fluid/operators/sum_op.h b/paddle/fluid/operators/sum_op.h index c9f22237d9..48b2d2779a 100644 --- a/paddle/fluid/operators/sum_op.h +++ b/paddle/fluid/operators/sum_op.h @@ -137,8 +137,8 @@ class SumKernel : public framework::OpKernel { out_array.resize(i + 1); } if (out_array[i].numel() == 0) { - framework::Copy(in_array[i], in_array[i].place(), - context.device_context(), &out_array[i]); + framework::TensorCopy(in_array[i], in_array[i].place(), + context.device_context(), &out_array[i]); out_array[i].set_lod(in_array[i].lod()); } else { PADDLE_ENFORCE(out_array[i].lod() == in_array[i].lod()); diff --git a/paddle/fluid/operators/tensor_array_read_write_op.cc b/paddle/fluid/operators/tensor_array_read_write_op.cc index 9b484cda12..2636812c42 100644 --- a/paddle/fluid/operators/tensor_array_read_write_op.cc +++ b/paddle/fluid/operators/tensor_array_read_write_op.cc @@ -45,7 +45,7 @@ class WriteToArrayOp : public ArrayOp { platform::DeviceContextPool::Instance(); auto &dev_ctx = *pool.Get(place); - Copy(x_tensor, place, dev_ctx, out_tensor); + TensorCopy(x_tensor, place, dev_ctx, out_tensor); out_tensor->set_lod(x_tensor.lod()); } else { VLOG(10) << "WARNING: The input tensor 'x_tensor' holds no memory, so " @@ -138,7 +138,7 @@ class ReadFromArrayOp : public ArrayOp { platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto &dev_ctx = *pool.Get(place); - framework::Copy(x_array[offset], place, dev_ctx, out_tensor); + framework::TensorCopy(x_array[offset], place, dev_ctx, out_tensor); out_tensor->set_lod(x_array[offset].lod()); } else { VLOG(10) << "offset " << offset << " >= " << x_array.size(); diff --git a/paddle/fluid/operators/warpctc_op.h b/paddle/fluid/operators/warpctc_op.h index aefb58bdcd..3e3e308931 100644 --- a/paddle/fluid/operators/warpctc_op.h +++ b/paddle/fluid/operators/warpctc_op.h @@ -185,7 +185,8 @@ class WarpCTCKernel : public framework::OpKernel { // warpctc accesses labels in CPU memory Tensor warpctc_label; - Copy(*label, platform::CPUPlace(), ctx.device_context(), &warpctc_label); + TensorCopy(*label, platform::CPUPlace(), ctx.device_context(), + &warpctc_label); const int* warpctc_label_data = warpctc_label.data(); // warpctc stores loss in CPU memory Tensor warpctc_loss; @@ -200,7 +201,7 @@ class WarpCTCKernel : public framework::OpKernel { sequence_width, num_sequences, blank, warpctc_loss_data); // Copy the loss back - Copy(warpctc_loss, ctx.GetPlace(), ctx.device_context(), loss); + TensorCopy(warpctc_loss, ctx.GetPlace(), ctx.device_context(), loss); } }; diff --git a/paddle/fluid/pybind/tensor_py.h b/paddle/fluid/pybind/tensor_py.h index 7e7fb554ac..1b0916ea03 100644 --- a/paddle/fluid/pybind/tensor_py.h +++ b/paddle/fluid/pybind/tensor_py.h @@ -101,7 +101,7 @@ T TensorGetElement(framework::Tensor &self, size_t offset) { return self.data()[offset]; } else { std::shared_ptr dst(new framework::Tensor); - framework::Copy(self, platform::CPUPlace(), dst.get()); + framework::TensorCopy(self, platform::CPUPlace(), dst.get()); return dst->data()[offset]; } } @@ -111,9 +111,9 @@ template void TensorSetElement(framework::Tensor &self, size_t offset, T elem) { if (platform::is_gpu_place(self.place())) { std::shared_ptr dst(new framework::Tensor); - framework::Copy(self, platform::CPUPlace(), dst.get()); + framework::TensorCopy(self, platform::CPUPlace(), dst.get()); dst->data()[offset] = elem; - framework::Copy(*dst.get(), self.place(), &self); + framework::TensorCopy(*dst.get(), self.place(), &self); } else if 
(platform::is_cpu_place(self.place())) { self.data()[offset] = elem; -- GitLab From ca126fcab7fdb37b96590f28f3a798657abab726 Mon Sep 17 00:00:00 2001 From: TomorrowIsAnOtherDay <2466956298@qq.com> Date: Thu, 15 Feb 2018 02:52:27 +0800 Subject: [PATCH 115/217] add python API for one_hot OP (#8444) * add python API for one_hot OP * fix code style * fix code style_2 --- python/paddle/v2/fluid/layers/nn.py | 38 +++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index 5f1842f5fb..d1ac6583dd 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -68,6 +68,7 @@ __all__ = [ 'layer_norm', 'softmax_with_cross_entropy', 'smooth_l1', + 'one_hot', ] @@ -3212,3 +3213,40 @@ def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None): 'Out': loss}, attrs={'sigma': sigma}) return loss + + +def one_hot(input, depth): + """ + One Hot Operator. This operator creates the one-hot representations of the input + index values. The following example will help to explain the function of this + operator. + + Args: + input(Tensor/LoDTensor): A Tensor/LoDTensor of indices whose last dimension must be 1. + depth(scalar): an integer defining the depth of the one-hot dimension. + + Returns: + The one-hot tensor or LoDTensor, of the same type as the input. + + Examples: + X is a LoDTensor: + X.lod = [[0, 1, 4]] + X.shape = [4, 1] + X.data = [[1], [1], [3], [0]] + set depth = 4 + Out is a LoDTensor: + Out.lod = [[0, 1, 4]] + Out.shape = [4, 4] + Out.data = [[0., 1., 0., 0.], + [0., 1., 0., 0.], + [0., 0., 0., 1.], + [1., 0., 0., 0.]] + """ + helper = LayerHelper("one_hot", **locals()) + one_hot_out = helper.create_tmp_variable(dtype='float32') + helper.append_op( + type="one_hot", + inputs={'X': input}, + attrs={'depth': depth}, + outputs={'Out': one_hot_out}) + return one_hot_out -- GitLab From 9890bb5987739402f7f2cba76230e7967f8230ed Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Wed, 14 Feb 2018 11:16:36 -0800 Subject: [PATCH 116/217] Explain why we choose an old version of boost (#7967) --- cmake/external/boost.cmake | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cmake/external/boost.cmake b/cmake/external/boost.cmake index 906bed2f04..d9cd264b49 100644 --- a/cmake/external/boost.cmake +++ b/cmake/external/boost.cmake @@ -15,6 +15,13 @@ include(ExternalProject) set(BOOST_PROJECT "extern_boost") +# To release PaddlePaddle as a pip package, we have to follow the +# manylinux1 standard, which requires Linux kernels and compilers as +# old as possible and recommends CentOS 5. Indeed, the earliest +# CentOS version that works with NVIDIA CUDA is CentOS 6. And a new +# version of boost, say, 1.66.0, doesn't build on CentOS 6. We +# checked that the devtools package of CentOS 6 installs boost 1.41.0. +# So we use 1.41.0 here.
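+# (Note: BOOST_TAR below has to stay consistent with BOOST_VER, since +# the download URL is derived from BOOST_TAR.)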
set(BOOST_VER "1.41.0") set(BOOST_TAR "boost_1_41_0") set(BOOST_URL "http://paddlepaddledeps.s3-website-us-west-1.amazonaws.com/${BOOST_TAR}.tar.gz") -- GitLab From 8b24bd4fe851a793521f88630f56e9d4afcf8263 Mon Sep 17 00:00:00 2001 From: "Yang Yang(Tony)" Date: Wed, 14 Feb 2018 11:56:46 -0800 Subject: [PATCH 117/217] Update parallel_do.md --- doc/design/parallel_do.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/doc/design/parallel_do.md b/doc/design/parallel_do.md index d51b1014d4..221af6b6a4 100644 --- a/doc/design/parallel_do.md +++ b/doc/design/parallel_do.md @@ -13,7 +13,7 @@ AddInput(kParameters, "Parameters are duplicated over different devices") AddInput(kPlaces, "Devices used for parallel processing"); AddOutput(kOutputs, "Outputs needed to be merged from different devices").AsDuplicable(); AddOutput(kParallelScopes, - "Container for all local variables in forward pass."); + "Scopes for all local variables in forward pass. One scope for each device"); AddAttr(kParallelBlock, "List of operaters to be executed in parallel"); ``` @@ -33,6 +33,7 @@ In the backward pass |||| Compute backward pass in parallel | accumulate param@grad from different devices to the first device | Merge input@grad from different devices + | Copy param@grad to the place of parallel_do_op ``` This implementation allows to write mixed device program like this @@ -47,7 +48,7 @@ pd = ParallelDo(gpu_places) with pd.do(): read_input(feature) prediction = my_net(feature) - write_output(activation) + write_output(prediction) prediction = pd() loss = cross_entropy(prediction, label) ``` @@ -98,7 +99,7 @@ looks like this. ```python pd = ParallelDo(gpu_places) with pd.do(): - feature = pre_fetch(gpu_places) +    feature = get_data_from_prefetch_queue(gpu_places) prediction = my_net(feature) write_output(activation) ``` -- GitLab From 9d26f1a3dfd03bbce40c52f0195d2ebba9ced22b Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Thu, 15 Feb 2018 02:08:33 +0000 Subject: [PATCH 118/217] callback to list of callbacks --- paddle/fluid/framework/executor.cc | 3 +-- python/paddle/v2/fluid/backward.py | 33 +++++++++++++++++------------ python/paddle/v2/fluid/optimizer.py | 2 +- 3 files changed, 22 insertions(+), 16 deletions(-) diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index 92b32b04d6..93a4883368 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -120,12 +120,11 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id, for (auto& op_desc : block.AllOps()) { auto op = paddle::framework::OpRegistry::CreateOp(*op_desc); - // VLOG(3) << op->DebugStringEx(local_scope); + VLOG(3) << place_ << " " << op->DebugStringEx(local_scope); platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); platform::RecordEvent record_event(op->Type(), pool.Get(place_)); - VLOG(3) << op->Type(); op->Run(*local_scope, place_); if (FLAGS_benchmark) { VLOG(2) << "Memory used after operator " + op->Type() + " running: " diff --git a/python/paddle/v2/fluid/backward.py b/python/paddle/v2/fluid/backward.py index 682df3301b..4e494db93b 100644 --- a/python/paddle/v2/fluid/backward.py +++ b/python/paddle/v2/fluid/backward.py @@ -234,7 +234,6 @@ def _callback_lookup_(op): "ncclInit", {"parallel_scopes": self.parallel_scopes_name}, {"Communicator": ['nccl_com__do_not_change_']}, {}) - print(serialize_op_decs(op_desc)) block.program.global_block().desc.append_op().copy_from( op_desc) self.has_inserted_nccl_init = True @@
-285,9 +284,7 @@ def _append_backward_ops_(block, val(str): corresponding forward variable name callback(callable object): a callable object used to decorate new generated grad ops """ - if callbacks is None: - callbacks = [] - else: + if callbacks is not None: assert (isinstance(callbacks, list)) for cb in callbacks: if not hasattr(cb, '__call__'): @@ -302,12 +299,17 @@ if op.has_attr("sub_block"): sub_block = program.block(op.block_attr("sub_block")) grad_sub_block = program.create_block(parent_idx=sub_block.idx) - if callbacks is None: - callbacks = [_callback_lookup_(op)] + cb = _callback_lookup_(op) + if cb is not None: + if callbacks is None: + new_callbacks = [cb] + else: + new_callbacks = callbacks + [_callback_lookup_(op)] + _append_backward_ops_(sub_block, sub_block.ops, grad_sub_block, + no_grad_dict, grad_to_var, new_callbacks) else: - callbacks.append(_callback_lookup_(op)) - _append_backward_ops_(sub_block, sub_block.ops, grad_sub_block, - no_grad_dict, grad_to_var, callbacks) + _append_backward_ops_(sub_block, sub_block.ops, grad_sub_block, + no_grad_dict, grad_to_var, callbacks) grad_sub_block_list.append(grad_sub_block.desc) # Getting op's corresponding grad_op @@ -327,8 +329,10 @@ new_op_desc = target_block.desc.append_op() new_op_desc.copy_from(op_desc) grad_to_var["__current_op_desc__"] = new_op_desc - for cb in callbacks: - cb(block=target_block, context=grad_to_var) + if callbacks is not None: + assert (isinstance(callbacks, list)) + for cb in callbacks: + cb(block=target_block, context=grad_to_var) def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map): @@ -408,7 +412,8 @@ def _get_stop_gradients_(program): return no_grad_dict -def append_backward(loss, parameter_list=None, no_grad_set=None, callback=None): +def append_backward(loss, parameter_list=None, no_grad_set=None, + callbacks=None): """ Append backward part to main_program @@ -424,6 +429,8 @@ (list[(Variable,Variable)]): list of (parameter, gradient) pair. """ assert isinstance(loss, framework.Variable) + if callbacks is not None: + assert isinstance(callbacks, list) program = loss.block.program if no_grad_set is None: @@ -451,7 +458,7 @@ no_grad_dict[0].update(map(_append_grad_suffix_, block_no_grad_set)) _append_backward_ops_(root_block, op_path, root_block, no_grad_dict, - grad_to_var, callback) + grad_to_var, callbacks) # Because calc_gradient may be called multiple times, # we need rename the internal gradient variables so that they have diff --git a/python/paddle/v2/fluid/optimizer.py b/python/paddle/v2/fluid/optimizer.py index 39391eb8e4..ecc42f6215 100644 --- a/python/paddle/v2/fluid/optimizer.py +++ b/python/paddle/v2/fluid/optimizer.py @@ -225,7 +225,7 @@ class Optimizer(object): `create_optimization_pass()` into one. """ params_grads = append_backward(loss, parameter_list, no_grad_set, - error_clip_callback) + [error_clip_callback]) params_grads = append_gradient_clip_ops(params_grads) -- GitLab From 501f4bd2d08adcb0f9b03dca4c034c5e54556305 Mon Sep 17 00:00:00 2001 From: Xin Pan Date: Tue, 13 Feb 2018 21:05:07 -0800 Subject: [PATCH 119/217] Add a script to bisect to a culprit commit. I expect that we need it to: 1. Find flaky test commit. 2. Find regression commit. Disabling a test is dangerous.
We should first try to find the culprit commit and fix it or revert it. I managed to find 3d1ac72a (between 279aa62 and 6773129) that causes the recent failure of test_rnn_encoder_decoder. --- tools/continuous_integration/bisect.py | 141 +++++++++++++++++++++++++ 1 file changed, 141 insertions(+) create mode 100644 tools/continuous_integration/bisect.py diff --git a/tools/continuous_integration/bisect.py b/tools/continuous_integration/bisect.py new file mode 100644 index 0000000000..21a46e5cef --- /dev/null +++ b/tools/continuous_integration/bisect.py @@ -0,0 +1,141 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A script to bisect the mainline commits and find the culprit commit. +# The default 'git bisect' checks feature branches, which is not desired +# because commits in feature branches might not pass tests or compile. +# +# Example: +# python ../bisect.py --git_dir=$PWD/../Paddle --build_dir=$PWD \ +# --good_commit=3647ed6 --bad_commit=279aa6 \ +# --test_target=test_rnn_encoder_decoder + +import argparse +import os +import subprocess +import sys + +parser = argparse.ArgumentParser(description=__doc__) +parser.add_argument( + '--git_dir', type=str, default='', help='git repo root directory.') +parser.add_argument( + '--build_dir', type=str, default='', help='build directory.') +parser.add_argument( + '--good_commit', + type=str, + default='', + help='The old commit known to be good.') +parser.add_argument( + '--bad_commit', + type=str, + default='', + help='The new commit known to be bad.') +parser.add_argument( + '--test_target', type=str, default='', help='The test target to evaluate.') +parser.add_argument( + '--bisect_branch', + type=str, + default='develop', + help='The mainline branch to bisect (feature branches ignored).') +parser.add_argument( + '--log_file', type=str, default='', help='The file used to log outputs.') +parser.add_argument( + '--test_times', + type=int, + default=10, + help="Number of times to run the test target.") +parser.add_argument( + '--build_parallel', type=int, default=32, help="make parallelism.") +args = parser.parse_args() + +if not args.log_file: + args.log_file = '/tmp/%s...%s.log' % (args.good_commit, args.bad_commit) + + +def print_arguments(): + print('----------- Configuration Arguments -----------') + for arg, value in sorted(vars(args).iteritems()): + print('%s: %s' % (arg, value)) + print('------------------------------------------------') + + +print_arguments() + +# List the commits in the mainline branch. +os.chdir(args.git_dir) +ret = subprocess.check_output( + [ + 'git rev-list --first-parent %s...%s' % (args.good_commit, + args.bad_commit) + ], + shell=True) +sys.stdout.write('commits found:\n%s\n' % ret) +commits = ret.strip().split('\n') +os.chdir(args.build_dir) +# Clean up previous logs.
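+# (The cmake, make, and ctest steps below all append to this same log +# file, so each bisect session starts from an empty log.)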
+subprocess.check_output(['echo "" > %s' % args.log_file], shell=True) + +last_culprit = '' +while True: + # Get to the mainline branch and clean up + os.chdir(args.git_dir) + subprocess.check_output( + [ + 'git checkout %s && git clean -fd && git checkout .' % + args.bisect_branch + ], + shell=True) + + if not commits: + sys.stdout.write('no commits to bisect\n') + exit() + # checkout the picked branch. + pick_idx = len(commits) / 2 + pick = commits[pick_idx] + os.chdir(args.git_dir) + subprocess.check_output(['git checkout %s' % pick], shell=True) + + # Clean builds and compile. + # We assume mainline commits should always compile. + os.chdir(args.build_dir) + sys.stdout.write('eval commit %d/%d: %s\n' % (pick_idx, len(commits), pick)) + # Link error can happen without complete clean up. + cmd = ('rm -rf * && ' + 'cmake -DWITH_TESTING=ON %s >> %s && make -j%s >> %s' % + (args.git_dir, args.log_file, args.build_parallel, args.log_file)) + sys.stdout.write('cmd: %s\n' % cmd) + try: + subprocess.check_output([cmd], shell=True) + except subprocess.CalledProcessError as e: + sys.stderr.write('failed to build commit: %s\n%s\n' % (pick, e)) + exit() + # test the selected branch. + passed = True + try: + cmd = ('ctest --repeat-until-fail %s -R %s >> %s' % + (args.test_times, args.test_target, args.log_file)) + sys.stdout.write('cmd: %s\n' % cmd) + subprocess.check_output([cmd], shell=True) + except subprocess.CalledProcessError as e: + passed = False + last_culprit = pick + sys.stdout.write('eval %s passed: %s\n' % (pick, passed)) + if passed: + if pick_idx == 0: break + commits = commits[:pick_idx] + else: + if pick_idx + 1 >= len(commits): break + commits = commits[pick_idx + 1:] + +sys.stdout.write('Culprit commit: %s\n' % last_culprit) -- GitLab From 6752b06f8cc6d67f00a6bafb39a164d1ffd39322 Mon Sep 17 00:00:00 2001 From: emailweixu Date: Thu, 15 Feb 2018 14:14:21 -0800 Subject: [PATCH 120/217] Generating random numbers with given batch size (#8337) * Generating random numbers with given batch size uniform_random_batch_size_like_op gaussian_random_batch_size_like_op * More comments about random seed. 
* Move test_*_random_batch_size_like_op to unittests --- paddle/fluid/operators/CMakeLists.txt | 14 ++++ paddle/fluid/operators/batch_size_like.cc | 64 ++++++++++++++++ paddle/fluid/operators/batch_size_like.h | 36 +++++++++ .../fill_constant_batch_size_like_op.cc | 52 ++----------- .../gaussian_random_batch_size_like_op.cc | 73 +++++++++++++++++++ paddle/fluid/operators/gaussian_random_op.cc | 10 ++- paddle/fluid/operators/gaussian_random_op.cu | 6 +- .../uniform_random_batch_size_like_op.cc | 72 ++++++++++++++++++ paddle/fluid/operators/uniform_random_op.cc | 9 ++- paddle/fluid/operators/uniform_random_op.cu | 3 + python/paddle/v2/fluid/layers/ops.py | 3 + .../v2/fluid/tests/unittests/op_test.py | 18 ++++- ...test_gaussian_random_batch_size_like_op.py | 46 ++++++++++++ .../test_uniform_random_batch_size_like_op.py | 42 +++++++++++ .../tests/unittests/test_uniform_random_op.py | 46 ++++-------- 15 files changed, 409 insertions(+), 85 deletions(-) create mode 100644 paddle/fluid/operators/batch_size_like.cc create mode 100644 paddle/fluid/operators/batch_size_like.h create mode 100644 paddle/fluid/operators/gaussian_random_batch_size_like_op.cc create mode 100644 paddle/fluid/operators/uniform_random_batch_size_like_op.cc create mode 100644 python/paddle/v2/fluid/tests/unittests/test_gaussian_random_batch_size_like_op.py create mode 100644 python/paddle/v2/fluid/tests/unittests/test_uniform_random_batch_size_like_op.py diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt index cadfd735d7..8f14fd376a 100644 --- a/paddle/fluid/operators/CMakeLists.txt +++ b/paddle/fluid/operators/CMakeLists.txt @@ -176,6 +176,20 @@ op_library(pool_op SRCS pool_op.cc DEPS pooling) op_library(conv_transpose_op SRCS conv_transpose_op.cc DEPS vol2col) endif() +cc_library(batch_size_like SRCS batch_size_like.cc DEPS op_registry) + +op_library(fill_constant_batch_size_like_op + SRCS fill_constant_batch_size_like_op.cc fill_constant_batch_size_like_op.cu.cc + DEPS batch_size_like) + +op_library(uniform_random_batch_size_like_op + SRCS uniform_random_batch_size_like_op.cc + DEPS batch_size_like uniform_random_op) + +op_library(gaussian_random_batch_size_like_op + SRCS gaussian_random_batch_size_like_op.cc + DEPS batch_size_like gaussian_random_op) + # FIXME(typhoonzero): save/load depends lodtensor serialization functions op_library(save_op DEPS lod_tensor) op_library(load_op DEPS lod_tensor) diff --git a/paddle/fluid/operators/batch_size_like.cc b/paddle/fluid/operators/batch_size_like.cc new file mode 100644 index 0000000000..4d4a6d4c47 --- /dev/null +++ b/paddle/fluid/operators/batch_size_like.cc @@ -0,0 +1,64 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/operators/batch_size_like.h" + +namespace paddle { +namespace operators { + +void BatchSizeLikeOp::InferShape(framework::InferShapeContext *ctx) const { + PADDLE_ENFORCE(ctx->HasInput("Input"), + "Input(Input) of %s should not be null.", Type()); + PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) of %s should not be null.", + Type()); + + auto &shape = ctx->Attrs().Get>("shape"); + PADDLE_ENFORCE_GT(shape.size(), 0); + std::vector shape_int64(shape.size(), 0); + std::transform(shape.begin(), shape.end(), shape_int64.begin(), + [](int a) { return static_cast(a); }); + auto output_dim = framework::make_ddim(shape_int64); + + int input_dim_idx = ctx->Attrs().Get("input_dim_idx"); + PADDLE_ENFORCE_GE(input_dim_idx, 0); + PADDLE_ENFORCE_GT(ctx->GetInputDim("Input").size(), input_dim_idx); + + int output_dim_idx = ctx->Attrs().Get("output_dim_idx"); + PADDLE_ENFORCE_GE(output_dim_idx, 0); + PADDLE_ENFORCE_GT(static_cast(shape.size()), output_dim_idx); + + output_dim[output_dim_idx] = ctx->GetInputDim("Input")[input_dim_idx]; + ctx->SetOutputDim("Out", output_dim); +} + +BatchSizeLikeOpMaker::BatchSizeLikeOpMaker(OpProto *proto, + OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("Input", + "(Tensor) Tensor " + "whose input_dim_idx'th dimension specifies the batch_size"); + AddOutput("Out", + "(Tensor) Tensor of specified shape will be filled " + "with the specified value"); + AddAttr>("shape", "(vector) The shape of the output"); + AddAttr("input_dim_idx", + "(int, default 0) The index of input's batch size dimension") + .SetDefault(0); + AddAttr("output_dim_idx", + "(int, default 0) The index of output's batch size dimension") + .SetDefault(0); +} + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/batch_size_like.h b/paddle/fluid/operators/batch_size_like.h new file mode 100644 index 0000000000..87e8f053a7 --- /dev/null +++ b/paddle/fluid/operators/batch_size_like.h @@ -0,0 +1,36 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" + +namespace paddle { +namespace operators { + +class BatchSizeLikeOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext *ctx) const override; +}; + +class BatchSizeLikeOpMaker : public framework::OpProtoAndCheckerMaker { + public: + BatchSizeLikeOpMaker(OpProto *proto, OpAttrChecker *op_checker); +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/fill_constant_batch_size_like_op.cc b/paddle/fluid/operators/fill_constant_batch_size_like_op.cc index a36248531e..55eca71c8b 100644 --- a/paddle/fluid/operators/fill_constant_batch_size_like_op.cc +++ b/paddle/fluid/operators/fill_constant_batch_size_like_op.cc @@ -13,42 +13,14 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/fill_constant_batch_size_like_op.h" +#include "paddle/fluid/operators/batch_size_like.h" namespace paddle { namespace operators { -class FillConstantBatchSizeLikeOp : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE( - ctx->HasInput("Input"), - "Input(Input) of FillConstantBatchSizeLikeOp should not be null."); - PADDLE_ENFORCE( - ctx->HasOutput("Out"), - "Output(Out) of FillConstantBatchSizeLikeOp should not be null."); - - auto &shape = ctx->Attrs().Get>("shape"); - PADDLE_ENFORCE_GT(shape.size(), 0); - std::vector shape_int64(shape.size(), 0); - std::transform(shape.begin(), shape.end(), shape_int64.begin(), - [](int a) { return static_cast(a); }); - auto output_dim = framework::make_ddim(shape_int64); - - int input_dim_idx = ctx->Attrs().Get("input_dim_idx"); - PADDLE_ENFORCE_GE(input_dim_idx, 0); - PADDLE_ENFORCE_GT(ctx->GetInputDim("Input").size(), input_dim_idx); - - int output_dim_idx = ctx->Attrs().Get("output_dim_idx"); - PADDLE_ENFORCE_GE(output_dim_idx, 0); - PADDLE_ENFORCE_GT(static_cast(shape.size()), output_dim_idx); - - output_dim[output_dim_idx] = ctx->GetInputDim("Input")[input_dim_idx]; - ctx->SetOutputDim("Out", output_dim); - } - +class FillConstantBatchSizeLikeOp : public BatchSizeLikeOp { protected: + using BatchSizeLikeOp::BatchSizeLikeOp; framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext &ctx) const override { return framework::OpKernelType( @@ -57,28 +29,14 @@ class FillConstantBatchSizeLikeOp : public framework::OperatorWithKernel { } }; -class FillConstantBatchSizeLikeOpMaker - : public framework::OpProtoAndCheckerMaker { +class FillConstantBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker { public: FillConstantBatchSizeLikeOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + : BatchSizeLikeOpMaker(proto, op_checker) { AddAttr("dtype", "(int, default 5 (FP32)) " "Output data type") .SetDefault(framework::proto::DataType::FP32); - AddInput("Input", - "(Tensor) Tensor " - "whose dim_idx th dimension is used to specify the batch_size"); - AddOutput("Out", - "(Tensor) Tensor of specified shape will be filled " - "with the specified value"); - AddAttr>("shape", "(vector) The shape of the output"); - AddAttr("input_dim_idx", - "(int, default 0) The index of input's batch size dimension") - .SetDefault(0); - 
AddAttr("output_dim_idx", - "(int, default 0) The index of output's batch size dimension") - .SetDefault(0); AddAttr("value", "(float, default 0) The value to be filled") .SetDefault(0.0f); AddComment(R"DOC( diff --git a/paddle/fluid/operators/gaussian_random_batch_size_like_op.cc b/paddle/fluid/operators/gaussian_random_batch_size_like_op.cc new file mode 100644 index 0000000000..ac516986ad --- /dev/null +++ b/paddle/fluid/operators/gaussian_random_batch_size_like_op.cc @@ -0,0 +1,73 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/operators/batch_size_like.h" + +namespace paddle { +namespace operators { + +class GaussianRandomBatchSizeLikeOp : public BatchSizeLikeOp { + protected: + using BatchSizeLikeOp::BatchSizeLikeOp; + + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext &ctx) const override { + return framework::OpKernelType( + static_cast(ctx.Attr("dtype")), + ctx.GetPlace()); + } +}; + +class GaussianRandomBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker { + public: + GaussianRandomBatchSizeLikeOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : BatchSizeLikeOpMaker(proto, op_checker) { + AddAttr("mean", + "(float, default 0.0) " + "mean of random tensor.") + .SetDefault(.0f); + AddAttr("std", + "(float, default 1.0) " + "std of random tensor.") + .SetDefault(1.0f); + AddAttr("seed", + "(int, default 0) " + "Random seed of generator." + "0 means use system wide seed." + "Note that if seed is not 0, this operator will always " + "generate the same random numbers every time.") + .SetDefault(0); + AddAttr("dtype", + "(int, default 5(FP32)) " + "Output data type.") + .SetDefault(framework::proto::DataType::FP32); + + AddComment(R"DOC( +GaussianRandom Operator. + +Used to initialize tensors with gaussian random generator. +)DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +REGISTER_OP_WITHOUT_GRADIENT( + gaussian_random_batch_size_like, + paddle::operators::GaussianRandomBatchSizeLikeOp, + paddle::operators::GaussianRandomBatchSizeLikeOpMaker); +// Kernels are registered in gaussian_random_op.cc and gaussian_random_op.cu diff --git a/paddle/fluid/operators/gaussian_random_op.cc b/paddle/fluid/operators/gaussian_random_op.cc index cf3a528bdd..7fb2b2c230 100644 --- a/paddle/fluid/operators/gaussian_random_op.cc +++ b/paddle/fluid/operators/gaussian_random_op.cc @@ -88,7 +88,9 @@ class GaussianRandomOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr("seed", "(int, default 0) " "Random seed of generator." - "0 means use system wide seed.") + "0 means use system wide seed." + "Note that if seed is not 0, this operator will always " + "generate the same random numbers every time.") .SetDefault(0); AddAttr("dtype", "(int, default 5(FP32)) " @@ -110,4 +112,8 @@ Used to initialize tensors with gaussian random generator. 
namespace ops = paddle::operators; REGISTER_OP_WITHOUT_GRADIENT(gaussian_random, ops::GaussianRandomOp, ops::GaussianRandomOpMaker); -REGISTER_OP_CPU_KERNEL(gaussian_random, ops::CPUGaussianRandomKernel); +REGISTER_OP_CPU_KERNEL(gaussian_random, ops::CPUGaussianRandomKernel, + ops::CPUGaussianRandomKernel); +REGISTER_OP_CPU_KERNEL(gaussian_random_batch_size_like, + ops::CPUGaussianRandomKernel, + ops::CPUGaussianRandomKernel); diff --git a/paddle/fluid/operators/gaussian_random_op.cu b/paddle/fluid/operators/gaussian_random_op.cu index 7340590c3e..7784856417 100644 --- a/paddle/fluid/operators/gaussian_random_op.cu +++ b/paddle/fluid/operators/gaussian_random_op.cu @@ -61,4 +61,8 @@ class GPUGaussianRandomKernel : public framework::OpKernel { } // namespace paddle REGISTER_OP_CUDA_KERNEL(gaussian_random, - paddle::operators::GPUGaussianRandomKernel); + paddle::operators::GPUGaussianRandomKernel, + paddle::operators::GPUGaussianRandomKernel); +REGISTER_OP_CUDA_KERNEL(gaussian_random_batch_size_like, + paddle::operators::GPUGaussianRandomKernel, + paddle::operators::GPUGaussianRandomKernel); diff --git a/paddle/fluid/operators/uniform_random_batch_size_like_op.cc b/paddle/fluid/operators/uniform_random_batch_size_like_op.cc new file mode 100644 index 0000000000..fa31dad513 --- /dev/null +++ b/paddle/fluid/operators/uniform_random_batch_size_like_op.cc @@ -0,0 +1,72 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/operators/batch_size_like.h" + +namespace paddle { +namespace operators { + +class UniformRandomBatchSizeLikeOp : public BatchSizeLikeOp { + protected: + using BatchSizeLikeOp::BatchSizeLikeOp; + + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext &ctx) const override { + return framework::OpKernelType( + static_cast(ctx.Attr("dtype")), + ctx.GetPlace()); + } +}; + +class UniformRandomBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker { + public: + UniformRandomBatchSizeLikeOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : BatchSizeLikeOpMaker(proto, op_checker) { + AddComment(R"DOC( +Uniform random operator. + +This operator initializes a tensor with the same batch_size as the Input tensor + with random values sampled from a uniform distribution. + +)DOC"); + AddAttr("min", + "(float, default -1.0) " + "Minimum value of uniform random") + .SetDefault(-1.0f); + AddAttr("max", + "(float, default 1.0) " + "Maximum value of uniform random") + .SetDefault(1.0f); + AddAttr("seed", + "(int, default 0) " + "Random seed used for generating samples. " + "0 means use a seed generated by the system."
+ "Note that if seed is not 0, this operator will always " + "generate the same random numbers every time.") + .SetDefault(0); + AddAttr("dtype", "(int, default 5(FP32)) Output tensor data type") + .SetDefault(framework::proto::DataType::FP32); + } +}; + +} // namespace operators +} // namespace paddle + +REGISTER_OP_WITHOUT_GRADIENT( + uniform_random_batch_size_like, + paddle::operators::UniformRandomBatchSizeLikeOp, + paddle::operators::UniformRandomBatchSizeLikeOpMaker); +// Kernels are registered in uniform_random_op.cc and uniform_random_op.cu diff --git a/paddle/fluid/operators/uniform_random_op.cc b/paddle/fluid/operators/uniform_random_op.cc index 6c0167deab..3a0a0d6fca 100644 --- a/paddle/fluid/operators/uniform_random_op.cc +++ b/paddle/fluid/operators/uniform_random_op.cc @@ -79,7 +79,7 @@ class UniformRandomOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( Uniform random operator. -This operator initializes a tensor with random values sampled from a +This operator initializes a tensor with random values sampled from a uniform distribution. )DOC"); @@ -96,7 +96,9 @@ uniform distribution. AddAttr("seed", "(int, default 0) " "Random seed used for generating samples. " - "0 means use a seed generated by the system.") + "0 means use a seed generated by the system." + "Note that if seed is not 0, this operator will always " + "generate the same random numbers every time.") .SetDefault(0); AddAttr("dtype", "(int, default 5(FP32)) Output tensor data type") .SetDefault(framework::proto::DataType::FP32); @@ -110,3 +112,6 @@ REGISTER_OP_WITHOUT_GRADIENT(uniform_random, paddle::operators::UniformRandomOp, REGISTER_OP_CPU_KERNEL(uniform_random, paddle::operators::CPUUniformRandomKernel, paddle::operators::CPUUniformRandomKernel); +REGISTER_OP_CPU_KERNEL(uniform_random_batch_size_like, + paddle::operators::CPUUniformRandomKernel, + paddle::operators::CPUUniformRandomKernel); diff --git a/paddle/fluid/operators/uniform_random_op.cu b/paddle/fluid/operators/uniform_random_op.cu index 877d81d5c4..1232cd1eb3 100644 --- a/paddle/fluid/operators/uniform_random_op.cu +++ b/paddle/fluid/operators/uniform_random_op.cu @@ -66,3 +66,6 @@ class GPUUniformRandomKernel : public framework::OpKernel { REGISTER_OP_CUDA_KERNEL(uniform_random, paddle::operators::GPUUniformRandomKernel, paddle::operators::GPUUniformRandomKernel); +REGISTER_OP_CUDA_KERNEL(uniform_random_batch_size_like, + paddle::operators::GPUUniformRandomKernel, + paddle::operators::GPUUniformRandomKernel); diff --git a/python/paddle/v2/fluid/layers/ops.py b/python/paddle/v2/fluid/layers/ops.py index 28265a57e6..0b88b63962 100644 --- a/python/paddle/v2/fluid/layers/ops.py +++ b/python/paddle/v2/fluid/layers/ops.py @@ -66,6 +66,9 @@ __all__ = [ 'logical_xor', 'logical_not', 'uniform_random', + 'uniform_random_batch_size_like', + 'gaussian_random', + 'gaussian_random_batch_size_like', 'cumsum', ] + __activations__ diff --git a/python/paddle/v2/fluid/tests/unittests/op_test.py b/python/paddle/v2/fluid/tests/unittests/op_test.py index 940e2bfb16..4761811f0a 100644 --- a/python/paddle/v2/fluid/tests/unittests/op_test.py +++ b/python/paddle/v2/fluid/tests/unittests/op_test.py @@ -248,7 +248,11 @@ class OpTest(unittest.TestCase): return feed_map - def check_output_with_place(self, place, atol): + def calc_output(self, place): + outs, _ = self._calc_output(place) + return outs + + def _calc_output(self, place): op_proto = OpProtoHolder.instance().get_op_proto(self.op_type) program = Program() @@ -281,7 +285,10 @@ class 
OpTest(unittest.TestCase): feed=feed_map, fetch_list=fetch_list, return_numpy=False) + return outs, fetch_list + def check_output_with_place(self, place, atol): + outs, fetch_list = self._calc_output(place) for out_name, out_dup in Operator.get_op_outputs(self.op_type): if out_name not in self.outputs: continue @@ -340,6 +347,15 @@ class OpTest(unittest.TestCase): for place in places: self.check_output_with_place(place, atol) + def check_output_customized(self, checker): + places = [core.CPUPlace()] + if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type): + places.append(core.CUDAPlace(0)) + for place in places: + outs = self.calc_output(place) + outs = [np.array(out) for out in outs] + checker(outs) + def __assert_is_close(self, numeric_grads, analytic_grads, names, max_relative_error, msg_prefix): diff --git a/python/paddle/v2/fluid/tests/unittests/test_gaussian_random_batch_size_like_op.py b/python/paddle/v2/fluid/tests/unittests/test_gaussian_random_batch_size_like_op.py new file mode 100644 index 0000000000..1398166a74 --- /dev/null +++ b/python/paddle/v2/fluid/tests/unittests/test_gaussian_random_batch_size_like_op.py @@ -0,0 +1,46 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np +from op_test import OpTest + + +class TestGaussianRandomBatchSizeLike(OpTest): + def setUp(self): + self.op_type = "gaussian_random_batch_size_like" + self.inputs = {'Input': np.zeros((500, 2000), dtype="float32")} + self.attrs = {'mean': 1., 'std': 2., 'shape': [-1, 2000]} + self.outputs = {'Out': np.zeros((500, 2000), dtype='float32')} + + def test_check_output(self): + self.check_output_customized(self.verify_output) + + def verify_output(self, outs): + self.assertEqual(outs[0].shape, (500, 2000)) + hist, _ = np.histogram(outs[0], range=(-3, 5)) + hist = hist.astype("float32") + hist /= float(outs[0].size) + data = np.random.normal(size=(500, 2000), loc=1, scale=2) + hist2, _ = np.histogram(data, range=(-3, 5)) + hist2 = hist2.astype("float32") + hist2 /= float(outs[0].size) + self.assertTrue( + np.allclose( + hist, hist2, rtol=0, atol=0.01), + "hist: " + str(hist) + " hist2: " + str(hist2)) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/v2/fluid/tests/unittests/test_uniform_random_batch_size_like_op.py b/python/paddle/v2/fluid/tests/unittests/test_uniform_random_batch_size_like_op.py new file mode 100644 index 0000000000..e033e86114 --- /dev/null +++ b/python/paddle/v2/fluid/tests/unittests/test_uniform_random_batch_size_like_op.py @@ -0,0 +1,42 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np +from op_test import OpTest + + +class TestUniformRandomBatchSizeLike(OpTest): + def setUp(self): + self.op_type = "uniform_random_batch_size_like" + self.inputs = {'Input': np.zeros((500, 2000), dtype="float32")} + self.attrs = {'min': 1., 'max': 2., 'shape': [-1, 2000]} + self.outputs = {'Out': np.zeros((500, 2000), dtype='float32')} + + def test_check_output(self): + self.check_output_customized(self.verify_output) + + def verify_output(self, outs): + self.assertEqual(outs[0].shape, (500, 2000)) + hist, _ = np.histogram(outs[0], range=(1, 2)) + hist = hist.astype("float32") + hist /= float(outs[0].size) + prob = 0.1 * np.ones((10)) + self.assertTrue( + np.allclose( + hist, prob, rtol=0, atol=0.01), "hist: " + str(hist)) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/v2/fluid/tests/unittests/test_uniform_random_op.py b/python/paddle/v2/fluid/tests/unittests/test_uniform_random_op.py index 53227716ef..75ff85a55f 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_uniform_random_op.py +++ b/python/paddle/v2/fluid/tests/unittests/test_uniform_random_op.py @@ -13,14 +13,11 @@ # limitations under the License. import unittest -import numpy +import numpy as np +from op_test import OpTest -from paddle.v2.fluid.op import Operator -import paddle.v2.fluid.core as core -import paddle.v2.fluid as fluid - -class TestUniformRandomOp(unittest.TestCase): +class TestUniformRandomOp(OpTest): def setUp(self): self.op_type = "uniform_random" self.inputs = {} @@ -30,35 +27,20 @@ class TestUniformRandomOp(unittest.TestCase): "max": 10.0, "seed": 10 } - self.outputs = ["Out"] - - def test_cpu(self): - self.uniform_random_test(place=core.CPUPlace()) - - def test_gpu(self): - if core.is_compiled_with_cuda(): - self.uniform_random_test(place=core.CUDAPlace(0)) - - def uniform_random_test(self, place): - program = fluid.Program() - block = program.global_block() - vout = block.create_var(name="Out") - op = block.append_op( - type=self.op_type, outputs={"Out": vout}, attrs=self.attrs) + self.outputs = {"Out": np.zeros((1000, 784)).astype("float32")} - op.desc.infer_var_type(block.desc) - op.desc.infer_shape(block.desc) - - fetch_list = [] - for var_name in self.outputs: - fetch_list.append(block.var(var_name)) - - exe = fluid.Executor(place) - outs = exe.run(program, fetch_list=fetch_list) + def test_check_output(self): + self.check_output_customized(self.verify_output) + def verify_output(self, outs): tensor = outs[0] - - self.assertAlmostEqual(tensor.mean(), 2.5, delta=0.1) + hist, _ = np.histogram(outs[0], range=(-5, 10)) + hist = hist.astype("float32") + hist /= float(outs[0].size) + prob = 0.1 * np.ones((10)) + self.assertTrue( + np.allclose( + hist, prob, rtol=0, atol=0.01), "hist: " + str(hist)) if __name__ == "__main__": -- GitLab From 4f122c07600bea33a764dffef7524469d8f1890b Mon Sep 17 00:00:00 2001 From: emailweixu Date: Thu, 15 Feb 2018 15:36:40 -0800 Subject: [PATCH 121/217] Remove incorrect statement in compare_op.h (#8416) The type of tensor z should be bool. 
And there's no need to call mutable_data because ElementwiseComputeEx will do it. --- paddle/fluid/operators/compare_op.h | 1 - 1 file changed, 1 deletion(-) diff --git a/paddle/fluid/operators/compare_op.h b/paddle/fluid/operators/compare_op.h index d7b62782fc..7e78269cf4 100644 --- a/paddle/fluid/operators/compare_op.h +++ b/paddle/fluid/operators/compare_op.h @@ -67,7 +67,6 @@ class CompareOpKernel auto* x = context.Input("X"); auto* y = context.Input("Y"); auto* z = context.Output("Out"); - z->mutable_data(context.GetPlace()); int axis = context.Attr("axis"); ElementwiseComputeEx(context, x, y, axis, Functor(), z); -- GitLab From 89ead8d15101ae20f0bfee2d81d4aecbb67f66dd Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 16 Feb 2018 07:38:17 +0800 Subject: [PATCH 122/217] Feature/understand sentiment parallel do (#7994) * Support parallel test for understand_sentiment * Full test on understand_sentiment * Skip normal tests * Debug CI * Enable benchmark * Revert init.cc * Make CI pass --- paddle/fluid/framework/executor.cc | 1 + .../tests/book/test_understand_sentiment.py | 97 ++++++++++++++++--- 2 files changed, 82 insertions(+), 16 deletions(-) diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index 23eeb276c0..636f67e4a7 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -126,6 +126,7 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id, platform::RecordEvent record_event(op->Type(), pool.Get(place_)); op->Run(*local_scope, place_); + // Wait current device context. VLOG(3) << op->DebugStringEx(local_scope); if (FLAGS_benchmark) { VLOG(2) << "Memory used after operator " + op->Type() + " running: " diff --git a/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py index 6e0206d41d..af917de8e3 100644 --- a/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py +++ b/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import print_function import unittest import paddle.v2.fluid as fluid @@ -23,7 +24,8 @@ import sys def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32, hid_dim=32): - emb = fluid.layers.embedding(input=data, size=[input_dim, emb_dim]) + emb = fluid.layers.embedding( + input=data, size=[input_dim, emb_dim], is_sparse=True) conv_3 = fluid.nets.sequence_conv_pool( input=emb, num_filters=hid_dim, @@ -41,8 +43,6 @@ def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32, act="softmax") cost = fluid.layers.cross_entropy(input=prediction, label=label) avg_cost = fluid.layers.mean(x=cost) - adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002) - adam_optimizer.minimize(avg_cost) accuracy = fluid.layers.accuracy(input=prediction, label=label) return avg_cost, accuracy, prediction @@ -56,7 +56,8 @@ def stacked_lstm_net(data, stacked_num=3): assert stacked_num % 2 == 1 - emb = fluid.layers.embedding(input=data, size=[input_dim, emb_dim]) + emb = fluid.layers.embedding( + input=data, size=[input_dim, emb_dim], is_sparse=True) # add bias attr # TODO(qijun) linear act @@ -79,8 +80,6 @@ def stacked_lstm_net(data, act='softmax') cost = fluid.layers.cross_entropy(input=prediction, label=label) avg_cost = fluid.layers.mean(x=cost) - adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002) - adam_optimizer.minimize(avg_cost) accuracy = fluid.layers.accuracy(input=prediction, label=label) return avg_cost, accuracy, prediction @@ -93,7 +92,7 @@ def create_random_lodtensor(lod, place, low, high): return res -def train(word_dict, net_method, use_cuda, save_dirname=None): +def train(word_dict, net_method, use_cuda, parallel=False, save_dirname=None): BATCH_SIZE = 128 PASS_NUM = 5 dict_dim = len(word_dict) @@ -102,8 +101,30 @@ def train(word_dict, net_method, use_cuda, save_dirname=None): data = fluid.layers.data( name="words", shape=[1], dtype="int64", lod_level=1) label = fluid.layers.data(name="label", shape=[1], dtype="int64") - cost, acc_out, prediction = net_method( - data, label, input_dim=dict_dim, class_dim=class_dim) + + if not parallel: + cost, acc_out, prediction = net_method( + data, label, input_dim=dict_dim, class_dim=class_dim) + else: + places = fluid.layers.get_places() + pd = fluid.layers.ParallelDo(places) + with pd.do(): + cost, acc, _ = net_method( + pd.read_input(data), + pd.read_input(label), + input_dim=dict_dim, + class_dim=class_dim) + pd.write_output(cost) + pd.write_output(acc) + + cost, acc = pd() + cost = fluid.layers.mean(x=cost) + acc_out = fluid.layers.mean(x=acc) + prediction = None + assert save_dirname is None + + adagrad = fluid.optimizer.Adagrad(learning_rate=0.002) + adagrad.minimize(cost) train_data = paddle.batch( paddle.reader.shuffle( @@ -164,14 +185,16 @@ def infer(use_cuda, save_dirname=None): print("Inference results: ", np_data) -def main(word_dict, net_method, use_cuda): +def main(word_dict, net_method, use_cuda, parallel=False, save_dirname=None): if use_cuda and not fluid.core.is_compiled_with_cuda(): return - # Directory for saving the trained model - save_dirname = "understand_sentiment.inference.model" - - train(word_dict, net_method, use_cuda, save_dirname) + train( + word_dict, + net_method, + use_cuda, + parallel=parallel, + save_dirname=save_dirname) infer(use_cuda, save_dirname) @@ -191,20 +214,62 @@ class TestUnderstandSentiment(unittest.TestCase): def test_conv_cpu(self): with self.new_program_scope(): - main(self.word_dict, net_method=convolution_net, use_cuda=False) + main( + self.word_dict, + 
net_method=convolution_net, + use_cuda=False, + save_dirname="understand_sentiment.inference.model") + def test_conv_cpu_parallel(self): + with self.new_program_scope(): + main( + self.word_dict, + net_method=convolution_net, + use_cuda=False, + parallel=True) + + @unittest.skip(reason="make CI faster") def test_stacked_lstm_cpu(self): with self.new_program_scope(): main(self.word_dict, net_method=stacked_lstm_net, use_cuda=False) + def test_stacked_lstm_cpu_parallel(self): + with self.new_program_scope(): + main( + self.word_dict, + net_method=stacked_lstm_net, + use_cuda=False, + parallel=True) + def test_conv_gpu(self): with self.new_program_scope(): - main(self.word_dict, net_method=convolution_net, use_cuda=True) + main( + self.word_dict, + net_method=convolution_net, + use_cuda=True, + save_dirname="understand_sentiment.inference.model") + + def test_conv_gpu_parallel(self): + with self.new_program_scope(): + main( + self.word_dict, + net_method=convolution_net, + use_cuda=True, + parallel=True) + @unittest.skip(reason="make CI faster") def test_stacked_lstm_gpu(self): with self.new_program_scope(): main(self.word_dict, net_method=stacked_lstm_net, use_cuda=True) + def test_stacked_lstm_gpu_parallel(self): + with self.new_program_scope(): + main( + self.word_dict, + net_method=stacked_lstm_net, + use_cuda=True, + parallel=True) + if __name__ == '__main__': unittest.main() -- GitLab From 1d9fd1c0069beef175e9de9d2194a5f1620d3ed3 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Fri, 16 Feb 2018 00:10:28 +0000 Subject: [PATCH 123/217] pass test_recognize_digits --- paddle/fluid/operators/conv_op.cc | 5 +++-- paddle/fluid/operators/conv_op.h | 4 ++-- paddle/fluid/operators/parallel_do_op.cc | 7 +++++++ 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc index 6b378ec1bc..2ecece7073 100644 --- a/paddle/fluid/operators/conv_op.cc +++ b/paddle/fluid/operators/conv_op.cc @@ -60,8 +60,9 @@ void ConvOp::InferShape(framework::InferShapeContext* ctx) const { "Due to the settings of paddings, filter_dims and " "dilations, the output size is less than 0, please check " "again."); - output_shape.push_back(OutputSize(in_dims[i + 2], filter_dims[i + 2], - dilations[i], paddings[i], strides[i])); + output_shape.push_back(ConvOutputSize(in_dims[i + 2], filter_dims[i + 2], + dilations[i], paddings[i], + strides[i])); } ctx->SetOutputDim("Output", framework::make_ddim(output_shape)); ctx->ShareLoD("Input", "Output"); diff --git a/paddle/fluid/operators/conv_op.h b/paddle/fluid/operators/conv_op.h index ecbe3d505a..c93c2e73f7 100644 --- a/paddle/fluid/operators/conv_op.h +++ b/paddle/fluid/operators/conv_op.h @@ -28,8 +28,8 @@ using Tensor = framework::Tensor; // Base convolution operator definations for other conv // like operators to reuse the implementation. 
-inline int OutputSize(int input_size, int filter_size, int dilation, - int padding, int stride) { +inline int ConvOutputSize(int input_size, int filter_size, int dilation, + int padding, int stride) { const int dkernel = dilation * (filter_size - 1) + 1; const int output_size = (input_size + 2 * padding - dkernel) / stride + 1; return output_size; diff --git a/paddle/fluid/operators/parallel_do_op.cc b/paddle/fluid/operators/parallel_do_op.cc index d63962bb52..f09a79ffc5 100644 --- a/paddle/fluid/operators/parallel_do_op.cc +++ b/paddle/fluid/operators/parallel_do_op.cc @@ -256,6 +256,10 @@ class ParallelDoGradOp : public framework::OperatorBase { } } for (auto &s : Outputs(framework::GradVarName(kParameters))) { + if (s == "@EMPTY@") { + continue; + } + VLOG(3) << "Moving " << s; CopyOrShare(*sub_scopes[0]->FindVar(s), place, scope.FindVar(s)); } WaitOnPlaces(places); @@ -266,6 +270,9 @@ class ParallelDoGradOp : public framework::OperatorBase { const std::vector &sub_scopes, const platform::PlaceList &places) const { for (auto &s : Outputs(framework::GradVarName(kParameters))) { + if (s == "@EMPTY@") { + continue; + } VLOG(3) << "Accumulating " << s; if (s == framework::kEmptyVarName) continue; std::string tmp_name; -- GitLab From eb82b5ccc0c5519682ba2836c00e7e5f38426f95 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Fri, 16 Feb 2018 00:22:17 +0000 Subject: [PATCH 124/217] test error clip --- python/paddle/v2/fluid/tests/test_error_clip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/v2/fluid/tests/test_error_clip.py b/python/paddle/v2/fluid/tests/test_error_clip.py index b331f16913..3d6ffe8074 100644 --- a/python/paddle/v2/fluid/tests/test_error_clip.py +++ b/python/paddle/v2/fluid/tests/test_error_clip.py @@ -43,7 +43,7 @@ prog_clip.block(0).var(hidden1.name).set_error_clip( avg_cost_clip = prog_clip.block(0).var(avg_cost.name) fluid.backward.append_backward(loss=avg_cost) fluid.backward.append_backward( - loss=avg_cost_clip, callback=fluid.clip.error_clip_callback) + loss=avg_cost_clip, callbacks=fluid.clip.error_clip_callback) hidden1_grad = prog.block(0).var(hidden1.name + "@GRAD") hidden1_grad_clip = prog_clip.block(0).var(hidden1.name + "@GRAD") -- GitLab From 3494b79c4dee0e5382a2d7ea5f2754ff48893c79 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Fri, 16 Feb 2018 00:23:24 +0000 Subject: [PATCH 125/217] test error clip --- python/paddle/v2/fluid/tests/test_error_clip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/v2/fluid/tests/test_error_clip.py b/python/paddle/v2/fluid/tests/test_error_clip.py index 3d6ffe8074..d577d0014d 100644 --- a/python/paddle/v2/fluid/tests/test_error_clip.py +++ b/python/paddle/v2/fluid/tests/test_error_clip.py @@ -43,7 +43,7 @@ prog_clip.block(0).var(hidden1.name).set_error_clip( avg_cost_clip = prog_clip.block(0).var(avg_cost.name) fluid.backward.append_backward(loss=avg_cost) fluid.backward.append_backward( - loss=avg_cost_clip, callbacks=fluid.clip.error_clip_callback) + loss=avg_cost_clip, callbacks=[fluid.clip.error_clip_callback]) hidden1_grad = prog.block(0).var(hidden1.name + "@GRAD") hidden1_grad_clip = prog_clip.block(0).var(hidden1.name + "@GRAD") -- GitLab From cb06337f9e6503a14d8802ee2355e1aa64db9960 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Fri, 16 Feb 2018 01:07:36 +0000 Subject: [PATCH 126/217] change outputsize func name --- paddle/fluid/operators/conv_op.cc | 5 +++-- paddle/fluid/operators/conv_op.h | 4 ++-- paddle/fluid/operators/im2sequence_op.cc | 8 ++++---- 
paddle/fluid/operators/im2sequence_op.h | 20 ++++++++++---------- paddle/fluid/operators/pool_op.cc | 4 ++-- paddle/fluid/operators/pool_with_index_op.cc | 4 ++-- paddle/fluid/operators/unpool_op.cc | 6 +++--- 7 files changed, 26 insertions(+), 25 deletions(-) diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc index 6b378ec1bc..2ecece7073 100644 --- a/paddle/fluid/operators/conv_op.cc +++ b/paddle/fluid/operators/conv_op.cc @@ -60,8 +60,9 @@ void ConvOp::InferShape(framework::InferShapeContext* ctx) const { "Due to the settings of paddings, filter_dims and " "dilations, the output size is less than 0, please check " "again."); - output_shape.push_back(OutputSize(in_dims[i + 2], filter_dims[i + 2], - dilations[i], paddings[i], strides[i])); + output_shape.push_back(ConvOutputSize(in_dims[i + 2], filter_dims[i + 2], + dilations[i], paddings[i], + strides[i])); } ctx->SetOutputDim("Output", framework::make_ddim(output_shape)); ctx->ShareLoD("Input", "Output"); diff --git a/paddle/fluid/operators/conv_op.h b/paddle/fluid/operators/conv_op.h index ecbe3d505a..c93c2e73f7 100644 --- a/paddle/fluid/operators/conv_op.h +++ b/paddle/fluid/operators/conv_op.h @@ -28,8 +28,8 @@ using Tensor = framework::Tensor; // Base convolution operator definations for other conv // like operators to reuse the implementation. -inline int OutputSize(int input_size, int filter_size, int dilation, - int padding, int stride) { +inline int ConvOutputSize(int input_size, int filter_size, int dilation, + int padding, int stride) { const int dkernel = dilation * (filter_size - 1) + 1; const int output_size = (input_size + 2 * padding - dkernel) / stride + 1; return output_size; diff --git a/paddle/fluid/operators/im2sequence_op.cc b/paddle/fluid/operators/im2sequence_op.cc index 5bc28e0a52..048391549d 100644 --- a/paddle/fluid/operators/im2sequence_op.cc +++ b/paddle/fluid/operators/im2sequence_op.cc @@ -41,10 +41,10 @@ class Im2SequenceOp : public framework::OperatorWithKernel { int img_height = in_dim[2]; int img_width = in_dim[3]; - int output_height = OutputSize(img_height, kernels[0], paddings[0], - paddings[2], strides[0]); - int output_width = - OutputSize(img_width, kernels[1], paddings[1], paddings[3], strides[1]); + int output_height = Im2SeqOutputSize(img_height, kernels[0], paddings[0], + paddings[2], strides[0]); + int output_width = Im2SeqOutputSize(img_width, kernels[1], paddings[1], + paddings[3], strides[1]); ctx->SetOutputDim("Out", {batch_size * output_height * output_width, img_channels * kernels[0] * kernels[1]}); diff --git a/paddle/fluid/operators/im2sequence_op.h b/paddle/fluid/operators/im2sequence_op.h index 4193819b78..a6a83fefbc 100644 --- a/paddle/fluid/operators/im2sequence_op.h +++ b/paddle/fluid/operators/im2sequence_op.h @@ -26,8 +26,8 @@ namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; -inline int OutputSize(int input_size, int filter_size, int padding_0, - int padding_1, int stride) { +inline int Im2SeqOutputSize(int input_size, int filter_size, int padding_0, + int padding_1, int stride) { const int output_size = (input_size + padding_0 + padding_1 - filter_size) / stride + 1; return output_size; @@ -53,10 +53,10 @@ class Im2SequenceKernel : public framework::OpKernel { auto kernels = ctx.Attr>("kernels"); auto strides = ctx.Attr>("strides"); auto paddings = ctx.Attr>("paddings"); - int output_height = OutputSize(img_height, kernels[0], paddings[0], - paddings[2], strides[0]); - int output_width = - 
OutputSize(img_width, kernels[1], paddings[1], paddings[3], strides[1]); + int output_height = Im2SeqOutputSize(img_height, kernels[0], paddings[0], + paddings[2], strides[0]); + int output_width = Im2SeqOutputSize(img_width, kernels[1], paddings[1], + paddings[3], strides[1]); const std::vector dilations({1, 1}); @@ -109,10 +109,10 @@ class Im2SequenceGradKernel : public framework::OpKernel { auto kernels = ctx.Attr>("kernels"); auto strides = ctx.Attr>("strides"); auto paddings = ctx.Attr>("paddings"); - int output_height = OutputSize(img_height, kernels[0], paddings[0], - paddings[2], strides[0]); - int output_width = - OutputSize(img_width, kernels[1], paddings[1], paddings[3], strides[1]); + int output_height = Im2SeqOutputSize(img_height, kernels[0], paddings[0], + paddings[2], strides[0]); + int output_width = Im2SeqOutputSize(img_width, kernels[1], paddings[1], + paddings[3], strides[1]); const std::vector dilations({1, 1}); diff --git a/paddle/fluid/operators/pool_op.cc b/paddle/fluid/operators/pool_op.cc index a80b23b8ed..c7729ad132 100644 --- a/paddle/fluid/operators/pool_op.cc +++ b/paddle/fluid/operators/pool_op.cc @@ -17,7 +17,7 @@ limitations under the License. */ namespace paddle { namespace operators { -int OutputSizePool(int input_size, int filter_size, int padding, int stride) { +int PoolOutputSize(int input_size, int filter_size, int padding, int stride) { int output_size = (input_size - filter_size + 2 * padding) / stride + 1; return output_size; } @@ -55,7 +55,7 @@ void PoolOp::InferShape(framework::InferShapeContext *ctx) const { std::vector output_shape({in_x_dims[0], in_x_dims[1]}); for (size_t i = 0; i < ksize.size(); ++i) { output_shape.push_back( - OutputSizePool(in_x_dims[i + 2], ksize[i], paddings[i], strides[i])); + PoolOutputSize(in_x_dims[i + 2], ksize[i], paddings[i], strides[i])); } ctx->SetOutputDim("Out", framework::make_ddim(output_shape)); ctx->ShareLoD("X", "Out"); diff --git a/paddle/fluid/operators/pool_with_index_op.cc b/paddle/fluid/operators/pool_with_index_op.cc index 3a59365d17..4df0a14577 100644 --- a/paddle/fluid/operators/pool_with_index_op.cc +++ b/paddle/fluid/operators/pool_with_index_op.cc @@ -17,7 +17,7 @@ limitations under the License. 
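The pooling helper renamed here and the unpooling helper renamed just below form a complementary pair; a hypothetical Python mirror of the two C++ functions makes the inverse relationship easy to verify whenever the pooled division is exact:

    def pool_output_size(input_size, ksize, padding, stride):
        return (input_size - ksize + 2 * padding) // stride + 1

    def unpool_output_size(input_size, ksize, padding, stride):
        return (input_size - 1) * stride - 2 * padding + ksize

    # 2x2 pooling with stride 2 halves the size; unpooling restores it
    assert pool_output_size(32, 2, 0, 2) == 16
    assert unpool_output_size(16, 2, 0, 2) == 32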
*/ namespace paddle { namespace operators { -inline int OutputSizeMaxPool(int input_size, int filter_size, int padding, +inline int MaxPoolOutputSize(int input_size, int filter_size, int padding, int stride) { int output_size = (input_size - filter_size + 2 * padding) / stride + 1; return output_size; @@ -61,7 +61,7 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel { std::vector output_shape({in_x_dims[0], in_x_dims[1]}); for (size_t i = 0; i < ksize.size(); ++i) { - output_shape.push_back(OutputSizeMaxPool(in_x_dims[i + 2], ksize[i], + output_shape.push_back(MaxPoolOutputSize(in_x_dims[i + 2], ksize[i], paddings[i], strides[i])); } ctx->SetOutputDim("Out", framework::make_ddim(output_shape)); diff --git a/paddle/fluid/operators/unpool_op.cc b/paddle/fluid/operators/unpool_op.cc index d3bd7fda09..0ca7ea00fa 100644 --- a/paddle/fluid/operators/unpool_op.cc +++ b/paddle/fluid/operators/unpool_op.cc @@ -64,7 +64,7 @@ Paper: http://www.matthewzeiler.com/wp-content/uploads/2017/07/iccv2011.pdf } }; -int OutputSize(int input_size, int ksize, int padding, int stride) { +int UnpoolOutputSize(int input_size, int ksize, int padding, int stride) { int output_size = (input_size - 1) * stride - 2 * padding + ksize; return output_size; } @@ -101,8 +101,8 @@ class UnpoolOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(in_x_dims, in_y_dims); std::vector output_shape({in_x_dims[0], in_x_dims[1]}); for (size_t i = 0; i < ksize.size(); ++i) { - output_shape.push_back( - OutputSize(in_x_dims[i + 2], ksize[i], paddings[i], strides[i])); + output_shape.push_back(UnpoolOutputSize(in_x_dims[i + 2], ksize[i], + paddings[i], strides[i])); } ctx->SetOutputDim("Out", framework::make_ddim(output_shape)); } -- GitLab From 74404fadcd5256d321f5440fec9fac44a3c8fc3e Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Thu, 15 Feb 2018 17:08:57 -0800 Subject: [PATCH 127/217] Python implementation for a proposed Go Op. 
(#8434) * Adding Python boilerplate code for Go op * Add very basic test case * Adding the python logic for go routine * Fix syntax * Changing test to notest * Rename Routine to Go * Combining GoGuard and Go in one class * Modify test * Adding fluid close channel * Fixing __init__.py for calling fluid.go() * Adding stubs for channel methods and updating test case * Removing import * * Adding imports from concurrency --- python/paddle/v2/fluid/__init__.py | 4 +- python/paddle/v2/fluid/concurrency.py | 86 +++++++++++++++++++ .../v2/fluid/tests/notest_concurrency.py | 38 ++++++++ 3 files changed, 127 insertions(+), 1 deletion(-) create mode 100644 python/paddle/v2/fluid/concurrency.py create mode 100644 python/paddle/v2/fluid/tests/notest_concurrency.py diff --git a/python/paddle/v2/fluid/__init__.py b/python/paddle/v2/fluid/__init__.py index 9f710c4a4a..361fb3f5ad 100644 --- a/python/paddle/v2/fluid/__init__.py +++ b/python/paddle/v2/fluid/__init__.py @@ -34,13 +34,15 @@ from data_feeder import DataFeeder from core import LoDTensor, CPUPlace, CUDAPlace from distribute_transpiler import DistributeTranspiler from distribute_transpiler_simple import SimpleDistributeTranspiler +from concurrency import (Go, make_channel, channel_send, channel_recv, + channel_close) import clip from memory_optimization_transpiler import memory_optimize import profiler Tensor = LoDTensor -__all__ = framework.__all__ + executor.__all__ + [ +__all__ = framework.__all__ + executor.__all__ + concurrency.__all__ + [ 'io', 'initializer', 'layers', diff --git a/python/paddle/v2/fluid/concurrency.py b/python/paddle/v2/fluid/concurrency.py new file mode 100644 index 0000000000..5f868b6e86 --- /dev/null +++ b/python/paddle/v2/fluid/concurrency.py @@ -0,0 +1,86 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# TODO: Variables: make_channel +# TODO: Operators: send, close_channel, recv, go, select +from layers.control_flow import BlockGuard +from layer_helper import LayerHelper + +__all__ = [ + 'Go', + 'make_channel', + 'channel_send', + 'channel_recv', + 'channel_close', +] + + +class Go(BlockGuard): + def __init__(self, name=None): + self.helper = LayerHelper("go", name=name) + super(Go, self).__init__(self.helper.main_program) + + def __enter__(self): + super(Go, self).__enter__() + + def __exit__(self, exc_type, exc_val, exc_tb): + if exc_type is not None: + return False + self.construct_go_op() + return super(Go, self).__exit__(exc_type, exc_val, exc_tb) + + def construct_go_op(self): + main_program = self.helper.main_program + go_block = main_program.current_block() + parent_block = main_program.block(main_program.current_block() + .parent_idx) + + x_name_list = set() + out_vars = set() + for op in go_block.ops: + # Iterate over all operators, get all the inputs + # and add as input to the Go operator. 
+ for iname in op.input_names: + for in_var_name in op.input(iname): + x_name_list.add(in_var_name) + + # Iterate over all operators , get all the outputs + # add to the output list of Go operator only if + # they exist in the parent block. + for oname in op.output_names: + for out_var_name in op.output(oname): + if out_var_name in parent_block.vars: + out_vars.add(parent_block.var(out_var_name)) + + parent_block.append_op( + type='go', + inputs={'X': [parent_block.var(x_name) for x_name in x_name_list]}, + outputs={'Out': out_vars}, + attrs={'sub_block': go_block}) + + +def make_channel(dtype, size=0): + return True + + +def channel_send(channel, value): + return True + + +def channel_recv(channel): + return True + + +def channel_close(channel): + return True diff --git a/python/paddle/v2/fluid/tests/notest_concurrency.py b/python/paddle/v2/fluid/tests/notest_concurrency.py new file mode 100644 index 0000000000..9d87ed9c07 --- /dev/null +++ b/python/paddle/v2/fluid/tests/notest_concurrency.py @@ -0,0 +1,38 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import paddle.v2.fluid as fluid +import paddle.v2.fluid.core as core +from paddle.v2.fluid.executor import Executor + + +class TestRoutineOp(unittest.TestCase): + def test_simple_routine(self): + ch = fluid.make_channel(dtype=bool) + with fluid.Go(): + fluid.channel_send(ch, True) + + result = fluid.channel_recv(ch) + fluid.channel_close(ch) + + cpu = core.CPUPlace() + exe = Executor(cpu) + + outs = exe.run(fetch_list=[result]) + self.assertEqual(outs[0], True) + + +if __name__ == '__main__': + unittest.main() -- GitLab From 74e0eb7267b4eae7016a44fb1fbc62bf2e952bde Mon Sep 17 00:00:00 2001 From: kexinzhao Date: Thu, 15 Feb 2018 17:10:29 -0800 Subject: [PATCH 128/217] make float16 a pod type (#8456) --- paddle/fluid/framework/tensor_impl.h | 5 +++- paddle/fluid/platform/float16.h | 43 ++++++++++++++++++++++----- paddle/fluid/platform/float16_test.cc | 32 ++++++++++++++++---- paddle/fluid/platform/float16_test.cu | 33 ++++++++++++++++++++ 4 files changed, 99 insertions(+), 14 deletions(-) diff --git a/paddle/fluid/framework/tensor_impl.h b/paddle/fluid/framework/tensor_impl.h index 59e6269ea0..638bd0db9d 100644 --- a/paddle/fluid/framework/tensor_impl.h +++ b/paddle/fluid/framework/tensor_impl.h @@ -15,6 +15,7 @@ limitations under the License. 
*/ #pragma once #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/float16.h" namespace paddle { namespace framework { @@ -52,7 +53,9 @@ struct SizeOfTypeFunctor { }; static inline size_t SizeOfType(std::type_index type) { - SizeOfTypeFunctor functor; + SizeOfTypeFunctor + functor; size_t size = functor(type); PADDLE_ENFORCE(size != 0UL, "Cannot get size of type %s", type.name()); return size; diff --git a/paddle/fluid/platform/float16.h b/paddle/fluid/platform/float16.h index c36bfad4bc..cf6a4b09db 100644 --- a/paddle/fluid/platform/float16.h +++ b/paddle/fluid/platform/float16.h @@ -62,6 +62,7 @@ limitations under the License. */ #define PADDLE_ALIGN(x) __attribute__((aligned(x))) namespace paddle { +namespace platform { // Use PADDLE_ALIGNED(2) to ensure that each float16 will be allocated // and aligned at least on a 2-byte boundary, which leads to efficient @@ -71,11 +72,21 @@ struct PADDLE_ALIGN(2) float16 { public: uint16_t x; - // Constructors - HOSTDEVICE inline float16() : x(0) {} + // The following defaulted special class member functions + // are added to make float16 pass the std::is_trivial test + HOSTDEVICE inline float16() = default; - HOSTDEVICE inline float16(const float16& h) : x(h.x) {} + HOSTDEVICE inline float16(const float16&) = default; + HOSTDEVICE inline float16& operator=(const float16&) = default; + + HOSTDEVICE inline float16(float16&&) = default; + + HOSTDEVICE inline float16& operator=(float16&&) = default; + + HOSTDEVICE inline ~float16() = default; + +// Constructors #ifdef PADDLE_CUDA_FP16 HOSTDEVICE inline explicit float16(const half& h) { #if CUDA_VERSION >= 9000 @@ -136,11 +147,6 @@ struct PADDLE_ALIGN(2) float16 { HOSTDEVICE inline explicit float16(const T& val) : x(float16(static_cast(val)).x) {} - HOSTDEVICE inline float16& operator=(const float16& rhs) { - x = rhs.x; - return *this; - } - // Assignment operators #ifdef PADDLE_CUDA_FP16 HOSTDEVICE inline float16& operator=(const half& rhs) { @@ -727,4 +733,25 @@ HOSTDEVICE inline bool operator>=(const float16& a, const float16& b) { return float(a) >= float(b); } #endif + +} // namespace platform } // namespace paddle + +namespace std { + +// Override the std::is_pod::value for float16 +// The reason is that different compilers implemented std::is_pod based on +// different C++ standards. float16 class is a plain old data in C++11 given +// that it is both trivial and standard_layout. +// However, std::is_pod in nvcc 8.0 host c++ compiler follows C++0x and is +// more restricted in that you cannot provide any customized +// constructor in float16. Hence, we override is_pod here following C++11 +// so that .cu files can be successfully compiled by nvcc. +template <> +struct is_pod { + static const bool value = + is_trivial::value && + is_standard_layout::value; +}; + +} // namespace std diff --git a/paddle/fluid/platform/float16_test.cc b/paddle/fluid/platform/float16_test.cc index bed29dbfa7..b716ad9df4 100644 --- a/paddle/fluid/platform/float16_test.cc +++ b/paddle/fluid/platform/float16_test.cc @@ -10,10 +10,13 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/platform/float16.h" +#include "paddle/fluid/framework/init.h" +#include "paddle/fluid/framework/lod_tensor.h" #include namespace paddle { +namespace platform { TEST(float16, conversion_cpu) { // Explicit conversion from Eigen::half @@ -54,13 +57,9 @@ TEST(float16, conversion_cpu) { EXPECT_EQ(float16(true).x, 0x3c00); EXPECT_EQ(float16(false).x, 0x0000); - // Default constructor - float16 v_def; - EXPECT_EQ(v_def.x, 0x0000); - // Assignment operator float16 v_assign; - v_assign = v_def; + v_assign = float16(0); EXPECT_EQ(v_assign.x, 0x0000); v_assign = Eigen::half(1.0f); EXPECT_EQ(v_assign.x, 0x3c00); @@ -116,4 +115,27 @@ TEST(float16, comparison_cpu) { EXPECT_FALSE(float16(-0.0f) > float16(0.0f)); } +TEST(float16, lod_tensor_cpu) { + framework::LoDTensor lod_tensor; + + std::vector input_data = {float16(1.0f), float16(0.5f), + float16(0.33333f), float16(0.0f)}; + EXPECT_EQ(input_data[0].x, 0x3c00); + EXPECT_EQ(input_data[1].x, 0x3800); + EXPECT_EQ(input_data[2].x, 0x3555); + EXPECT_EQ(input_data[3].x, 0x0000); + + lod_tensor.Resize({4, 1}); + lod_tensor.set_lod(framework::LoD({{0, 2, 4}})); + float16* data_ptr = lod_tensor.mutable_data(CPUPlace()); + + EXPECT_NE(data_ptr, nullptr); + EXPECT_EQ(input_data.size(), static_cast(lod_tensor.numel())); + for (size_t i = 0; i < input_data.size(); ++i) { + data_ptr[i] = input_data[i]; + EXPECT_EQ(data_ptr[i].x, input_data[i].x); + } +} + +} // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/float16_test.cu b/paddle/fluid/platform/float16_test.cu index 7e6c9f58ac..567209df4e 100644 --- a/paddle/fluid/platform/float16_test.cu +++ b/paddle/fluid/platform/float16_test.cu @@ -13,6 +13,8 @@ limitations under the License. */ #include +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/tensor_util.h" #include "paddle/utils/Logging.h" #define ARITHMETIC_KERNEL(op_type, sign) \ @@ -108,6 +110,7 @@ limitations under the License. 
*/ #ifdef PADDLE_CUDA_FP16 namespace paddle { +namespace platform { #if CUDA_VERSION < 9000 ARITHMETIC_KERNEL(Add, +) @@ -209,5 +212,35 @@ TEST(float16, conversion_on_gpu) { EXPECT_EQ(v_assign.x, 0x3c00); } +TEST(float16, lod_tensor_on_gpu) { + framework::LoDTensor src_tensor; + framework::LoDTensor gpu_tensor; + framework::LoDTensor dst_tensor; + + float16* src_ptr = src_tensor.mutable_data( + framework::make_ddim({2, 2}), CPUPlace()); + + float16 arr[4] = {float16(1.0f), float16(0.5f), float16(0.33333f), + float16(0.0f)}; + memcpy(src_ptr, arr, 4 * sizeof(float16)); + + // CPU LoDTensor to GPU LoDTensor + CUDAPlace gpu_place(0); + CUDADeviceContext gpu_ctx(gpu_place); + framework::TensorCopy(src_tensor, gpu_place, gpu_ctx, &gpu_tensor); + + // GPU LoDTensor to CPU LoDTensor + framework::TensorCopy(gpu_tensor, CPUPlace(), gpu_ctx, &dst_tensor); + + // Sync before comparing LoDTensors + gpu_ctx.Wait(); + const float16* dst_ptr = dst_tensor.data(); + ASSERT_NE(src_ptr, dst_ptr); + for (size_t i = 0; i < 4; ++i) { + EXPECT_EQ(src_ptr[i].x, dst_ptr[i].x); + } +} + +} // namespace platform } // namespace paddle #endif // PADDLE_CUDA_FP16 -- GitLab From c7ad26d6a4a37c6a2f0a59408e93fda23315cf94 Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Thu, 15 Feb 2018 21:32:07 -0800 Subject: [PATCH 129/217] [WIP] Move DataType enum inside VarType (#8447) * Move Pod Types from DataType enum to Type enum * Fixed data_type.h * Fix type in TensorDesc * Add comment to framework.proto * Fixed type in data_type.h * Updated format of type in data_type.h * Fix var_desc.h * Fix op_kernel_type.h * Fixed data_type_transform_test.cc * Fix operator.h * Fixed data_type_transform.cc * Fixed op_kernel_type_test.cc * Fix operator.cc * Fixed data_layout_transform_test.cc * Fix var_desc.cc * Fixed assign_value_op.cc * Fixed assign_value_op.h * fixed protobuf.cc * Fix data_layout_transform_test.cc and op_kernel_type_test.cc * Fixed rnn_memory_helper_op.cc * Fix progrma_desc_test.cc * Fixed fill_constant_batch_size_like_op.cc * Fix operator_test.cc * Fixed fill_constant_op.cc * Fixed gaussian_random_op.cc * Fixed uniform_random_op.cc * Fixed edit_distance_op.cc * Fixed fill_constant_batch_size_like_op.cc * Fixed rnn_memory_helper_op.cc * Fixed chunk_eval_op.cc * Fixed assign_value_op.cc * Fixed assign_value_op.h * Fixed cast_op.h * Fixed cast_op.h * Fix fill constant op * Fixed clang for assign_value_op.cc * Fix one_hot_op.h * Fix one_hot_op.cc * Fix fill_op.cc * Fixed sum_op.cc * Fixed sum_op clang * Fix uniform_random_op.cc * Fix gaussian_random_op.cc * Fix backward.cc * Fix protobuf.cc * Fixed prune_test.cc * Fixed op_registry_test.cc * Fix data_device_transform_test.cu * Fix travis error * Fixed one_hot_op.cu * Fixed op_registry_test.cc * Fixed nccl_op.cc * Fixing python tests * Revert "Fixing python tests" This reverts commit fccaa4c5818ed9f379ea1ce4315066cc78076c64. 
* Fixing Pybind to remove data type * Fixing tensor.py * Updated the new files: * Resolve error in merge conflict of fill_constant_batch_size_like_op.cc --- paddle/fluid/framework/backward.cc | 2 +- .../framework/data_device_transform_test.cu | 4 +- .../framework/data_layout_transform_test.cc | 4 +- paddle/fluid/framework/data_type.h | 54 +++++++++---------- paddle/fluid/framework/data_type_transform.cc | 10 ++-- .../framework/data_type_transform_test.cc | 6 +-- paddle/fluid/framework/framework.proto | 41 +++++++------- paddle/fluid/framework/op_kernel_type.h | 6 +-- paddle/fluid/framework/op_kernel_type_test.cc | 4 +- paddle/fluid/framework/op_registry_test.cc | 8 +-- paddle/fluid/framework/operator.cc | 4 +- paddle/fluid/framework/operator.h | 4 +- paddle/fluid/framework/operator_test.cc | 2 +- paddle/fluid/framework/program_desc_test.cc | 8 +-- paddle/fluid/framework/prune_test.cc | 2 +- paddle/fluid/framework/var_desc.cc | 10 ++-- paddle/fluid/framework/var_desc.h | 9 ++-- paddle/fluid/operators/assign_value_op.cc | 7 +-- paddle/fluid/operators/assign_value_op.h | 4 +- paddle/fluid/operators/cast_op.h | 3 +- paddle/fluid/operators/chunk_eval_op.cc | 2 +- paddle/fluid/operators/edit_distance_op.cc | 2 +- .../fill_constant_batch_size_like_op.cc | 4 +- paddle/fluid/operators/fill_constant_op.cc | 4 +- paddle/fluid/operators/fill_op.cc | 5 +- .../gaussian_random_batch_size_like_op.cc | 4 +- paddle/fluid/operators/gaussian_random_op.cc | 4 +- paddle/fluid/operators/nccl_op.cc | 2 +- paddle/fluid/operators/one_hot_op.cc | 2 +- paddle/fluid/operators/one_hot_op.cu | 3 +- paddle/fluid/operators/one_hot_op.h | 3 +- .../fluid/operators/rnn_memory_helper_op.cc | 4 +- paddle/fluid/operators/sum_op.cc | 3 +- .../uniform_random_batch_size_like_op.cc | 4 +- paddle/fluid/operators/uniform_random_op.cc | 4 +- paddle/fluid/pybind/protobuf.cc | 16 +++--- python/paddle/v2/fluid/backward.py | 2 +- python/paddle/v2/fluid/data_feeder.py | 8 +-- python/paddle/v2/fluid/evaluator.py | 2 +- python/paddle/v2/fluid/framework.py | 29 +++++----- python/paddle/v2/fluid/layers/control_flow.py | 2 +- python/paddle/v2/fluid/layers/nn.py | 2 +- python/paddle/v2/fluid/layers/tensor.py | 14 ++--- .../fluid/memory_optimization_transpiler.py | 14 ++--- .../paddle/v2/fluid/tests/test_cpp_reader.py | 2 +- .../v2/fluid/tests/unittests/op_test.py | 4 +- .../tests/unittests/test_batch_norm_op.py | 4 +- .../v2/fluid/tests/unittests/test_cast_op.py | 4 +- .../v2/fluid/tests/unittests/test_fill_op.py | 2 +- .../tests/unittests/test_layer_norm_op.py | 4 +- .../fluid/tests/unittests/test_one_hot_op.py | 2 +- .../fluid/tests/unittests/test_parameter.py | 2 +- .../tests/unittests/test_protobuf_descs.py | 7 +-- .../v2/fluid/tests/unittests/test_variable.py | 6 +-- 54 files changed, 189 insertions(+), 179 deletions(-) diff --git a/paddle/fluid/framework/backward.cc b/paddle/fluid/framework/backward.cc index 68f4fd4424..1314af2b3d 100644 --- a/paddle/fluid/framework/backward.cc +++ b/paddle/fluid/framework/backward.cc @@ -341,7 +341,7 @@ static void CreateGradVarInBlock( auto* param = block_desc->FindVarRecursive(pname); auto* grad = block_desc->FindVar(arg); if (param == nullptr) { - grad->SetDataType(proto::DataType::FP32); + grad->SetDataType(proto::VarType::FP32); } else { grad->SetDataType(param->GetDataType()); } diff --git a/paddle/fluid/framework/data_device_transform_test.cu b/paddle/fluid/framework/data_device_transform_test.cu index db6687985d..e896a06162 100644 --- a/paddle/fluid/framework/data_device_transform_test.cu +++ 
b/paddle/fluid/framework/data_device_transform_test.cu @@ -51,10 +51,10 @@ class TestOpWithKernel : public OperatorWithKernel { const ExecutionContext& ctx) const override { if (Attr("use_gpu")) { VLOG(3) << "force use gpu kernel"; - return OpKernelType(proto::DataType::FP32, platform::CUDAPlace(0)); + return OpKernelType(proto::VarType::FP32, platform::CUDAPlace(0)); } else { VLOG(3) << "use default kernel"; - return OpKernelType(proto::DataType::FP32, + return OpKernelType(proto::VarType::FP32, ctx.Input("input")->place()); } } diff --git a/paddle/fluid/framework/data_layout_transform_test.cc b/paddle/fluid/framework/data_layout_transform_test.cc index 73689cc9bc..dd17cac0e1 100644 --- a/paddle/fluid/framework/data_layout_transform_test.cc +++ b/paddle/fluid/framework/data_layout_transform_test.cc @@ -27,9 +27,9 @@ TEST(DataTransform, DataLayoutFunction) { in.mutable_data(make_ddim({2, 3, 1, 2}), place); in.set_layout(DataLayout::kNHWC); - auto kernel_nhwc = OpKernelType(proto::DataType::FP32, place, + auto kernel_nhwc = OpKernelType(proto::VarType::FP32, place, DataLayout::kNHWC, LibraryType::kPlain); - auto kernel_ncwh = OpKernelType(proto::DataType::FP32, place, + auto kernel_ncwh = OpKernelType(proto::VarType::FP32, place, DataLayout::kNCHW, LibraryType::kPlain); TransDataLayout(kernel_nhwc, kernel_ncwh, in, &out); diff --git a/paddle/fluid/framework/data_type.h b/paddle/fluid/framework/data_type.h index 127bbcf5d0..1dec766a34 100644 --- a/paddle/fluid/framework/data_type.h +++ b/paddle/fluid/framework/data_type.h @@ -20,35 +20,35 @@ limitations under the License. */ namespace paddle { namespace framework { -inline proto::DataType ToDataType(std::type_index type) { +inline proto::VarType::Type ToDataType(std::type_index type) { using namespace paddle::framework::proto; if (typeid(float).hash_code() == type.hash_code()) { - return DataType::FP32; + return proto::VarType::FP32; } else if (typeid(double).hash_code() == type.hash_code()) { - return DataType::FP64; + return proto::VarType::FP64; } else if (typeid(int).hash_code() == type.hash_code()) { - return DataType::INT32; + return proto::VarType::INT32; } else if (typeid(int64_t).hash_code() == type.hash_code()) { - return DataType::INT64; + return proto::VarType::INT64; } else if (typeid(bool).hash_code() == type.hash_code()) { - return DataType::BOOL; + return proto::VarType::BOOL; } else { PADDLE_THROW("Not supported"); } } -inline std::type_index ToTypeIndex(proto::DataType type) { +inline std::type_index ToTypeIndex(proto::VarType::Type type) { using namespace paddle::framework::proto; switch (type) { - case DataType::FP32: + case proto::VarType::FP32: return typeid(float); - case DataType::FP64: + case proto::VarType::FP64: return typeid(double); - case DataType::INT32: + case proto::VarType::INT32: return typeid(int); - case DataType::INT64: + case proto::VarType::INT64: return typeid(int64_t); - case DataType::BOOL: + case proto::VarType::BOOL: return typeid(bool); default: PADDLE_THROW("Not support type %d", type); @@ -56,22 +56,22 @@ inline std::type_index ToTypeIndex(proto::DataType type) { } template -inline void VisitDataType(proto::DataType type, Visitor visitor) { +inline void VisitDataType(proto::VarType::Type type, Visitor visitor) { using namespace paddle::framework::proto; switch (type) { - case DataType::FP32: + case proto::VarType::FP32: visitor.template operator()(); break; - case DataType::FP64: + case proto::VarType::FP64: visitor.template operator()(); break; - case DataType::INT32: + case 
proto::VarType::INT32: visitor.template operator()(); break; - case DataType::INT64: + case proto::VarType::INT64: visitor.template operator()(); break; - case DataType::BOOL: + case proto::VarType::BOOL: visitor.template operator()(); break; default: @@ -79,22 +79,22 @@ inline void VisitDataType(proto::DataType type, Visitor visitor) { } } -inline std::string DataTypeToString(const proto::DataType type) { +inline std::string DataTypeToString(const proto::VarType::Type type) { using namespace paddle::framework::proto; switch (type) { - case DataType::FP16: + case proto::VarType::FP16: return "float16"; - case DataType::FP32: + case proto::VarType::FP32: return "float32"; - case DataType::FP64: + case proto::VarType::FP64: return "float64"; - case DataType::INT16: + case proto::VarType::INT16: return "int16"; - case DataType::INT32: + case proto::VarType::INT32: return "int32"; - case DataType::INT64: + case proto::VarType::INT64: return "int64"; - case DataType::BOOL: + case proto::VarType::BOOL: return "bool"; default: PADDLE_THROW("Not support type %d", type); @@ -102,7 +102,7 @@ inline std::string DataTypeToString(const proto::DataType type) { } inline std::ostream& operator<<(std::ostream& out, - const proto::DataType& type) { + const proto::VarType::Type& type) { out << DataTypeToString(type); return out; } diff --git a/paddle/fluid/framework/data_type_transform.cc b/paddle/fluid/framework/data_type_transform.cc index e5836998e2..54cc1575d8 100644 --- a/paddle/fluid/framework/data_type_transform.cc +++ b/paddle/fluid/framework/data_type_transform.cc @@ -65,19 +65,19 @@ void TransDataType(const OpKernelType& kernel_type_for_var, auto ctx = pool.Get(in.place()); switch (src_type) { - case proto::DataType::FP32: + case proto::VarType::FP32: framework::VisitDataType(dst_type, CastDataType(in, out, ctx)); break; - case proto::DataType::FP64: + case proto::VarType::FP64: framework::VisitDataType(dst_type, CastDataType(in, out, ctx)); break; - case proto::DataType::INT32: + case proto::VarType::INT32: framework::VisitDataType(dst_type, CastDataType(in, out, ctx)); break; - case proto::DataType::INT64: + case proto::VarType::INT64: framework::VisitDataType(dst_type, CastDataType(in, out, ctx)); break; - case proto::DataType::BOOL: + case proto::VarType::BOOL: framework::VisitDataType(dst_type, CastDataType(in, out, ctx)); break; default: diff --git a/paddle/fluid/framework/data_type_transform_test.cc b/paddle/fluid/framework/data_type_transform_test.cc index 444d3b823c..724c8c301f 100644 --- a/paddle/fluid/framework/data_type_transform_test.cc +++ b/paddle/fluid/framework/data_type_transform_test.cc @@ -32,11 +32,11 @@ TEST(DataTypeTransform, CPUTransform) { ptr[i] = i / 3; } - auto kernel_fp32 = OpKernelType(proto::DataType::FP32, place, + auto kernel_fp32 = OpKernelType(proto::VarType::FP32, place, DataLayout::kAnyLayout, LibraryType::kPlain); - auto kernel_fp64 = OpKernelType(proto::DataType::FP64, place, + auto kernel_fp64 = OpKernelType(proto::VarType::FP64, place, DataLayout::kAnyLayout, LibraryType::kPlain); - auto kernel_int32 = OpKernelType(proto::DataType::INT32, place, + auto kernel_int32 = OpKernelType(proto::VarType::INT32, place, DataLayout::kAnyLayout, LibraryType::kPlain); TransDataType(kernel_fp32, kernel_fp64, in, &out); diff --git a/paddle/fluid/framework/framework.proto b/paddle/fluid/framework/framework.proto index fa7f437851..22d0692394 100644 --- a/paddle/fluid/framework/framework.proto +++ b/paddle/fluid/framework/framework.proto @@ -91,33 +91,34 @@ message OpProto { 
required string comment = 5; } -enum DataType { - BOOL = 0; - INT16 = 1; - INT32 = 2; - INT64 = 3; - FP16 = 4; - FP32 = 5; - FP64 = 6; -} - message VarType { enum Type { - LOD_TENSOR = 1; - SELECTED_ROWS = 2; - FEED_MINIBATCH = 3; - FETCH_LIST = 4; - STEP_SCOPES = 5; - LOD_RANK_TABLE = 6; - LOD_TENSOR_ARRAY = 7; - PLACE_LIST = 8; - READER = 9; + // Pod Types + BOOL = 0; + INT16 = 1; + INT32 = 2; + INT64 = 3; + FP16 = 4; + FP32 = 5; + FP64 = 6; + + // Other types that may need additional descriptions + LOD_TENSOR = 7; + SELECTED_ROWS = 8; + FEED_MINIBATCH = 9; + FETCH_LIST = 10; + STEP_SCOPES = 11; + LOD_RANK_TABLE = 12; + LOD_TENSOR_ARRAY = 13; + PLACE_LIST = 14; + READER = 15; } required Type type = 1; message TensorDesc { - required DataType data_type = 1; + // Should only be PODType. Is enforced in C++ + required Type data_type = 1; repeated int64 dims = 2; // [UNK, 640, 480] is saved as [-1, 640, 480] } optional TensorDesc selected_rows = 2; diff --git a/paddle/fluid/framework/op_kernel_type.h b/paddle/fluid/framework/op_kernel_type.h index 980e4eafaa..3a1036742c 100644 --- a/paddle/fluid/framework/op_kernel_type.h +++ b/paddle/fluid/framework/op_kernel_type.h @@ -40,12 +40,12 @@ struct OpKernelType { // place, data_type, library_type kinds less than 2^8 constexpr static int LEFT_SHIFT = 8; - proto::DataType data_type_; + proto::VarType::Type data_type_; DataLayout data_layout_; platform::Place place_; LibraryType library_type_; - OpKernelType(proto::DataType data_type, platform::Place place, + OpKernelType(proto::VarType::Type data_type, platform::Place place, DataLayout data_layout = DataLayout::kAnyLayout, LibraryType library_type = LibraryType::kPlain) : data_type_(data_type), @@ -53,7 +53,7 @@ struct OpKernelType { place_(place), library_type_(library_type) {} - OpKernelType(proto::DataType data_type, + OpKernelType(proto::VarType::Type data_type, const platform::DeviceContext& dev_ctx, DataLayout data_layout = DataLayout::kAnyLayout, LibraryType library_type = LibraryType::kPlain) diff --git a/paddle/fluid/framework/op_kernel_type_test.cc b/paddle/fluid/framework/op_kernel_type_test.cc index e56fe35c01..d37ce149ce 100644 --- a/paddle/fluid/framework/op_kernel_type_test.cc +++ b/paddle/fluid/framework/op_kernel_type_test.cc @@ -18,7 +18,7 @@ limitations under the License. 
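With the POD types folded into VarType above, Python code addresses tensor element types through the nested enum rather than a separate DataType enum. A sketch of the numpy-to-proto correspondence this enables (the same mapping used by fluid.framework.convert_np_dtype_to_dtype_ later in this patch):

    import numpy as np
    import paddle.v2.fluid.core as core

    # POD dtypes now live on core.VarDesc.VarType instead of core.DataType
    np_to_var_type = {
        np.dtype('float32'): core.VarDesc.VarType.FP32,
        np.dtype('float64'): core.VarDesc.VarType.FP64,
        np.dtype('int32'): core.VarDesc.VarType.INT32,
        np.dtype('int64'): core.VarDesc.VarType.INT64,
        np.dtype('bool'): core.VarDesc.VarType.BOOL,
    }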
*/ TEST(OpKernelType, ToString) { using OpKernelType = paddle::framework::OpKernelType; - using DataType = paddle::framework::proto::DataType; + using DataType = paddle::framework::proto::VarType; using CPUPlace = paddle::platform::CPUPlace; using DataLayout = paddle::framework::DataLayout; using LibraryType = paddle::framework::LibraryType; @@ -33,7 +33,7 @@ TEST(OpKernelType, ToString) { TEST(OpKernelType, Hash) { using OpKernelType = paddle::framework::OpKernelType; - using DataType = paddle::framework::proto::DataType; + using DataType = paddle::framework::proto::VarType; using CPUPlace = paddle::platform::CPUPlace; using CUDAPlace = paddle::platform::CUDAPlace; using DataLayout = paddle::framework::DataLayout; diff --git a/paddle/fluid/framework/op_registry_test.cc b/paddle/fluid/framework/op_registry_test.cc index b92647e892..0d791c8583 100644 --- a/paddle/fluid/framework/op_registry_test.cc +++ b/paddle/fluid/framework/op_registry_test.cc @@ -226,7 +226,7 @@ class OpWithKernelTest : public OperatorWithKernel { framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { - return framework::OpKernelType(proto::DataType::FP32, ctx.device_context()); + return framework::OpKernelType(proto::VarType::FP32, ctx.device_context()); } }; @@ -290,9 +290,9 @@ class OpWithMultiKernelTest : public OperatorWithKernel { framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { - return framework::OpKernelType( - proto::DataType::FP32, platform::CUDAPlace(0), DataLayout::kAnyLayout, - framework::LibraryType::kCUDNN); + return framework::OpKernelType(proto::VarType::FP32, platform::CUDAPlace(0), + DataLayout::kAnyLayout, + framework::LibraryType::kCUDNN); } }; diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index ff90aba10b..7debdd8525 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -569,7 +569,7 @@ void OperatorWithKernel::RunImpl(const Scope& scope, } } -proto::DataType OperatorWithKernel::IndicateDataType( +proto::VarType::Type OperatorWithKernel::IndicateDataType( const ExecutionContext& ctx) const { auto& scope = ctx.scope(); int data_type = -1; @@ -595,7 +595,7 @@ proto::DataType OperatorWithKernel::IndicateDataType( } } PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input"); - return static_cast(data_type); + return static_cast(data_type); } OpKernelType OperatorWithKernel::GetExpectedKernelType( diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h index c2782066ce..41214b41cb 100644 --- a/paddle/fluid/framework/operator.h +++ b/paddle/fluid/framework/operator.h @@ -394,9 +394,9 @@ class OperatorWithKernel : public OperatorBase { const OpKernelType& expected_kernel_type) const; private: - // indicate kernel DataType by input data. Defaultly all input data must be + // indicate kernel DataType by input data. By default all input data must be // same. 
- proto::DataType IndicateDataType(const ExecutionContext& ctx) const; + proto::VarType::Type IndicateDataType(const ExecutionContext& ctx) const; void RunImpl(const Scope& scope, const platform::Place& place) const final; }; diff --git a/paddle/fluid/framework/operator_test.cc b/paddle/fluid/framework/operator_test.cc index 08a471e0a1..44ca4d7ca5 100644 --- a/paddle/fluid/framework/operator_test.cc +++ b/paddle/fluid/framework/operator_test.cc @@ -119,7 +119,7 @@ class OpWithKernelTest : public OperatorWithKernel { void InferShape(framework::InferShapeContext* ctx) const override {} OpKernelType GetExpectedKernelType( const ExecutionContext& ctx) const override { - return OpKernelType(proto::DataType::FP32, ctx.GetPlace()); + return OpKernelType(proto::VarType::FP32, ctx.GetPlace()); } }; diff --git a/paddle/fluid/framework/program_desc_test.cc b/paddle/fluid/framework/program_desc_test.cc index d9c4331da1..66618a291b 100644 --- a/paddle/fluid/framework/program_desc_test.cc +++ b/paddle/fluid/framework/program_desc_test.cc @@ -24,13 +24,13 @@ TEST(ProgramDesc, copy_ctor) { auto* x = global_block->Var("X"); x->SetType(proto::VarType::LOD_TENSOR); x->SetLoDLevel(0); - x->SetDataType(proto::FP32); + x->SetDataType(proto::VarType::FP32); x->SetShape({1000, 784}); auto* y = global_block->Var("Y"); y->SetType(proto::VarType::LOD_TENSOR); y->SetLoDLevel(0); - y->SetDataType(proto::FP32); + y->SetDataType(proto::VarType::FP32); y->SetShape({784, 100}); auto* op = global_block->AppendOp(); @@ -86,13 +86,13 @@ TEST(ProgramDescBind, serialize_and_deserialize) { auto* x = global_block->Var("X"); x->SetType(proto::VarType::LOD_TENSOR); x->SetLoDLevel(0); - x->SetDataType(proto::FP32); + x->SetDataType(proto::VarType::FP32); x->SetShape({1000, 784}); auto* y = global_block->Var("Y"); y->SetType(proto::VarType::LOD_TENSOR); y->SetLoDLevel(0); - y->SetDataType(proto::FP32); + y->SetDataType(proto::VarType::FP32); y->SetShape({784, 100}); auto* op = global_block->AppendOp(); diff --git a/paddle/fluid/framework/prune_test.cc b/paddle/fluid/framework/prune_test.cc index b612fe8ad5..0e44b34383 100644 --- a/paddle/fluid/framework/prune_test.cc +++ b/paddle/fluid/framework/prune_test.cc @@ -34,7 +34,7 @@ void AddOp(const std::string &type, const f::VariableNameMap &inputs, for (auto kv : outputs) { for (auto v : kv.second) { auto var = block->Var(v); - var->SetDataType(paddle::framework::proto::DataType::FP32); + var->SetDataType(paddle::framework::proto::VarType::FP32); } } diff --git a/paddle/fluid/framework/var_desc.cc b/paddle/fluid/framework/var_desc.cc index bb2be1ab50..7e3f002b53 100644 --- a/paddle/fluid/framework/var_desc.cc +++ b/paddle/fluid/framework/var_desc.cc @@ -87,12 +87,12 @@ std::vector> VarDesc::GetShapes() const { return res; } -void VarDesc::SetDataType(proto::DataType data_type) { +void VarDesc::SetDataType(proto::VarType::Type data_type) { mutable_tensor_desc()->set_data_type(data_type); } void VarDesc::SetDataTypes( - const std::vector &multiple_data_type) { + const std::vector &multiple_data_type) { if (multiple_data_type.size() != GetTensorDescNum()) { VLOG(3) << "WARNING: The number of given data types(" << multiple_data_type.size() @@ -108,13 +108,13 @@ void VarDesc::SetDataTypes( } } -proto::DataType VarDesc::GetDataType() const { +proto::VarType::Type VarDesc::GetDataType() const { return tensor_desc().data_type(); } -std::vector VarDesc::GetDataTypes() const { +std::vector VarDesc::GetDataTypes() const { std::vector descs = tensor_descs(); - std::vector res; + std::vector res; 
res.reserve(descs.size()); for (const auto &tensor_desc : descs) { res.push_back(tensor_desc.data_type()); diff --git a/paddle/fluid/framework/var_desc.h b/paddle/fluid/framework/var_desc.h index 013ba446b9..19b8d890c1 100644 --- a/paddle/fluid/framework/var_desc.h +++ b/paddle/fluid/framework/var_desc.h @@ -80,13 +80,14 @@ class VarDesc { std::vector> GetShapes() const; - void SetDataType(proto::DataType data_type); + void SetDataType(proto::VarType::Type data_type); - void SetDataTypes(const std::vector &multiple_data_type); + void SetDataTypes( + const std::vector &multiple_data_type); - proto::DataType GetDataType() const; + proto::VarType::Type GetDataType() const; - std::vector GetDataTypes() const; + std::vector GetDataTypes() const; void SetLoDLevel(int32_t lod_level); diff --git a/paddle/fluid/operators/assign_value_op.cc b/paddle/fluid/operators/assign_value_op.cc index 2985fc28a0..e8123cb1a4 100644 --- a/paddle/fluid/operators/assign_value_op.cc +++ b/paddle/fluid/operators/assign_value_op.cc @@ -36,7 +36,8 @@ class AssignValueOp : public framework::OperatorWithKernel { framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext &ctx) const override { return framework::OpKernelType( - framework::proto::DataType(ctx.Attr("dtype")), ctx.GetPlace()); + framework::proto::VarType::Type(ctx.Attr("dtype")), + ctx.GetPlace()); } }; @@ -49,8 +50,8 @@ class AssignValueOpMaker : public framework::OpProtoAndCheckerMaker { "(vector) " "Shape of values."); AddAttr("dtype", "data type of values") - .InEnum({framework::proto::DataType::INT32, - framework::proto::DataType::FP32}); + .InEnum({framework::proto::VarType::INT32, + framework::proto::VarType::FP32}); AddAttr>("fp32_values", "store the float values") .SetDefault({}); AddAttr>("int32_values", "store the int values") diff --git a/paddle/fluid/operators/assign_value_op.h b/paddle/fluid/operators/assign_value_op.h index d51b215a08..c7b1a55a5c 100644 --- a/paddle/fluid/operators/assign_value_op.h +++ b/paddle/fluid/operators/assign_value_op.h @@ -30,10 +30,10 @@ class AssignValueKernel : public framework::OpKernel { int dtype = ctx.Attr("dtype"); const char* value_name = nullptr; switch (dtype) { - case framework::proto::DataType::INT32: + case framework::proto::VarType::INT32: value_name = "int32_values"; break; - case framework::proto::DataType::FP32: + case framework::proto::VarType::FP32: value_name = "fp32_values"; break; default: diff --git a/paddle/fluid/operators/cast_op.h b/paddle/fluid/operators/cast_op.h index ccfbd09a6b..6220e57f59 100644 --- a/paddle/fluid/operators/cast_op.h +++ b/paddle/fluid/operators/cast_op.h @@ -55,7 +55,8 @@ class CastOpKernel : public framework::OpKernel { auto* in = context.Input("X"); auto* out = context.Output("Out"); framework::VisitDataType( - static_cast(context.Attr("out_dtype")), + static_cast( + context.Attr("out_dtype")), CastOpFunctor( in, out, context.template device_context())); } diff --git a/paddle/fluid/operators/chunk_eval_op.cc b/paddle/fluid/operators/chunk_eval_op.cc index 09d090e187..77d3cffe7c 100644 --- a/paddle/fluid/operators/chunk_eval_op.cc +++ b/paddle/fluid/operators/chunk_eval_op.cc @@ -57,7 +57,7 @@ class ChunkEvalOp : public framework::OperatorWithKernel { protected: framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext &ctx) const override { - return framework::OpKernelType(framework::proto::DataType::FP32, + return framework::OpKernelType(framework::proto::VarType::FP32, platform::CPUPlace()); } }; diff --git 
a/paddle/fluid/operators/edit_distance_op.cc b/paddle/fluid/operators/edit_distance_op.cc index dbcbfec971..c7f037d2df 100644 --- a/paddle/fluid/operators/edit_distance_op.cc +++ b/paddle/fluid/operators/edit_distance_op.cc @@ -42,7 +42,7 @@ class EditDistanceOp : public framework::OperatorWithKernel { protected: framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext &ctx) const override { - return framework::OpKernelType(framework::proto::DataType::FP32, + return framework::OpKernelType(framework::proto::VarType::FP32, ctx.device_context()); } }; diff --git a/paddle/fluid/operators/fill_constant_batch_size_like_op.cc b/paddle/fluid/operators/fill_constant_batch_size_like_op.cc index 55eca71c8b..72da80baaf 100644 --- a/paddle/fluid/operators/fill_constant_batch_size_like_op.cc +++ b/paddle/fluid/operators/fill_constant_batch_size_like_op.cc @@ -24,7 +24,7 @@ class FillConstantBatchSizeLikeOp : public BatchSizeLikeOp { framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext &ctx) const override { return framework::OpKernelType( - static_cast(ctx.Attr("dtype")), + static_cast(ctx.Attr("dtype")), ctx.device_context()); } }; @@ -36,7 +36,7 @@ class FillConstantBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker { AddAttr("dtype", "(int, default 5 (FP32)) " "Output data type") - .SetDefault(framework::proto::DataType::FP32); + .SetDefault(framework::proto::VarType::FP32); AddAttr("value", "(float, default 0) The value to be filled") .SetDefault(0.0f); AddComment(R"DOC( diff --git a/paddle/fluid/operators/fill_constant_op.cc b/paddle/fluid/operators/fill_constant_op.cc index 0b65c83d3a..07e0a80f8d 100644 --- a/paddle/fluid/operators/fill_constant_op.cc +++ b/paddle/fluid/operators/fill_constant_op.cc @@ -38,7 +38,7 @@ class FillConstantOp : public framework::OperatorBase { void RunImpl(const framework::Scope &scope, const platform::Place &dev_place) const override { auto data_type = - static_cast(Attr("dtype")); + static_cast(Attr("dtype")); auto value = Attr("value"); auto force_cpu = Attr("force_cpu"); auto &out = @@ -64,7 +64,7 @@ class FillConstantOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr("dtype", "(int, default 5 (FP32)) " "Output data type") - .SetDefault(framework::proto::DataType::FP32); + .SetDefault(framework::proto::VarType::FP32); AddAttr>("shape", "(vector) The shape of the output"); AddAttr("value", "(float, default 0) The value to be filled") .SetDefault(0.0f); diff --git a/paddle/fluid/operators/fill_op.cc b/paddle/fluid/operators/fill_op.cc index 3b4b409231..ee8a2fc353 100644 --- a/paddle/fluid/operators/fill_op.cc +++ b/paddle/fluid/operators/fill_op.cc @@ -51,7 +51,8 @@ class FillOp : public framework::OperatorBase { "Cannot find variable %s", Output("Out")) .GetMutable()); out.Resize(framework::make_ddim(Attr>("shape"))); - auto dtype = static_cast(Attr("dtype")); + auto dtype = + static_cast(Attr("dtype")); platform::CPUPlace cpu; auto force_cpu = Attr("force_cpu"); out.mutable_data(force_cpu ? cpu : place, framework::ToTypeIndex(dtype)); @@ -93,7 +94,7 @@ Fill an tensor with `value` and `shape`. The type of the tensor is specify by "value", "The float values of tensor, which are flatten in row major"); AddAttr>("shape", "The shape of output tensor"); AddAttr("dtype", "The data type of output tensor, Default is float") - .SetDefault(framework::proto::DataType::FP32); + .SetDefault(framework::proto::VarType::FP32); AddAttr("force_cpu", "Whether the output tensor must be at CPU memory or not. 
" "Default is false.") diff --git a/paddle/fluid/operators/gaussian_random_batch_size_like_op.cc b/paddle/fluid/operators/gaussian_random_batch_size_like_op.cc index ac516986ad..53c706a83e 100644 --- a/paddle/fluid/operators/gaussian_random_batch_size_like_op.cc +++ b/paddle/fluid/operators/gaussian_random_batch_size_like_op.cc @@ -26,7 +26,7 @@ class GaussianRandomBatchSizeLikeOp : public BatchSizeLikeOp { framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext &ctx) const override { return framework::OpKernelType( - static_cast(ctx.Attr("dtype")), + static_cast(ctx.Attr("dtype")), ctx.GetPlace()); } }; @@ -53,7 +53,7 @@ class GaussianRandomBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker { AddAttr("dtype", "(int, default 5(FP32)) " "Output data type.") - .SetDefault(framework::proto::DataType::FP32); + .SetDefault(framework::proto::VarType::FP32); AddComment(R"DOC( GaussianRandom Operator. diff --git a/paddle/fluid/operators/gaussian_random_op.cc b/paddle/fluid/operators/gaussian_random_op.cc index 7fb2b2c230..4d197637b3 100644 --- a/paddle/fluid/operators/gaussian_random_op.cc +++ b/paddle/fluid/operators/gaussian_random_op.cc @@ -63,7 +63,7 @@ class GaussianRandomOp : public framework::OperatorWithKernel { framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( - static_cast(ctx.Attr("dtype")), + static_cast(ctx.Attr("dtype")), ctx.device_context()); } }; @@ -95,7 +95,7 @@ class GaussianRandomOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr("dtype", "(int, default 5(FP32)) " "Output data type.") - .SetDefault(framework::proto::DataType::FP32); + .SetDefault(framework::proto::VarType::FP32); AddComment(R"DOC( GaussianRandom Operator. diff --git a/paddle/fluid/operators/nccl_op.cc b/paddle/fluid/operators/nccl_op.cc index 7f1278f3a5..5ae50590dd 100644 --- a/paddle/fluid/operators/nccl_op.cc +++ b/paddle/fluid/operators/nccl_op.cc @@ -55,7 +55,7 @@ class NCCLInitOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr("dtype", "(int, default 5 (FP32)) " "Output data type") - .SetDefault(framework::proto::DataType::FP32); + .SetDefault(framework::proto::VarType::FP32); AddComment(R"DOC( NCCLInit Operator. diff --git a/paddle/fluid/operators/one_hot_op.cc b/paddle/fluid/operators/one_hot_op.cc index 21d3405b70..1d42dfdd76 100644 --- a/paddle/fluid/operators/one_hot_op.cc +++ b/paddle/fluid/operators/one_hot_op.cc @@ -60,7 +60,7 @@ class OneHotOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr("dtype", "An integer to specify the data type of one-hot " "vector. The default value is FP32.") - .SetDefault(paddle::framework::proto::DataType::FP32); + .SetDefault(paddle::framework::proto::VarType::FP32); AddComment(R"DOC( One Hot Operator. This operator creates the one-hot representations for input index values. 
The following example will help to explain the function of this diff --git a/paddle/fluid/operators/one_hot_op.cu b/paddle/fluid/operators/one_hot_op.cu index 87c285df4e..240ac895e2 100644 --- a/paddle/fluid/operators/one_hot_op.cu +++ b/paddle/fluid/operators/one_hot_op.cu @@ -65,7 +65,8 @@ class OneHotCUDAKernel : public framework::OpKernel { int depth = context.Attr("depth"); framework::VisitDataType( - static_cast(context.Attr("dtype")), + static_cast( + context.Attr("dtype")), OneHotOpCUDAFunctor( in, out, depth, context.template device_context())); } diff --git a/paddle/fluid/operators/one_hot_op.h b/paddle/fluid/operators/one_hot_op.h index 1409f8af62..7e77f25089 100644 --- a/paddle/fluid/operators/one_hot_op.h +++ b/paddle/fluid/operators/one_hot_op.h @@ -58,7 +58,8 @@ class OneHotKernel : public framework::OpKernel { int depth = context.Attr("depth"); framework::VisitDataType( - static_cast(context.Attr("dtype")), + static_cast( + context.Attr("dtype")), OneHotOpFunctor( in, out, depth, context.template device_context())); } diff --git a/paddle/fluid/operators/rnn_memory_helper_op.cc b/paddle/fluid/operators/rnn_memory_helper_op.cc index 8ab9f010a2..70f205d887 100644 --- a/paddle/fluid/operators/rnn_memory_helper_op.cc +++ b/paddle/fluid/operators/rnn_memory_helper_op.cc @@ -66,7 +66,7 @@ class RNNMemoryHelperOpInfoMaker : public framework::OpProtoAndCheckerMaker { AddAttr("dtype", "(int, default 5 (FP32)) " "Output data type") - .SetDefault(framework::proto::DataType::FP32); + .SetDefault(framework::proto::VarType::FP32); AddComment(""); } }; @@ -126,7 +126,7 @@ class RNNMemoryHelperGradOpInfoMaker AddAttr("dtype", "(int, default 5 (FP32)) " "Output data type") - .SetDefault(framework::proto::DataType::FP32); + .SetDefault(framework::proto::VarType::FP32); AddComment(""); } }; diff --git a/paddle/fluid/operators/sum_op.cc b/paddle/fluid/operators/sum_op.cc index 7b88387c33..c3abb3ea4a 100644 --- a/paddle/fluid/operators/sum_op.cc +++ b/paddle/fluid/operators/sum_op.cc @@ -73,7 +73,8 @@ class SumOp : public framework::OperatorWithKernel { "Sum operator should have at least one tensor"); return framework::OpKernelType( - static_cast(dtype), ctx.device_context()); + static_cast(dtype), + ctx.device_context()); } else if (x_vars[0]->IsType()) { return framework::OpKernelType( framework::ToDataType( diff --git a/paddle/fluid/operators/uniform_random_batch_size_like_op.cc b/paddle/fluid/operators/uniform_random_batch_size_like_op.cc index fa31dad513..00f00bb403 100644 --- a/paddle/fluid/operators/uniform_random_batch_size_like_op.cc +++ b/paddle/fluid/operators/uniform_random_batch_size_like_op.cc @@ -26,7 +26,7 @@ class UniformRandomBatchSizeLikeOp : public BatchSizeLikeOp { framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext &ctx) const override { return framework::OpKernelType( - static_cast(ctx.Attr("dtype")), + static_cast(ctx.Attr("dtype")), ctx.GetPlace()); } }; @@ -58,7 +58,7 @@ This operator initializes a tensor with the same batch_size as the Input tensor "generate the same random numbers every time.") .SetDefault(0); AddAttr("dtype", "(int, default 5(FP32)) Output tensor data type") - .SetDefault(framework::proto::DataType::FP32); + .SetDefault(framework::proto::VarType::FP32); } }; diff --git a/paddle/fluid/operators/uniform_random_op.cc b/paddle/fluid/operators/uniform_random_op.cc index 3a0a0d6fca..87699362b2 100644 --- a/paddle/fluid/operators/uniform_random_op.cc +++ b/paddle/fluid/operators/uniform_random_op.cc @@ -66,7 +66,7 @@ class 
UniformRandomOp : public framework::OperatorWithKernel { framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( - static_cast(ctx.Attr("dtype")), + static_cast(ctx.Attr("dtype")), ctx.GetPlace()); } }; @@ -101,7 +101,7 @@ uniform distribution. "generate the same random numbers every time.") .SetDefault(0); AddAttr("dtype", "(int, default 5(FP32)) Output tensor data type") - .SetDefault(framework::proto::DataType::FP32); + .SetDefault(framework::proto::VarType::FP32); } }; } // namespace operators diff --git a/paddle/fluid/pybind/protobuf.cc b/paddle/fluid/pybind/protobuf.cc index 9f97cc5007..99716ccb24 100644 --- a/paddle/fluid/pybind/protobuf.cc +++ b/paddle/fluid/pybind/protobuf.cc @@ -195,15 +195,6 @@ void BindBlockDesc(py::module &m) { } void BindVarDsec(py::module &m) { - py::enum_(m, "DataType", "") - .value("BOOL", proto::DataType::BOOL) - .value("INT16", proto::DataType::INT16) - .value("INT32", proto::DataType::INT32) - .value("INT64", proto::DataType::INT64) - .value("FP16", proto::DataType::FP16) - .value("FP32", proto::DataType::FP32) - .value("FP64", proto::DataType::FP64); - py::class_ var_desc(m, "VarDesc", ""); var_desc .def("name", @@ -233,6 +224,13 @@ void BindVarDsec(py::module &m) { .def("set_persistable", &VarDesc::SetPersistable); py::enum_(var_desc, "VarType", "") + .value("BOOL", proto::VarType::BOOL) + .value("INT16", proto::VarType::INT16) + .value("INT32", proto::VarType::INT32) + .value("INT64", proto::VarType::INT64) + .value("FP16", proto::VarType::FP16) + .value("FP32", proto::VarType::FP32) + .value("FP64", proto::VarType::FP64) .value("LOD_TENSOR", proto::VarType::LOD_TENSOR) .value("SELECTED_ROWS", proto::VarType::SELECTED_ROWS) .value("FEED_MINIBATCH", proto::VarType::FEED_MINIBATCH) diff --git a/python/paddle/v2/fluid/backward.py b/python/paddle/v2/fluid/backward.py index a690c14300..26b35cfc19 100644 --- a/python/paddle/v2/fluid/backward.py +++ b/python/paddle/v2/fluid/backward.py @@ -68,7 +68,7 @@ def _infer_var_data_type_(grad_var_name, block): fwd_var = block.desc.find_var_recursive(fwd_name.encode("ascii")) grad_var.set_dtype(fwd_var.dtype()) else: - grad_var.set_dtype(core.DataType.FP32) + grad_var.set_dtype(core.VarDesc.VarType.FP32) def _all_in_set_(cands, s): diff --git a/python/paddle/v2/fluid/data_feeder.py b/python/paddle/v2/fluid/data_feeder.py index 070bcadd71..ac02401c79 100644 --- a/python/paddle/v2/fluid/data_feeder.py +++ b/python/paddle/v2/fluid/data_feeder.py @@ -27,13 +27,13 @@ class DataToLoDTensorConverter(object): self.place = place self.lod_level = lod_level self.shape = shape - if dtype == core.DataType.FP32: + if dtype == core.VarDesc.VarType.FP32: self.dtype = 'float32' - elif dtype == core.DataType.INT64: + elif dtype == core.VarDesc.VarType.INT64: self.dtype = 'int64' - elif dtype == core.DataType.FP64: + elif dtype == core.VarDesc.VarType.FP64: self.dtype = 'float64' - elif dtype == core.DataType.INT32: + elif dtype == core.VarDesc.VarType.INT32: self.dtype = 'int32' else: raise ValueError("dtype must be any of [int32, float32, int64, " diff --git a/python/paddle/v2/fluid/evaluator.py b/python/paddle/v2/fluid/evaluator.py index 30d87c76c2..1f4618310c 100644 --- a/python/paddle/v2/fluid/evaluator.py +++ b/python/paddle/v2/fluid/evaluator.py @@ -89,7 +89,7 @@ class Evaluator(object): Args: suffix(str): the state suffix. 
- dtype(str|core.DataType): the state data type + dtype(str|core.VarDesc.VarType): the state data type shape(tuple|list): the shape of state Returns: State variable diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/v2/fluid/framework.py index dfd7e8047c..fb4cd5b75a 100644 --- a/python/paddle/v2/fluid/framework.py +++ b/python/paddle/v2/fluid/framework.py @@ -67,24 +67,24 @@ def convert_np_dtype_to_dtype_(np_dtype): Args: np_dtype(np.dtype): the data type in numpy - Returns(core.DataType): the data type in Paddle + Returns(core.VarDesc.VarType): the data type in Paddle """ dtype = np.dtype(np_dtype) if dtype == np.float32: - return core.DataType.FP32 + return core.VarDesc.VarType.FP32 elif dtype == np.float64: - return core.DataType.FP64 + return core.VarDesc.VarType.FP64 elif dtype == np.float16: - return core.DataType.FP16 + return core.VarDesc.VarType.FP16 elif dtype == np.int32: - return core.DataType.INT32 + return core.VarDesc.VarType.INT32 elif dtype == np.int16: - return core.DataType.INT16 + return core.VarDesc.VarType.INT16 elif dtype == np.int64: - return core.DataType.INT64 + return core.VarDesc.VarType.INT64 elif dtype == np.bool: - return core.DataType.BOOL + return core.VarDesc.VarType.BOOL else: raise ValueError("Not supported numpy dtype " + str(dtype)) @@ -93,16 +93,19 @@ def dtype_is_floating(dtype): """ Check the data type is floating or not. Args: - dtype(np.dtype|core.DataType): data type. + dtype(np.dtype|core.VarDesc.VarType): data type. Could be numpy format or Paddle format Returns(bool): True if data type is a float value """ - if not isinstance(dtype, core.DataType): + if not isinstance(dtype, core.VarDesc.VarType): dtype = convert_np_dtype_to_dtype_(dtype) - return dtype in [core.DataType.FP16, core.DataType.FP32, core.DataType.FP64] + return dtype in [ + core.VarDesc.VarType.FP16, core.VarDesc.VarType.FP32, + core.VarDesc.VarType.FP64 + ] def _debug_string_(proto, throw_on_error=True): @@ -148,7 +151,7 @@ class Variable(object): framework.proto for details. shape(tuple|list|None): The shape of variable. -1 means the batch size. Some kinds of variable do not contain shape, just set it to None. - dtype(np.dtype|core.DataType|str): The data type of variable. + dtype(np.dtype|core.VarDesc.VarType|str): The data type of variable. lod_level(int): The level of lod tensor. 0 means there is not a time series data. persistable(bool): True if the variable should be saved as check point. @@ -200,7 +203,7 @@ class Variable(object): "shape is {1}; the new shape is {2}. 
They are not " "matched.".format(self.name, old_shape, shape)) if dtype is not None: - if not isinstance(dtype, core.DataType): + if not isinstance(dtype, core.VarDesc.VarType): dtype = convert_np_dtype_to_dtype_(dtype) if is_new_var: self.desc.set_dtype(dtype) diff --git a/python/paddle/v2/fluid/layers/control_flow.py b/python/paddle/v2/fluid/layers/control_flow.py index 1ca11bb35b..b56a391618 100644 --- a/python/paddle/v2/fluid/layers/control_flow.py +++ b/python/paddle/v2/fluid/layers/control_flow.py @@ -612,7 +612,7 @@ class While(object): if not isinstance(cond, Variable): raise TypeError("condition should be a variable") assert isinstance(cond, Variable) - if cond.dtype != core.DataType.BOOL: + if cond.dtype != core.VarDesc.VarType.BOOL: raise TypeError("condition should be a bool variable") if reduce(lambda a, b: a * b, cond.shape, 1) != 1: raise TypeError("condition should be a bool scalar") diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index d1ac6583dd..c4baa62ccd 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -221,7 +221,7 @@ def embedding(input, :math:`padding_idx < 0`, the padding_idx to use in lookup is :math:`size[0] + dim`. param_attr(ParamAttr): Parameters for this layer - dtype(np.dtype|core.DataType|str): The type of data : float32, float_16, int etc + dtype(np.dtype|core.VarDesc.VarType|str): The type of data : float32, float_16, int etc Returns: Variable: The tensor variable storing the embeddings of the \ diff --git a/python/paddle/v2/fluid/layers/tensor.py b/python/paddle/v2/fluid/layers/tensor.py index db400aad37..97e8f082cf 100644 --- a/python/paddle/v2/fluid/layers/tensor.py +++ b/python/paddle/v2/fluid/layers/tensor.py @@ -17,7 +17,7 @@ from ..param_attr import ParamAttr from ..framework import convert_np_dtype_to_dtype_ from ..framework import Variable from ..initializer import Constant, force_init_on_cpu -from ..core import DataType +from ..core import VarDesc import numpy __all__ = [ @@ -199,10 +199,10 @@ def assign(input, output): attrs={'scale': 1.0}) elif isinstance(input, numpy.ndarray): dtype = convert_np_dtype_to_dtype_(input.dtype) - if dtype == DataType.FP32: + if dtype == VarDesc.VarType.FP32: value_name = "fp32_values" values = [float(v) for v in input.flat] - elif dtype == DataType.INT32: + elif dtype == VarDesc.VarType.INT32: value_name = "int32_values" values = [int(v) for v in input.flat] else: @@ -236,7 +236,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None): Args: shape(tuple|list|None): Shape of the output tensor. - dtype(np.dtype|core.DataType|str): Data type of the output tensor. + dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output tensor. value(float): The constant value used to initialize the output tensor. out(Variable): The output tensor. force_cpu(True|False): data should be on CPU if set true. 
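Since most hunks in this patch are the same one-line enum swap, a minimal sketch of the user-visible effect may be useful (import paths follow the `paddle.v2.fluid` layout used in these diffs; the assertions mirror `convert_np_dtype_to_dtype_` above and `test_variable.py` below):

```python
import numpy as np
import paddle.v2.fluid.core as core
from paddle.v2.fluid.framework import convert_np_dtype_to_dtype_

# After this series the dtype enum hangs off core.VarDesc.VarType;
# the old top-level core.DataType binding is removed.
assert convert_np_dtype_to_dtype_(np.float32) == core.VarDesc.VarType.FP32
assert convert_np_dtype_to_dtype_("float16") == core.VarDesc.VarType.FP16
```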
@@ -285,7 +285,7 @@ def fill_constant_batch_size_like(input, Args: input(Variable): Tensor whose dimensions will be used to get batch size shape(tuple|list|None): Shape of output tensor - dtype(np.dtype|core.DataType|str): Data type of output tensor + dtype(np.dtype|core.VarDesc.VarType|str): Data type of output tensor value(float): Constant value to initialize the output tensor input_dim_idx(int): Index of input's batch size dimension output_dim_idx(int): Index of output's batch size dimension @@ -327,7 +327,7 @@ def ones(shape, dtype, force_cpu=False): Args: shape(tuple|list|None): Shape of output tensor - dtype(np.dtype|core.DataType|str): Data type of output tensor + dtype(np.dtype|core.VarDesc.VarType|str): Data type of output tensor Returns: Variable: The tensor variable storing the output @@ -351,7 +351,7 @@ def zeros(shape, dtype, force_cpu=False): Args: shape(tuple|list|None): Shape of output tensor - dtype(np.dtype|core.DataType|str): Data type of output tensor + dtype(np.dtype|core.VarDesc.VarType|str): Data type of output tensor Returns: Variable: The tensor variable storing the output diff --git a/python/paddle/v2/fluid/memory_optimization_transpiler.py b/python/paddle/v2/fluid/memory_optimization_transpiler.py index 78dc56f849..ee56ccdcf1 100644 --- a/python/paddle/v2/fluid/memory_optimization_transpiler.py +++ b/python/paddle/v2/fluid/memory_optimization_transpiler.py @@ -20,13 +20,13 @@ from backward import _rename_arg_ from . import core dtype_to_size = { - core.DataType.FP16: 2, - core.DataType.FP32: 4, - core.DataType.FP64: 8, - core.DataType.INT16: 2, - core.DataType.INT32: 4, - core.DataType.INT64: 8, - core.DataType.BOOL: 1 + core.VarDesc.VarType.FP16: 2, + core.VarDesc.VarType.FP32: 4, + core.VarDesc.VarType.FP64: 8, + core.VarDesc.VarType.INT16: 2, + core.VarDesc.VarType.INT32: 4, + core.VarDesc.VarType.INT64: 8, + core.VarDesc.VarType.BOOL: 1 } diff --git a/python/paddle/v2/fluid/tests/test_cpp_reader.py b/python/paddle/v2/fluid/tests/test_cpp_reader.py index 8d4f454611..6d2312dbcb 100644 --- a/python/paddle/v2/fluid/tests/test_cpp_reader.py +++ b/python/paddle/v2/fluid/tests/test_cpp_reader.py @@ -22,7 +22,7 @@ block = prog.current_block() random_reader = block.create_var( type=fluid.core.VarDesc.VarType.READER, name="RandomDataGenerator") random_reader.desc.set_dtypes( - [fluid.core.DataType.FP32, fluid.core.DataType.FP32]) + [fluid.core.VarDesc.VarType.FP32, fluid.core.VarDesc.VarType.FP32]) create_random_data_generator_op = block.append_op( type="create_random_data_generator", diff --git a/python/paddle/v2/fluid/tests/unittests/op_test.py b/python/paddle/v2/fluid/tests/unittests/op_test.py index 4761811f0a..d8867550ca 100644 --- a/python/paddle/v2/fluid/tests/unittests/op_test.py +++ b/python/paddle/v2/fluid/tests/unittests/op_test.py @@ -119,9 +119,9 @@ def get_numeric_gradient(place, tensor_to_check = scope.find_var(input_to_check).get_tensor() tensor_size = product(tensor_to_check.get_dims()) tensor_to_check_dtype = tensor_to_check.dtype() - if tensor_to_check_dtype == core.DataType.FP32: + if tensor_to_check_dtype == core.VarDesc.VarType.FP32: tensor_to_check_dtype = np.float32 - elif tensor_to_check_dtype == core.DataType.FP64: + elif tensor_to_check_dtype == core.VarDesc.VarType.FP64: tensor_to_check_dtype = np.float64 else: raise ValueError("Not supported data type " + str( diff --git a/python/paddle/v2/fluid/tests/unittests/test_batch_norm_op.py b/python/paddle/v2/fluid/tests/unittests/test_batch_norm_op.py index 778c7044ce..b7c0cb521a 100644 --- 
a/python/paddle/v2/fluid/tests/unittests/test_batch_norm_op.py +++ b/python/paddle/v2/fluid/tests/unittests/test_batch_norm_op.py @@ -140,9 +140,9 @@ def set_output_grad(scope, outputs, place, feed_dict=None): grad_tensor = scope.var(grad_var_name(name)).get_tensor() out_dtype = out_tensor.dtype() if data is None: - if out_dtype == core.DataType.FP64: + if out_dtype == core.VarDesc.VarType.FP64: data = np.ones(out_tensor.shape(), dtype=np.float64) - elif out_dtype == core.DataType.FP32: + elif out_dtype == core.VarDesc.VarType.FP32: data = np.ones(out_tensor.shape(), dtype=np.float32) else: raise ValueError("Not supported data type " + str(out_dtype)) diff --git a/python/paddle/v2/fluid/tests/unittests/test_cast_op.py b/python/paddle/v2/fluid/tests/unittests/test_cast_op.py index 44859e2155..3d05a319cd 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_cast_op.py +++ b/python/paddle/v2/fluid/tests/unittests/test_cast_op.py @@ -24,8 +24,8 @@ class TestCastOp(op_test.OpTest): self.inputs = {'X': ipt.astype('float32')} self.outputs = {'Out': ipt.astype('float64')} self.attrs = { - 'in_dtype': int(core.DataType.FP32), - 'out_dtype': int(core.DataType.FP64) + 'in_dtype': int(core.VarDesc.VarType.FP32), + 'out_dtype': int(core.VarDesc.VarType.FP64) } self.op_type = 'cast' diff --git a/python/paddle/v2/fluid/tests/unittests/test_fill_op.py b/python/paddle/v2/fluid/tests/unittests/test_fill_op.py index 34c6401377..c2e3cfe6f3 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_fill_op.py +++ b/python/paddle/v2/fluid/tests/unittests/test_fill_op.py @@ -26,7 +26,7 @@ class TestFillOp(OpTest): self.attrs = { 'value': val.flatten().tolist(), 'shape': [100, 200], - 'dtype': int(core.DataType.FP64) + 'dtype': int(core.VarDesc.VarType.FP64) } self.outputs = {'Out': val.astype('float64')} diff --git a/python/paddle/v2/fluid/tests/unittests/test_layer_norm_op.py b/python/paddle/v2/fluid/tests/unittests/test_layer_norm_op.py index b723b471bc..a1206b3b85 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_layer_norm_op.py +++ b/python/paddle/v2/fluid/tests/unittests/test_layer_norm_op.py @@ -97,9 +97,9 @@ def set_output_grad(scope, outputs, place, feed_dict=None): grad_tensor = scope.var(grad_var_name(name)).get_tensor() out_dtype = out_tensor.dtype() if data is None: - if out_dtype == core.DataType.FP64: + if out_dtype == core.VarDesc.VarType.FP64: data = np.ones(out_tensor.shape(), dtype=np.float64) - elif out_dtype == core.DataType.FP32: + elif out_dtype == core.VarDesc.VarType.FP32: data = np.ones(out_tensor.shape(), dtype=np.float32) else: raise ValueError("Not supported data type " + str(out_dtype)) diff --git a/python/paddle/v2/fluid/tests/unittests/test_one_hot_op.py b/python/paddle/v2/fluid/tests/unittests/test_one_hot_op.py index c93be0efda..b7db30104a 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_one_hot_op.py +++ b/python/paddle/v2/fluid/tests/unittests/test_one_hot_op.py @@ -38,7 +38,7 @@ class TestOneHotOp(OpTest): out[i, x[i]] = 1.0 self.inputs = {'X': (x, x_lod)} - self.attrs = {'depth': depth, 'dtype': int(core.DataType.FP32)} + self.attrs = {'depth': depth, 'dtype': int(core.VarDesc.VarType.FP32)} self.outputs = {'Out': (out, x_lod)} def test_check_output(self): diff --git a/python/paddle/v2/fluid/tests/unittests/test_parameter.py b/python/paddle/v2/fluid/tests/unittests/test_parameter.py index 0ba9235fdb..88356a7ea1 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_parameter.py +++ b/python/paddle/v2/fluid/tests/unittests/test_parameter.py @@ -36,7 +36,7 @@ 
class TestParameter(unittest.TestCase): self.assertIsNotNone(param) self.assertEqual('fc.w', param.name) self.assertEqual((784, 100), param.shape) - self.assertEqual(core.DataType.FP32, param.dtype) + self.assertEqual(core.VarDesc.VarType.FP32, param.dtype) self.assertEqual(0, param.block.idx) exe = Executor(core.CPUPlace()) p = exe.run(main_program, fetch_list=[param])[0] diff --git a/python/paddle/v2/fluid/tests/unittests/test_protobuf_descs.py b/python/paddle/v2/fluid/tests/unittests/test_protobuf_descs.py index 55d18d2729..c3bef95874 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_protobuf_descs.py +++ b/python/paddle/v2/fluid/tests/unittests/test_protobuf_descs.py @@ -131,8 +131,8 @@ class TestVarDesc(unittest.TestCase): block = program_desc.block(0) var = block.var('my_var') var.set_type(core.VarDesc.VarType.LOD_TENSOR) - var.set_dtype(core.DataType.INT32) - self.assertEqual(core.DataType.INT32, var.dtype()) + var.set_dtype(core.VarDesc.VarType.INT32) + self.assertEqual(core.VarDesc.VarType.INT32, var.dtype()) self.assertEqual(core.VarDesc.VarType.LOD_TENSOR, var.type()) def test_multiple_dtype(self): @@ -141,7 +141,8 @@ class TestVarDesc(unittest.TestCase): var = block.var('my_reader') var.set_type(core.VarDesc.VarType.READER) src_types = [ - core.DataType.INT32, core.DataType.FP64, core.DataType.FP32 + core.VarDesc.VarType.INT32, core.VarDesc.VarType.FP64, + core.VarDesc.VarType.FP32 ] var.set_dtypes(src_types) self.assertEqual(src_types, var.dtypes()) diff --git a/python/paddle/v2/fluid/tests/unittests/test_variable.py b/python/paddle/v2/fluid/tests/unittests/test_variable.py index b06bcfb075..4ae3909d27 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_variable.py +++ b/python/paddle/v2/fluid/tests/unittests/test_variable.py @@ -20,7 +20,7 @@ import numpy as np class TestVariable(unittest.TestCase): def test_np_dtype_convert(self): - DT = core.DataType + DT = core.VarDesc.VarType convert = convert_np_dtype_to_dtype_ self.assertEqual(DT.FP32, convert(np.float32)) self.assertEqual(DT.FP16, convert("float16")) @@ -36,13 +36,13 @@ class TestVariable(unittest.TestCase): w = b.create_var( dtype="float64", shape=[784, 100], lod_level=0, name="fc.w") self.assertNotEqual(str(w), "") - self.assertEqual(core.DataType.FP64, w.dtype) + self.assertEqual(core.VarDesc.VarType.FP64, w.dtype) self.assertEqual((784, 100), w.shape) self.assertEqual("fc.w", w.name) self.assertEqual(0, w.lod_level) w = b.create_var(name='fc.w') - self.assertEqual(core.DataType.FP64, w.dtype) + self.assertEqual(core.VarDesc.VarType.FP64, w.dtype) self.assertEqual((784, 100), w.shape) self.assertEqual("fc.w", w.name) self.assertEqual(0, w.lod_level) -- GitLab From 56d5319261f94a7c1b135ffe904b415cdfe8f4e8 Mon Sep 17 00:00:00 2001 From: emailweixu Date: Fri, 16 Feb 2018 15:05:33 -0800 Subject: [PATCH 130/217] Fix typo Paddle/tools/manylinux1/README.md (#8463) --- tools/manylinux1/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/manylinux1/README.md b/tools/manylinux1/README.md index cb0a9ac22c..898e00bd37 100644 --- a/tools/manylinux1/README.md +++ b/tools/manylinux1/README.md @@ -12,7 +12,7 @@ with newer version compilers cannot work with those with older versions. The suggested building environment is as old as CentOS 5. However, PaddlePaddle relies on CUDA, and the earlies version of [CentOS works with CUDA is 6](https://hub.docker.com/r/nvidia/cuda/). 
-So, here we provide a Docker image basing on CentOS 6 and CUDA for +So, here we provide a Docker image based on CentOS 6 and CUDA for building PaddlePaddle and making the release supports "as-manylinux as possible." or "sufficiently many Linux" according to [this discussion](https://mail.python.org/pipermail/wheel-builders/2016-July/000175.html). -- GitLab From bd58bf3e035b29c4f98afd49bce604d602412cb3 Mon Sep 17 00:00:00 2001 From: kavyasrinet Date: Fri, 16 Feb 2018 16:22:02 -0800 Subject: [PATCH 131/217] [WIP] Expose Channel in Python and add to VarType (#8465) Add Channel as a VarType --- paddle/fluid/framework/framework.proto | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/paddle/fluid/framework/framework.proto b/paddle/fluid/framework/framework.proto index 22d0692394..724d9793e5 100644 --- a/paddle/fluid/framework/framework.proto +++ b/paddle/fluid/framework/framework.proto @@ -112,6 +112,7 @@ message VarType { LOD_TENSOR_ARRAY = 13; PLACE_LIST = 14; READER = 15; + CHANNEL = 16; } required Type type = 1; @@ -137,6 +138,12 @@ message VarType { message ReaderDesc { repeated LoDTensorDesc lod_tensor = 1; } optional ReaderDesc reader = 5; + + message ChannelDesc { + required Type data_type = 1; + required int64 capacity = 2; + } + optional ChannelDesc channel = 6; } message VarDesc { -- GitLab From 65fd84a5d50877bdc9a6bcbcd3f4f29a1389bfd1 Mon Sep 17 00:00:00 2001 From: kavyasrinet Date: Fri, 16 Feb 2018 16:48:21 -0800 Subject: [PATCH 132/217] Updating Var_desc.md with the updated typing system in Fluid (#8462) * Updating Var_desc.md with the updated typing system in Fluid * Added Channel to VarType --- doc/design/var_desc.md | 46 ++++++++++++++++++++++++++---------------- 1 file changed, 29 insertions(+), 17 deletions(-) diff --git a/doc/design/var_desc.md b/doc/design/var_desc.md index 89fa95326c..6a45af1995 100644 --- a/doc/design/var_desc.md +++ b/doc/design/var_desc.md @@ -1,10 +1,10 @@ ## Background PaddlePaddle divides the description of neural network computation into two stages: compile time and runtime. At compile time, the neural network computation is described as a `ProgramDesc` whereas at runtime an `Executor` interprets the `ProgramDesc` to compute the operations. -PaddlePaddle use proto message to describe compile time program because +PaddlePaddle uses proto message to describe compile time program because : 1. The computation program description must be serializable and saved in a file. -1. During distributed training, the sreialized program will be sent to multiple workers. It should also be possible to break the program into different components, each of which can be executed on different workers. +1. During distributed training, the serialized program will be sent to multiple workers. It should also be possible to break the program into different components, each of which can be executed on a different worker. The computation `Program` consists of nested `Blocks`. Each `Block` will consist of data(i.e. `Variable`) and `Operations`. The concept to represent them is in the table below. @@ -14,28 +14,33 @@ The computation `Program` consists of nested `Blocks`. Each `Block` will consist |Operation|OpDesc(proto)|Operator(cpp)| -## Definition of VarDesc +## Definition of VarType -A VarDesc should have a name, and value. The are two kinds of variable type in compile time, they are `LoDTensor` and `SelectedRows`. +A VarDesc should have a name, type and whether or not it is persistable. 
There are different kinds of variable types supported in PaddlePaddle, apart from the POD_Types like: `LOD_TENSOR`, `SELECTED_ROWS`, `FEED_MINIBATCH`, `FETCH_LIST`, `STEP_SCOPES`, `LOD_RANK_TABLE`, `LOD_TENSOR_ARRAY`, `PLACE_LIST`, `READER` and `CHANNEL`. These are declared inside `VarType`. A `VarDesc` then looks like the following: ```proto message VarDesc { required string name = 1; - enum VarType { - LOD_TENSOR = 0; - SELECTED_ROWS = 1; - } required VarType type = 2; - optional LoDTensorDesc lod_desc = 3; - optional TensorDesc selected_rows_desc = 4; - optional bool persistable = 5 [ default = false ]; + optional bool persistable = 3 [ default = false ]; } ``` ## Definition of TensorDesc ```proto -enum DataType { +message TensorDesc { + // Should only be PODType. Is enforced in C++ + required Type data_type = 1; + repeated int64 dims = 2; // [UNK, 640, 480] is saved as [-1, 640, 480] +} +``` + +The `Type` here comes from the enum defined inside `VarType`: + +```proto +enum Type { + // Pod Types BOOL = 0; INT16 = 1; INT32 = 2; @@ -43,11 +48,18 @@ enum DataType { FP16 = 4; FP32 = 5; FP64 = 6; -} -message TensorDesc { - required DataType data_type = 1; - repeated int64 dims = 2; // [UNK, 640, 480] is saved as [-1, 640, 480] + // Other types that may need additional descriptions + LOD_TENSOR = 7; + SELECTED_ROWS = 8; + FEED_MINIBATCH = 9; + FETCH_LIST = 10; + STEP_SCOPES = 11; + LOD_RANK_TABLE = 12; + LOD_TENSOR_ARRAY = 13; + PLACE_LIST = 14; + READER = 15; + CHANNEL = 16; } ``` @@ -58,7 +70,7 @@ A TensorDesc describes `SelectedRows` and `LoDTensor`. For details of `SelectedR ```proto message LoDTensorDesc { required TensorDesc tensor = 1; - optional int lod_level = 2; + optional int32 lod_level = 2 [ default = 0 ]; } ``` -- GitLab From 4b957af2372c861942aa7193223d00f23a5a3318 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Sat, 17 Feb 2018 01:35:58 +0000 Subject: [PATCH 133/217] clean up --- .../v2/fluid/tests/book/test_recognize_digits.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py index 442cf9604a..a0b4774da5 100644 --- a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py +++ b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py @@ -155,7 +155,6 @@ def train(nn_type, use_cuda, parallel, save_dirname, save_param_filename): float(avg_loss_val), float(acc_val))) if math.isnan(float(avg_loss_val)): sys.exit("got NaN loss, training failed.") - exit(0) raise AssertionError("Loss of recognize digits is too large") @@ -231,14 +230,10 @@ def inject_test_method(use_cuda, parallel, nn_type, combine): def inject_all_tests(): - for use_cuda in [True]: - for parallel in [True]: - for nn_type in ['mlp']: + for use_cuda in (False, True): + for parallel in (False, True): + for nn_type in ('mlp', 'conv'): inject_test_method(use_cuda, parallel, nn_type, True) - # for use_cuda in (False, True): - # for parallel in (False, True): - # for nn_type in ('mlp', 'conv'): - # inject_test_method(use_cuda, parallel, nn_type, True) # One unit-test for saving parameters as separate files inject_test_method(False, False, 'mlp', False) -- GitLab From a040239d3a438f48ee05491e9f7a56fa499a2fdb Mon Sep 17 00:00:00 2001 From: Siddharth Goyal Date: Fri, 16 Feb 2018 18:04:16 -0800 Subject: [PATCH 134/217] Add conv test case for inference-recognize digits (#8466) --- paddle/fluid/inference/tests/book/CMakeLists.txt | 2 +-
.../inference/tests/book/test_inference_recognize_digits.cc | 2 +- python/paddle/v2/fluid/tests/book/test_recognize_digits.py | 5 +++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/paddle/fluid/inference/tests/book/CMakeLists.txt b/paddle/fluid/inference/tests/book/CMakeLists.txt index cddd5a786c..c0aba39b97 100644 --- a/paddle/fluid/inference/tests/book/CMakeLists.txt +++ b/paddle/fluid/inference/tests/book/CMakeLists.txt @@ -27,7 +27,7 @@ endfunction(inference_test) inference_test(fit_a_line) inference_test(image_classification ARGS vgg resnet) inference_test(label_semantic_roles) -inference_test(recognize_digits ARGS mlp) +inference_test(recognize_digits ARGS mlp conv) inference_test(recommender_system) #inference_test(rnn_encoder_decoder) inference_test(understand_sentiment) diff --git a/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc b/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc index bd71948916..99bee94cb8 100644 --- a/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc +++ b/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc @@ -77,7 +77,7 @@ TEST(inference, recognize_digits_combine) { // Use normilized image pixels as input data, // which should be in the range [-1.0, 1.0]. SetupTensor( - input, {1, 28, 28}, static_cast(-1), static_cast(1)); + input, {1, 1, 28, 28}, static_cast(-1), static_cast(1)); std::vector cpu_feeds; cpu_feeds.push_back(&input); diff --git a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py index a0b4774da5..2462d425e1 100644 --- a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py +++ b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py @@ -141,7 +141,7 @@ def train(nn_type, use_cuda, parallel, save_dirname, save_param_filename): # get test acc and loss acc_val = numpy.array(acc_set).mean() avg_loss_val = numpy.array(avg_loss_set).mean() - if float(acc_val) > 0.85: # test acc > 85% + if float(acc_val) > 0.2: # Smaller value to increase CI speed if save_dirname is not None: fluid.io.save_inference_model( save_dirname, ["img"], [prediction], @@ -235,8 +235,9 @@ def inject_all_tests(): for nn_type in ('mlp', 'conv'): inject_test_method(use_cuda, parallel, nn_type, True) - # One unit-test for saving parameters as separate files + # Two unit-test for saving parameters as separate files inject_test_method(False, False, 'mlp', False) + inject_test_method(False, False, 'conv', False) inject_all_tests() -- GitLab From 3c2cafbe1d04843803a6f449389d123ae216758d Mon Sep 17 00:00:00 2001 From: "Yang Yang(Tony)" Date: Tue, 20 Feb 2018 15:31:31 -0800 Subject: [PATCH 135/217] fix parallel do hard coded empty var name (#8469) --- paddle/fluid/operators/parallel_do_op.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/operators/parallel_do_op.cc b/paddle/fluid/operators/parallel_do_op.cc index 6436efe42f..bf4d0476df 100644 --- a/paddle/fluid/operators/parallel_do_op.cc +++ b/paddle/fluid/operators/parallel_do_op.cc @@ -256,7 +256,7 @@ class ParallelDoGradOp : public framework::OperatorBase { } } for (auto &s : Outputs(framework::GradVarName(kParameters))) { - if (s == "@EMPTY@") { + if (s == framework::kEmptyVarName) { continue; } VLOG(3) << "Moving " << s; @@ -270,7 +270,7 @@ class ParallelDoGradOp : public framework::OperatorBase { const std::vector &sub_scopes, const platform::PlaceList &places) const { for (auto &s : 
Outputs(framework::GradVarName(kParameters))) { - if (s == "@EMPTY@") { + if (s == framework::kEmptyVarName) { continue; } VLOG(3) << "Accumulating " << s; -- GitLab From 46e4f6ffab80d25c549e8f730f8b2594e85ff0d6 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Wed, 21 Feb 2018 06:16:16 -0800 Subject: [PATCH 136/217] small fix --- paddle/fluid/platform/macros.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/platform/macros.h b/paddle/fluid/platform/macros.h index 02a2f53b49..4cc04b0905 100644 --- a/paddle/fluid/platform/macros.h +++ b/paddle/fluid/platform/macros.h @@ -19,7 +19,7 @@ limitations under the License. */ #define DISABLE_COPY_AND_ASSIGN(classname) \ private: \ classname(const classname&) = delete; \ - classname(const classname&&) = delete; \ + classname(classname&&) = delete; \ classname& operator=(const classname&) = delete; \ - classname& operator=(const classname&&) = delete + classname& operator=(classname&&) = delete #endif -- GitLab From 14f8370738236fdd0de2e5f6c6bbf9c6d2d23e6a Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 22 Feb 2018 11:13:23 +0800 Subject: [PATCH 137/217] Add block.fwd_block_id --- paddle/fluid/framework/block_desc.cc | 38 ++++++++++++++++++++------ paddle/fluid/framework/block_desc.h | 8 +++++- paddle/fluid/framework/framework.proto | 1 + paddle/fluid/framework/program_desc.h | 8 +++++- paddle/fluid/operators/while_op.cc | 7 +++-- paddle/fluid/pybind/protobuf.cc | 2 ++ python/paddle/v2/fluid/backward.py | 5 +++- python/paddle/v2/fluid/framework.py | 26 +++++++++++++++--- 8 files changed, 78 insertions(+), 17 deletions(-) diff --git a/paddle/fluid/framework/block_desc.cc b/paddle/fluid/framework/block_desc.cc index 0dd37e7df0..996aefd047 100644 --- a/paddle/fluid/framework/block_desc.cc +++ b/paddle/fluid/framework/block_desc.cc @@ -46,11 +46,25 @@ VarDesc *BlockDesc::FindVarRecursive(const std::string &name) const { if (name == kEmptyVarName) return nullptr; auto it = vars_.find(name); - if (it == vars_.end()) { - return Parent() == kNoneBlockIndex ? nullptr - : ParentBlock()->FindVarRecursive(name); + if (it != vars_.end()) { + return it->second.get(); } - return it->second.get(); + + BlockDesc *tmp = ParentBlock(); + + if (tmp != nullptr) { + auto ptr = tmp->FindVarRecursive(name); + if (ptr != nullptr) { + return ptr; + } + } + + tmp = ForwardBlock(); + if (tmp != nullptr) { + return tmp->FindVarRecursive(name); + } + + return nullptr; } VarDesc &BlockDesc::FindRecursiveOrCreateVar(const std::string &name_bytes) { @@ -136,10 +150,7 @@ void BlockDesc::Flush() { } BlockDesc *BlockDesc::ParentBlock() const { - if (this->desc_->parent_idx() == kNoneBlockIndex) { - return nullptr; - } - return prog_->MutableBlock(static_cast(this->desc_->parent_idx())); + return prog_->MutableBlock(static_cast(desc_->parent_idx())); } proto::BlockDesc *BlockDesc::Proto() { @@ -186,5 +197,16 @@ void BlockDesc::ClearPBVars() { } } +void BlockDesc::SetForwardBlockID(int32_t forward_block_id) { + PADDLE_ENFORCE(!desc_->has_forward_block_idx(), + "Parent block ID has been set to %d. 
Cannot set to %d", + desc_->forward_block_idx(), forward_block_id); + desc_->set_forward_block_idx(forward_block_id); +} + +BlockDesc *BlockDesc::ForwardBlock() const { + return prog_->MutableBlock(static_cast(desc_->forward_block_idx())); +} + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/block_desc.h b/paddle/fluid/framework/block_desc.h index 4e2b03e245..8345934a71 100644 --- a/paddle/fluid/framework/block_desc.h +++ b/paddle/fluid/framework/block_desc.h @@ -49,6 +49,8 @@ class BlockDesc { int32_t Parent() const { return desc_->parent_idx(); } + int32_t ForwardBlockID() const { return desc_->forward_block_idx(); } + VarDesc *Var(const std::string &name_bytes); VarDesc *FindVar(const std::string &name_bytes) const; @@ -73,6 +75,10 @@ class BlockDesc { BlockDesc *ParentBlock() const; + BlockDesc *ForwardBlock() const; + + void SetForwardBlockID(int32_t forward_block_id); + OpDesc *AppendOp(); void AppendAllocatedOp(std::unique_ptr &&op_desc); @@ -91,7 +97,7 @@ class BlockDesc { proto::BlockDesc *Proto(); - ProgramDesc *Program() { return this->prog_; } + ProgramDesc *Program() const { return this->prog_; } private: void ClearPBOps(); diff --git a/paddle/fluid/framework/framework.proto b/paddle/fluid/framework/framework.proto index 4eb18b4e4d..5b43f5a8a4 100644 --- a/paddle/fluid/framework/framework.proto +++ b/paddle/fluid/framework/framework.proto @@ -158,6 +158,7 @@ message BlockDesc { required int32 parent_idx = 2; repeated VarDesc vars = 3; repeated OpDesc ops = 4; + optional int32 forward_block_idx = 5 [ default = -1 ]; } // Please refer to diff --git a/paddle/fluid/framework/program_desc.h b/paddle/fluid/framework/program_desc.h index 8d4b999ad2..538a037211 100644 --- a/paddle/fluid/framework/program_desc.h +++ b/paddle/fluid/framework/program_desc.h @@ -38,7 +38,13 @@ class ProgramDesc { BlockDesc *AppendBlock(const BlockDesc &parent); - BlockDesc *MutableBlock(size_t idx) { return blocks_[idx].get(); } + BlockDesc *MutableBlock(size_t idx) { + if (idx == static_cast(kNoneBlockIndex)) { + return nullptr; + } else { + return blocks_[idx].get(); + } + } const BlockDesc &Block(size_t idx) const { return *blocks_[idx]; } diff --git a/paddle/fluid/operators/while_op.cc b/paddle/fluid/operators/while_op.cc index 3d5cdeda26..5f51a273dd 100644 --- a/paddle/fluid/operators/while_op.cc +++ b/paddle/fluid/operators/while_op.cc @@ -231,7 +231,8 @@ class WhileGradOpDescMaker : public framework::SingleGradOpDescMaker { while_grad->SetInput(kStepScopes, Output(kStepScopes)); auto *grad_block = this->grad_block_[0]; - auto *fwd_block = grad_block->ParentBlock(); + auto *fwd_block = grad_block->ForwardBlock(); + auto *parent_block = grad_block->ParentBlock(); // Not all of IGs will be generated by inner gradient operators of while op. // Ignore IGs that is not generated by the inside block. @@ -265,8 +266,10 @@ class WhileGradOpDescMaker : public framework::SingleGradOpDescMaker { for (auto &input_name : op->InputArgumentNames()) { // If the input of Op has been recorded or is generated by the forward // block, do not make it as input again. 
+ if (block_ins.find(input_name) != block_ins.end() || - fwd_block->FindVar(input_name) != nullptr) { + fwd_block->FindVar(input_name) != nullptr || + parent_block->FindVar(input_name) != nullptr) { continue; } extra_inputs.insert(input_name); diff --git a/paddle/fluid/pybind/protobuf.cc b/paddle/fluid/pybind/protobuf.cc index 131971099e..01dc53de78 100644 --- a/paddle/fluid/pybind/protobuf.cc +++ b/paddle/fluid/pybind/protobuf.cc @@ -155,6 +155,8 @@ void BindBlockDesc(py::module &m) { py::class_(m, "BlockDesc", "") .def_property_readonly("id", &BlockDesc::ID) .def_property_readonly("parent", &BlockDesc::Parent) + .def("get_forward_block_idx", &BlockDesc::ForwardBlockID) + .def("set_forward_block_idx", &BlockDesc::SetForwardBlockID) .def("append_op", &BlockDesc::AppendOp, py::return_value_policy::reference) .def("prepend_op", &BlockDesc::PrependOp, diff --git a/python/paddle/v2/fluid/backward.py b/python/paddle/v2/fluid/backward.py index 33ff43f693..ba27aaa246 100644 --- a/python/paddle/v2/fluid/backward.py +++ b/python/paddle/v2/fluid/backward.py @@ -298,7 +298,8 @@ def _append_backward_ops_(block, # If the op has its own sub-block, deal with the sub-block first if op.has_attr("sub_block"): sub_block = program.block(op.block_attr("sub_block")) - grad_sub_block = program.create_block(parent_idx=sub_block.idx) + grad_sub_block = program.create_block() + grad_sub_block.set_forward_block_idx(sub_block.idx) cb = _callback_lookup_(op) if cb is not None: if callbacks is None: @@ -310,6 +311,8 @@ def _append_backward_ops_(block, else: _append_backward_ops_(sub_block, sub_block.ops, grad_sub_block, no_grad_dict, grad_to_var, callbacks) + + program.rollback() grad_sub_block_list.append(grad_sub_block.desc) # Getting op's corresponding grad_op diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/v2/fluid/framework.py index 0e11709296..7ec04013c9 100644 --- a/python/paddle/v2/fluid/framework.py +++ b/python/paddle/v2/fluid/framework.py @@ -678,6 +678,13 @@ class Block(object): def parent_idx(self): return self.desc.parent + @property + def forward_block_idx(self): + return self.desc.get_forward_block_idx() + + def set_forward_block_idx(self, idx): + self.desc.set_forward_block_idx(idx) + @property def idx(self): return self.desc.id @@ -695,11 +702,22 @@ class Block(object): return self.var(name) else: if self.idx == 0: - raise ValueError("var %s is not in block(%d) nor its parents." 
% - name, self.idx) + raise ValueError( + "var {0} is not in block({1}) nor its parents.".format( + name, self.idx)) else: - parent_block = self.program.block(self.parent_idx) - return parent_block.var_recursive(name) + # DFS + try: + parent_block = self.program.block(self.parent_idx) + return parent_block.var_recursive(name) + except ValueError: + fwd_block = self.program.block( + self.forward_block_idx + ) if self.forward_block_idx != -1 else None + if fwd_block is not None: + return fwd_block.var_recursive(name) + else: + raise def all_parameters(self): return list(self.iter_parameters()) -- GitLab From 0d8192fbec7598bcef46738f9516b6cf39ea51d8 Mon Sep 17 00:00:00 2001 From: jiaozhenyu <35716744+jshower@users.noreply.github.com> Date: Thu, 22 Feb 2018 22:57:17 +0800 Subject: [PATCH 138/217] Update conll05.py the label file in the conll05 dataset has a wrong order --- python/paddle/v2/dataset/conll05.py | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/python/paddle/v2/dataset/conll05.py b/python/paddle/v2/dataset/conll05.py index 23f5a24a1c..7113202a12 100644 --- a/python/paddle/v2/dataset/conll05.py +++ b/python/paddle/v2/dataset/conll05.py @@ -41,6 +41,28 @@ EMB_MD5 = 'bf436eb0faa1f6f9103017f8be57cdb7' UNK_IDX = 0 +def load_label_dict(filename): + d = dict() + tag_dict = set() + with open(filename, 'r') as f: + for i, line in enumerate(f): + line = line.strip() + if line.startswith("B-"): + tag_dict.add(line[2:]) + elif line.startswith("I-"): + tag_dict.add(line[2:]) + else: + continue + index = 0 + for tag in tag_dict: + d["B-" + tag] = index + index += 1 + d["I-" + tag] = index + index += 1 + d["O"] = index + return d + + def load_dict(filename): d = dict() with open(filename, 'r') as f: @@ -188,7 +210,7 @@ def get_dict(): verb_dict = load_dict( paddle.v2.dataset.common.download(VERBDICT_URL, 'conll05st', VERBDICT_MD5)) - label_dict = load_dict( + label_dict = load_label_dict( paddle.v2.dataset.common.download(TRGDICT_URL, 'conll05st', TRGDICT_MD5)) return word_dict, verb_dict, label_dict -- GitLab From d50016b2a74dc9b5ae42012232a3f1cef29f2f8f Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 23 Feb 2018 01:51:44 +0800 Subject: [PATCH 139/217] Remove build warnings in float16.h (#8481) --- paddle/fluid/platform/float16.h | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/paddle/fluid/platform/float16.h b/paddle/fluid/platform/float16.h index cf6a4b09db..5832bd9ce3 100644 --- a/paddle/fluid/platform/float16.h +++ b/paddle/fluid/platform/float16.h @@ -74,17 +74,12 @@ struct PADDLE_ALIGN(2) float16 { // The following defaulted special class member functions // are added to make float16 pass the std::is_trivial test - HOSTDEVICE inline float16() = default; - - HOSTDEVICE inline float16(const float16&) = default; - - HOSTDEVICE inline float16& operator=(const float16&) = default; - - HOSTDEVICE inline float16(float16&&) = default; - - HOSTDEVICE inline float16& operator=(float16&&) = default; - - HOSTDEVICE inline ~float16() = default; + float16() = default; + float16(const float16& o) = default; + float16& operator=(const float16& o) = default; + float16(float16&& o) = default; + float16& operator=(float16&& o) = default; + ~float16() = default; // Constructors #ifdef PADDLE_CUDA_FP16 -- GitLab From 59bfa3e85063ea832b1f48e1bb549a6bc0fbdf5a Mon Sep 17 00:00:00 2001 From: haonanyu Date: Thu, 22 Feb 2018 11:58:26 -0800 Subject: [PATCH 140/217] fix a bug of sub() in layer_math.py --- 
python/paddle/trainer_config_helpers/layer_math.py | 2 +- .../tests/configs/protostr/math_ops.protostr | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/python/paddle/trainer_config_helpers/layer_math.py b/python/paddle/trainer_config_helpers/layer_math.py index e1c8f0c350..ee84188bac 100644 --- a/python/paddle/trainer_config_helpers/layer_math.py +++ b/python/paddle/trainer_config_helpers/layer_math.py @@ -75,7 +75,7 @@ LayerOutput.__add__ = add def sub(layeroutput, other): if is_compatible_with(other, float): - return slope_intercept_layer(input=layeroutput, intercept=other) + return slope_intercept_layer(input=layeroutput, intercept=-other) if not isinstance(other, LayerOutput): logger.fatal("LayerOutput can only be subtracted with" " another Layeroutput or a number") diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/math_ops.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/math_ops.protostr index eaaf7fd6f5..582207741a 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/protostr/math_ops.protostr +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/math_ops.protostr @@ -230,7 +230,7 @@ layers { input_layer_name: "__mixed_1__" } slope: 1.0 - intercept: 2 + intercept: -2 } layers { name: "__slope_intercept_layer_4__" @@ -411,4 +411,3 @@ sub_models { output_layer_names: "__mixed_3__" is_recurrent_layer_group: false } - -- GitLab From d3162339f6637114d515b4bc448fe4ae8cc81125 Mon Sep 17 00:00:00 2001 From: "Yang Yang(Tony)" Date: Thu, 22 Feb 2018 13:13:49 -0800 Subject: [PATCH 141/217] Update parallel_do.md (#8498) --- doc/design/parallel_do.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/design/parallel_do.md b/doc/design/parallel_do.md index 221af6b6a4..45f7731996 100644 --- a/doc/design/parallel_do.md +++ b/doc/design/parallel_do.md @@ -24,7 +24,7 @@ A vanilla implementation of parallel_do can be shown as the following (`|` means ``` In the forward pass | Split input onto different devices - | Copy parameter to onto different devices + | Copy parameter onto different devices |||| Compute forward pass in parallel | Merge output from different devices @@ -87,7 +87,7 @@ block2 { } ``` -## Proformance Imporvement +## Performance Improvement There are several places where we can make this parallel_do faster.
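Returning to the `sub()` fix in patch 140 above, a short plain-Python sketch shows why the sign flip matters; `slope_intercept` here is a stand-in for `slope_intercept_layer`, which computes `slope * x + intercept` element-wise (the regenerated protostr's `intercept: -2` confirms the semantics):

```python
def slope_intercept(x, slope=1.0, intercept=0.0):
    # Stand-in for slope_intercept_layer: y = slope * x + intercept.
    return slope * x + intercept

# "layer - 2" must lower activations by 2. Before the fix, sub()
# passed intercept=other and computed x + 2; with intercept=-other:
assert slope_intercept(5.0, intercept=-2.0) == 3.0
```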
-- GitLab From 88c22e9d1a809778a7bd83de71c370688cece0b2 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 23 Feb 2018 07:22:07 +0800 Subject: [PATCH 142/217] Speed up elemwise grad (#8402) * Speed up elemwise grad * Fix bug * Add macro for MAX_BLOCK_DIM --- paddle/fluid/operators/elementwise_add_op.h | 62 +---- .../fluid/operators/elementwise_op_function.h | 254 ++++++++++++++++++ 2 files changed, 259 insertions(+), 57 deletions(-) diff --git a/paddle/fluid/operators/elementwise_add_op.h b/paddle/fluid/operators/elementwise_add_op.h index 3c546bf3e4..253964562c 100644 --- a/paddle/fluid/operators/elementwise_add_op.h +++ b/paddle/fluid/operators/elementwise_add_op.h @@ -41,59 +41,8 @@ class ElementwiseAddKernel : public framework::OpKernel { }; template -struct ElementwiseAddGradFunctor { - template - void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz) { - auto dz_e = framework::EigenVector::Flatten(*dz); - if (dx) { - auto dx_e = framework::EigenVector::Flatten(*dx); - dx_e.device(d) = dz_e; - } - if (dy) { - auto dy_e = framework::EigenVector::Flatten(*dy); - dy_e.device(d) = dz_e; - } - } -}; - -template -struct ElementwiseAddBroadCastGradFunctor { - template - void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz, Pre pre, N n) { - auto dz_e = framework::EigenVector::Flatten(*dz); - if (dx) { - auto dx_e = framework::EigenVector::Flatten(*dx); - dx_e.device(d) = dz_e; - } - - if (dy) { - auto dy_e = framework::EigenVector::Flatten(*dy); - dy_e.device(d) = dz_e.reshape(Eigen::DSizes(pre, n)) - .sum(Eigen::array{{0}}); - } - } -}; - -template -struct ElementwiseAddBroadCast2GradFunctor { - template - void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz, Pre pre, N n, - Post post) { - auto dz_e = framework::EigenVector::Flatten(*dz); - if (dx) { - auto dx_e = framework::EigenVector::Flatten(*dx); - dx_e.device(d) = dz_e; - } - - if (dy) { - auto dy_e = framework::EigenVector::Flatten(*dy); - dy_e.device(d) = dz_e.reshape(Eigen::DSizes(pre, n, post)) - .sum(Eigen::array{{0, 2}}); - } - } +struct IdentityGrad { + HOSTDEVICE T operator()(T x, T y, T out, T dout) const { return dout; } }; template @@ -109,10 +58,9 @@ class ElementwiseAddGradKernel : public framework::OpKernel { auto* dx = ctx.Output(framework::GradVarName("X")); auto* dy = ctx.Output(framework::GradVarName("Y")); int axis = ctx.Attr("axis"); - ElementwiseGradCompute, - ElementwiseAddBroadCastGradFunctor, - ElementwiseAddBroadCast2GradFunctor>( - ctx, x, y, out, dout, axis, dx, dy); + ElemwiseGradCompute, IdentityGrad>( + ctx, *x, *y, *out, *dout, axis, dx, dy, IdentityGrad(), + IdentityGrad()); } }; diff --git a/paddle/fluid/operators/elementwise_op_function.h b/paddle/fluid/operators/elementwise_op_function.h index 2a4a611511..2da8c10322 100644 --- a/paddle/fluid/operators/elementwise_op_function.h +++ b/paddle/fluid/operators/elementwise_op_function.h @@ -20,9 +20,11 @@ limitations under the License. 
*/ #ifdef __NVCC__ #include +constexpr int ELEMWISE_MAX_BLOCK_DIM = 1024; #endif #include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/platform/for_range.h" namespace paddle { namespace operators { @@ -311,6 +313,258 @@ EIGEN_FUNCTOR(Mul, EIGEN_MUL); #define EIGEN_DIV(x, y) ((x) / (y)) EIGEN_FUNCTOR(Div, EIGEN_DIV); +template +struct ElemwiseGradNoBroadcast { + const T* x_; + const T* y_; + const T* out_; + const T* dout_; + + HOSTDEVICE void operator()(size_t i) { + if (dx_ != nullptr) { + dx_[i] = dx_op_(x_[i], y_[i], out_[i], dout_[i]); + } + if (dy_ != nullptr) { + dy_[i] = dy_op_(x_[i], y_[i], out_[i], dout_[i]); + } + } + + DX_OP dx_op_; + DY_OP dy_op_; + T* dx_; + T* dy_; +}; + +template +static void ElemwiseGradBroadcast1CPU(const T* x, const T* y, const T* out, + const T* dout, int h, int w, DX_OP dx_op, + DY_OP dy_op, T* dx, T* dy) { + for (int i = 0; i < h; ++i) { + for (int j = 0; j < w; ++j) { + int x_offset = i * w + j; + if (dx != nullptr) { + dx[x_offset] = dx_op(x[x_offset], y[j], out[x_offset], dout[x_offset]); + } + if (dy != nullptr) { + T tmp = dy_op(x[x_offset], y[j], out[x_offset], dout[x_offset]); + if (i == 0) { + dy[j] = tmp; + } else { + dy[j] += tmp; + } + } + } + } +} +#ifdef __NVCC__ +template +static __global__ void ElemwiseGradBroadcast1CUDAKernel( + const T* x, const T* y, const T* out, const T* dout, int h, int w, + DX_OP dx_op, DY_OP dy_op, T* dx, T* dy) { + extern __shared__ char shm_buffer[]; + T* shm = reinterpret_cast(shm_buffer); + + int j = blockIdx.x; + int i = threadIdx.x; + int tid = threadIdx.x; + shm[tid] = 0; + + do { + int x_offset = i * w + j; + if (dx) { + dx[x_offset] = dx_op(x[x_offset], y[j], out[x_offset], dout[x_offset]); + } + if (dy) { + shm[tid] += dy_op(x[x_offset], y[j], out[x_offset], dout[x_offset]); + } + i += ELEMWISE_MAX_BLOCK_DIM; + } while (i < h); + + if (dy) { + __syncthreads(); + + h = h > ELEMWISE_MAX_BLOCK_DIM ?
ELEMWISE_MAX_BLOCK_DIM : h; + + // Sum, could be optimized + if (threadIdx.x == 0) { + for (int k = 1; k < h; ++k) { + shm[0] += shm[k]; + } + dy[j] = shm[0]; + } + } +} + +template +static void ElemwiseGradBroadcast1CUDA(cudaStream_t stream, const T* x, + const T* y, const T* out, const T* dout, + int h, int w, DX_OP dx_op, DY_OP dy_op, + T* dx, T* dy) { + int block_size = std::min(ELEMWISE_MAX_BLOCK_DIM, h); + int gird_size = w; + int shared_mem_size = block_size * sizeof(T); + ElemwiseGradBroadcast1CUDAKernel<<>>(x, y, out, dout, h, w, dx_op, + dy_op, dx, dy); +} + +#endif + +template +static void ElemwiseGradBroadcast2CPU(const T* x, const T* y, const T* out, + const T* dout, int pre, int n, int post, + DX_OP dx_op, DY_OP dy_op, T* dx, T* dy) { + for (int i = 0; i < pre; ++i) { + for (int j = 0; j < n; ++j) { + for (int k = 0; k < post; ++k) { + int x_offset = i * n * post + j * post + k; + if (dx != nullptr) { + dx[x_offset] = + dx_op(x[x_offset], y[j], out[x_offset], dout[x_offset]); + } + if (dy != nullptr) { + T tmp = dy_op(x[x_offset], y[j], out[x_offset], dout[x_offset]); + if (i == 0 && k == 0) { + dy[j] = tmp; + } else { + dy[j] += tmp; + } + } + } + } + } +} + +#ifdef __NVCC__ + +template +static __global__ void ElemwiseGradBroadcast2CUDAKernel( + const T* x, const T* y, const T* out, const T* dout, int pre, int n, + int post, DX_OP dx_op, DY_OP dy_op, T* dx, T* dy) { + int tid = threadIdx.x; + int j = blockIdx.x; + + extern __shared__ char shm_buffer[]; + T* shm = reinterpret_cast(shm_buffer); + shm[tid] = 0; + int ttid = tid; + + while (true) { + int i = ttid / post; + int k = ttid % post; + if (i >= pre) break; + + int x_offset = i * n * post + j * post + k; + + if (dx != nullptr) { + dx[x_offset] = dx_op(x[x_offset], y[j], out[x_offset], dout[x_offset]); + } + + if (dy != nullptr) { + shm[tid] += dy_op(x[x_offset], y[j], out[x_offset], dout[x_offset]); + } + + ttid += ELEMWISE_MAX_BLOCK_DIM; + } + + if (dy) { + __syncthreads(); + int h = pre * post; + h = h > ELEMWISE_MAX_BLOCK_DIM ? ELEMWISE_MAX_BLOCK_DIM : h; + + // Sum, could be optimized + if (tid == 0) { + for (int i = 1; i < h; ++i) { + shm[0] += shm[i]; + } + dy[j] = shm[0]; + } + } +} + +template +static void ElemwiseGradBroadcast2CUDA(cudaStream_t stream, const T* x, + const T* y, const T* out, const T* dout, + int pre, int n, int post, DX_OP dx_op, + DY_OP dy_op, T* dx, T* dy) { + int block_size = std::min(ELEMWISE_MAX_BLOCK_DIM, pre * post); + int gird_size = n; + int shared_mem_size = block_size * sizeof(T); + ElemwiseGradBroadcast2CUDAKernel<<>>(x, y, out, dout, pre, n, post, + dx_op, dy_op, dx, dy); +} + +#endif + +template +void ElemwiseGradCompute(const framework::ExecutionContext& ctx, + const framework::Tensor& x, const framework::Tensor& y, + const framework::Tensor& out, + const framework::Tensor& dout, int axis, + framework::Tensor* dx, framework::Tensor* dy, + DX_OP dx_op, DY_OP dy_op) { + if (x.dims() == y.dims()) { + size_t N = static_cast(framework::product(x.dims())); + platform::ForRange for_range( + ctx.template device_context(), N); + for_range(ElemwiseGradNoBroadcast{ + x.data(), y.data(), out.data(), dout.data(), dx_op, dy_op, + dx == nullptr ? nullptr : dx->mutable_data(ctx.GetPlace()), + dy == nullptr ? 
nullptr : dy->mutable_data(ctx.GetPlace())}); + } else { // Y is a scalar + auto x_dim = x.dims(); + auto y_dim = y.dims(); + + if (y_dim.size() == 1 && y_dim[0] == 1) { + // y is a scalar + auto extended_dims = framework::vectorize(x_dim); + extended_dims.push_back(1); + x_dim = framework::make_ddim(extended_dims); + } + + axis = (axis == -1 ? x_dim.size() - y_dim.size() : axis); + int pre, n, post; + get_mid_dims(x_dim, y_dim, axis, pre, n, post); + if (post == 1) { + int h = pre; + int w = n; + if (platform::is_gpu_place(ctx.GetPlace())) { +#ifdef __NVCC__ + ElemwiseGradBroadcast1CUDA( + ctx.template device_context().stream(), x.data(), + y.data(), out.data(), dout.data(), h, w, dx_op, dy_op, + dx == nullptr ? nullptr : dx->mutable_data(ctx.GetPlace()), + dy == nullptr ? nullptr : dy->mutable_data(ctx.GetPlace())); +#endif + } else { + ElemwiseGradBroadcast1CPU( + x.data(), y.data(), out.data(), dout.data(), h, w, + dx_op, dy_op, + dx == nullptr ? nullptr : dx->mutable_data(ctx.GetPlace()), + dy == nullptr ? nullptr : dy->mutable_data(ctx.GetPlace())); + } + } else { + if (platform::is_gpu_place(ctx.GetPlace())) { +#ifdef __NVCC__ + ElemwiseGradBroadcast2CUDA( + ctx.template device_context().stream(), x.data(), + y.data(), out.data(), dout.data(), pre, n, post, dx_op, + dy_op, + dx == nullptr ? nullptr : dx->mutable_data(ctx.GetPlace()), + dy == nullptr ? nullptr : dy->mutable_data(ctx.GetPlace())); +#endif + } else { + ElemwiseGradBroadcast2CPU( + x.data(), y.data(), out.data(), dout.data(), pre, n, + post, dx_op, dy_op, + dx == nullptr ? nullptr : dx->mutable_data(ctx.GetPlace()), + dy == nullptr ? nullptr : dy->mutable_data(ctx.GetPlace())); + } + } + } +}; + template void ElementwiseGradCompute(const framework::ExecutionContext& ctx, -- GitLab From 77ee8fb2409ab8ff626c2a9456417e47a90c9fe2 Mon Sep 17 00:00:00 2001 From: kavyasrinet Date: Thu, 22 Feb 2018 16:05:41 -0800 Subject: [PATCH 143/217] Exposing Channel to be used as a Variable and integrating with Fluid (#8486) * Adding set_capacity method support * Adding Python for make_channel * Updating notest_concurrency * Write python for make_channel method * Write python for make_channel method * Fix make_channel and test * Placeholder ops for channel send, recv and close * Adding ToTypeIndex method to var_type.h * Add var_type.h to channel: * Added POD_Type to the method * Add CHANNEL to executor * Updated get and set DataType to accomodate Channels * Updating get and set to incorporate channels * Adding CHANNEL as supported VarType in protobuf * Removing unecessary import * Fixing VarDesc to adapt to Channel as VarType * Add channel.h to executor * Remove innclude from channel * Updated var_type to support Channel as var type * Adding get_channel to pybind * Added ChannelHolder * Adding make_channel as an op * Adding ChannelHolder in channel * Fixing typo * Commenting out operators in concurrency * Removing totypeid right now since we don't need it. 
* Reverting python changes * Fixing typo in framework.py * Modify comments for ReaderHolder --- paddle/fluid/framework/channel.h | 73 ++++++++++++++++++++++++++++++ paddle/fluid/framework/executor.cc | 5 +- paddle/fluid/framework/var_desc.cc | 54 +++++++++++++++++++++- paddle/fluid/framework/var_desc.h | 4 ++ paddle/fluid/framework/var_type.h | 6 +++ paddle/fluid/pybind/protobuf.cc | 2 + paddle/fluid/pybind/pybind.cc | 1 + 7 files changed, 142 insertions(+), 3 deletions(-) diff --git a/paddle/fluid/framework/channel.h b/paddle/fluid/framework/channel.h index 5acf4fb39b..8ca1f2aa47 100644 --- a/paddle/fluid/framework/channel.h +++ b/paddle/fluid/framework/channel.h @@ -15,6 +15,8 @@ limitations under the License. */ #pragma once #include // for size_t +#include +#include "paddle/fluid/platform/enforce.h" namespace paddle { namespace framework { @@ -51,6 +53,77 @@ void CloseChannel(Channel* ch) { ch->Close(); } +/* + * The ChannelHolder class serves two main purposes: + * 1. It acts as a unified wrapper for the different kinds of + * channels, i.e. Buffered and Unbuffered channels. This is + * similar to the ReaderHolder class. + * 2. It also helps us in TypeHiding. This is similar to the + * PlaceHolder implementations in variable.h and tensor.h. + */ +class ChannelHolder { + public: + template + void Reset(size_t buffer_size) { + holder_.reset(new PlaceholderImpl(buffer_size)); + } + + template + bool Send(T* data) { + if (!IsInitialized()) return false; + PADDLE_ENFORCE_EQ(holder_->Type(), std::type_index(typeid(T))); + // Static cast should be safe because we have ensured that types are same + Channel* channel = static_cast*>(holder_->Ptr()); + return channel != nullptr ? channel->Send(data) : false; + } + + template + bool Receive(T* data) { + if (!IsInitialized()) return false; + PADDLE_ENFORCE_EQ(holder_->Type(), std::type_index(typeid(T))); + Channel* channel = static_cast*>(holder_->Ptr()); + return channel != nullptr ? channel->Receive(data) : false; + } + + void close() { + if (IsInitialized()) holder_->Close(); + } + + inline bool IsInitialized() const { return holder_ != nullptr; } + + private: + /** + * @note Placeholder hides type T, so it doesn't appear as a template + * parameter of ChannelHolder. + */ + struct Placeholder { + virtual ~Placeholder() {} + virtual const std::type_index Type() const = 0; + virtual void* Ptr() const = 0; + virtual void Close() const = 0; + std::type_info type_; + }; + + template + struct PlaceholderImpl : public Placeholder { + PlaceholderImpl(size_t buffer_size) : type_(std::type_index(typeid(T))) { + channel_.reset(MakeChannel(buffer_size)); + } + + virtual const std::type_index Type() const { return type_; } + virtual void* Ptr() const { return static_cast(channel_.get()); } + virtual void Close() { + if (channel_) channel_->Close(); + } + + std::unique_ptr*> channel_; + const std::type_index type_; + }; + + // Pointer to a PlaceholderImpl object + std::unique_ptr holder_; +}; + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index 6fd19e804a..0d2691e811 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -17,6 +17,7 @@ limitations under the License. 
*/ #include #include "gflags/gflags.h" +#include "paddle/fluid/framework/channel.h" #include "paddle/fluid/framework/feed_fetch_method.h" #include "paddle/fluid/framework/feed_fetch_type.h" #include "paddle/fluid/framework/lod_rank_table.h" @@ -55,13 +56,15 @@ static void CreateTensor(Variable* var, proto::VarType::Type var_type) { var->GetMutable(); } else if (var_type == proto::VarType::READER) { var->GetMutable(); + } else if (var_type == proto::VarType::CHANNEL) { + var->GetMutable(); } else if (var_type == proto::VarType::NCCL_COM) { // GetMutable will be called in ncclInit } else { PADDLE_THROW( "Variable type %d is not in " "[LOD_TENSOR, SELECTED_ROWS, FEED_MINIBATCH, FETCH_LIST, " - "LOD_RANK_TABLE, PLACE_LIST, READER, NCCL_COM]", + "LOD_RANK_TABLE, PLACE_LIST, READER, CHANNEL, NCCL_COM]", var_type); } } diff --git a/paddle/fluid/framework/var_desc.cc b/paddle/fluid/framework/var_desc.cc index 7e3f002b53..1aa0ae0f7c 100644 --- a/paddle/fluid/framework/var_desc.cc +++ b/paddle/fluid/framework/var_desc.cc @@ -88,7 +88,13 @@ std::vector> VarDesc::GetShapes() const { } void VarDesc::SetDataType(proto::VarType::Type data_type) { - mutable_tensor_desc()->set_data_type(data_type); + switch (desc_.type().type()) { + case proto::VarType::CHANNEL: + mutable_channel_desc()->set_data_type(data_type); + break; + default: + mutable_tensor_desc()->set_data_type(data_type); + } } void VarDesc::SetDataTypes( @@ -109,7 +115,13 @@ void VarDesc::SetDataTypes( } proto::VarType::Type VarDesc::GetDataType() const { - return tensor_desc().data_type(); + switch (desc_.type().type()) { + case proto::VarType::CHANNEL: + return channel_desc().data_type(); + break; + default: + return tensor_desc().data_type(); + } } std::vector VarDesc::GetDataTypes() const { @@ -122,6 +134,17 @@ std::vector VarDesc::GetDataTypes() const { return res; } +void VarDesc::SetCapacity(int64_t capacity) { + switch (desc_.type().type()) { + case proto::VarType::CHANNEL: + desc_.mutable_type()->mutable_channel()->set_capacity(capacity); + break; + default: + PADDLE_THROW("Setting 'capacity' is not supported by the type of var %s.", + this->Name()); + } +} + void VarDesc::SetLoDLevel(int32_t lod_level) { switch (desc_.type().type()) { case proto::VarType::LOD_TENSOR: @@ -191,6 +214,19 @@ std::vector VarDesc::GetLoDLevels() const { } } +const proto::VarType::ChannelDesc &VarDesc::channel_desc() const { + PADDLE_ENFORCE(desc_.has_type(), "The var's type hasn't been set."); + PADDLE_ENFORCE(desc_.type().has_type(), "The var type hasn't been set."); + switch (desc_.type().type()) { + case proto::VarType::CHANNEL: + return desc_.type().channel(); + default: + PADDLE_THROW( + "Getting 'channel_desc' is not supported by the type of var %s.", + this->Name()); + } +} + const proto::VarType::TensorDesc &VarDesc::tensor_desc() const { PADDLE_ENFORCE(desc_.has_type(), "The var's type hasn't been set."); PADDLE_ENFORCE(desc_.type().has_type(), "The var type hasn't been set."); @@ -226,6 +262,20 @@ std::vector VarDesc::tensor_descs() const { } } +proto::VarType::ChannelDesc *VarDesc::mutable_channel_desc() { + PADDLE_ENFORCE(desc_.has_type(), "The var type hasn't been set."); + PADDLE_ENFORCE(desc_.type().has_type(), "The var type hasn't been set."); + switch (desc_.type().type()) { + case proto::VarType::CHANNEL: + return desc_.mutable_type()->mutable_channel(); + default: + PADDLE_THROW( + "Getting 'mutable_channel_desc' is not supported by the type of var " + "%s.", + this->Name()); + } +} + proto::VarType::TensorDesc 
*VarDesc::mutable_tensor_desc() { PADDLE_ENFORCE(desc_.has_type(), "The var type hasn't been set."); PADDLE_ENFORCE(desc_.type().has_type(), "The var type hasn't been set."); diff --git a/paddle/fluid/framework/var_desc.h b/paddle/fluid/framework/var_desc.h index 19b8d890c1..f62415fda6 100644 --- a/paddle/fluid/framework/var_desc.h +++ b/paddle/fluid/framework/var_desc.h @@ -85,6 +85,8 @@ class VarDesc { void SetDataTypes( const std::vector &multiple_data_type); + void SetCapacity(int64_t capacity); + proto::VarType::Type GetDataType() const; std::vector GetDataTypes() const; @@ -106,8 +108,10 @@ class VarDesc { void SetPersistable(bool persistable) { desc_.set_persistable(persistable); } private: + const proto::VarType::ChannelDesc &channel_desc() const; const proto::VarType::TensorDesc &tensor_desc() const; std::vector tensor_descs() const; + proto::VarType::ChannelDesc *mutable_channel_desc(); proto::VarType::TensorDesc *mutable_tensor_desc(); std::vector mutable_tensor_descs(); diff --git a/paddle/fluid/framework/var_type.h b/paddle/fluid/framework/var_type.h index 960ebff9d7..2b646d78f0 100644 --- a/paddle/fluid/framework/var_type.h +++ b/paddle/fluid/framework/var_type.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include "paddle/fluid/framework/channel.h" #include "paddle/fluid/framework/framework.pb.h" #include "paddle/fluid/framework/lod_rank_table.h" #include "paddle/fluid/framework/lod_tensor.h" @@ -34,6 +35,8 @@ inline proto::VarType::Type ToVarType(std::type_index type) { return proto::VarType_Type_SELECTED_ROWS; } else if (type.hash_code() == typeid(ReaderHolder).hash_code()) { return proto::VarType_Type_READER; + } else if (type.hash_code() == typeid(ChannelHolder).hash_code()) { + return proto::VarType_Type_CHANNEL; } else { PADDLE_THROW("ToVarType:Unsupported type %s", type.name()); } @@ -57,6 +60,9 @@ inline void VisitVarType(const framework::Variable& var, Visitor visitor) { case proto::VarType_Type_READER: visitor(var.Get()); return; + case proto::VarType_Type_CHANNEL: + visitor(var.Get()); + return; default: PADDLE_THROW("Not supported visit type, %d", ToVarType(var.Type())); } diff --git a/paddle/fluid/pybind/protobuf.cc b/paddle/fluid/pybind/protobuf.cc index 4e04151c6a..1a9d7c421b 100644 --- a/paddle/fluid/pybind/protobuf.cc +++ b/paddle/fluid/pybind/protobuf.cc @@ -216,6 +216,7 @@ void BindVarDsec(py::module &m) { .def("set_shapes", &VarDesc::SetShapes) .def("set_dtype", &VarDesc::SetDataType) .def("set_dtypes", &VarDesc::SetDataTypes) + .def("set_capacity", &VarDesc::SetCapacity) .def("shape", &VarDesc::GetShape, py::return_value_policy::reference) .def("shapes", &VarDesc::GetShapes, py::return_value_policy::reference) .def("dtype", &VarDesc::GetDataType, py::return_value_policy::reference) @@ -246,6 +247,7 @@ void BindVarDsec(py::module &m) { .value("STEP_SCOPES", proto::VarType::STEP_SCOPES) .value("LOD_RANK_TABLE", proto::VarType::LOD_RANK_TABLE) .value("LOD_TENSOR_ARRAY", proto::VarType::LOD_TENSOR_ARRAY) + .value("CHANNEL", proto::VarType::CHANNEL) .value("PLACE_LIST", proto::VarType::PLACE_LIST) .value("READER", proto::VarType::READER) .value("NCCL_COM", proto::VarType::NCCL_COM); diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index 56c1a935d9..abe2b11449 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -17,6 +17,7 @@ limitations under the License. 
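
The var_type.h change above routes CHANNEL variables through the same VisitVarType dispatch as tensors and readers, so a visitor only needs one more overload. A small sketch for illustration (TypePrinter is a hypothetical name, not part of the patch):

#include "paddle/fluid/framework/var_type.h"

struct TypePrinter {
  // Reached through the new CHANNEL case in VisitVarType.
  void operator()(const paddle::framework::ChannelHolder& holder) const {}
  // Fallback for LoDTensor, SelectedRows, ReaderHolder, and the rest.
  template <typename T>
  void operator()(const T& payload) const {}
};

// Usage would look like: VisitVarType(var, TypePrinter());
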
*/ #include // for call_once #include #include "paddle/fluid/framework/backward.h" +#include "paddle/fluid/framework/channel.h" #include "paddle/fluid/framework/executor.h" #include "paddle/fluid/framework/feed_fetch_method.h" #include "paddle/fluid/framework/framework.pb.h" -- GitLab From 0e187bc93e76051e70f0c506e0df5f2359c3d0cc Mon Sep 17 00:00:00 2001 From: chengduo Date: Fri, 23 Feb 2018 08:58:01 +0800 Subject: [PATCH 144/217] fix get_mid_dims annotation (#8490) --- paddle/fluid/operators/elementwise_op_function.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/operators/elementwise_op_function.h b/paddle/fluid/operators/elementwise_op_function.h index 2da8c10322..5c78303530 100644 --- a/paddle/fluid/operators/elementwise_op_function.h +++ b/paddle/fluid/operators/elementwise_op_function.h @@ -35,10 +35,10 @@ namespace operators { * For example: * 1. shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 * pre=2, n=3*4, post=5 - * x.shape(2, 12, 5) * y.shape(1,12,1).broadcast(2,12,5) + * x.shape(2, 12, 5) * y.shape(1, 12, 1).broadcast(2, 12, 5) * 2. shape(X) = (2, 3, 4, 5), shape(Y) = (4,5) * pre=2*3, n=4*5, post=1 - * x.shape(2, 3, 20) * y.shape(1,1,20).broadcast(2,3,20) + * x.shape(6, 20, 1) * y.shape(1, 20, 1).broadcast(6, 20, 1) */ inline void get_mid_dims(const framework::DDim& x_dims, const framework::DDim& y_dims, const int axis, -- GitLab From bf92706c58f8c89db9b670523e8aa4fcd2c067a7 Mon Sep 17 00:00:00 2001 From: qijun Date: Fri, 23 Feb 2018 11:40:30 +0800 Subject: [PATCH 145/217] fix bug in memory optimization transpiler --- .../fluid/memory_optimization_transpiler.py | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/python/paddle/v2/fluid/memory_optimization_transpiler.py b/python/paddle/v2/fluid/memory_optimization_transpiler.py index ee56ccdcf1..6952ca7fe4 100644 --- a/python/paddle/v2/fluid/memory_optimization_transpiler.py +++ b/python/paddle/v2/fluid/memory_optimization_transpiler.py @@ -223,15 +223,15 @@ def get_cfgs(input_program): # Find while/while_grad block pair for grad_id in while_grad_sub_block_ids: - parent_id = pdesc.block(grad_id).parent - if parent_id in while_sub_block_ids: - while_block_id_pair.append((parent_id, grad_id)) - while_sub_block_ids.remove(parent_id) + forward_id = pdesc.block(grad_id).get_forward_block_idx() + if forward_id in while_sub_block_ids: + while_block_id_pair.append((forward_id, grad_id)) + while_sub_block_ids.remove(forward_id) # Get while/while_grad block ops - for parent_id, grad_id in while_block_id_pair: + for forward_id, grad_id in while_block_id_pair: while_block_ops = [] - while_block = pdesc.block(parent_id) + while_block = pdesc.block(forward_id) while_block_op_size = while_block.op_size() for i in range(while_block_op_size): while_block_ops.append(while_block.op(i)) @@ -242,21 +242,21 @@ def get_cfgs(input_program): while_block_ops.append(while_grad_block.op(i)) while_op_output = set() - while_op_output.update(while_op_dict[parent_id].output_arg_names()) + while_op_output.update(while_op_dict[forward_id].output_arg_names()) while_op_output.update(while_op_dict[grad_id].output_arg_names()) ops_list.append((while_block_ops, while_block_op_size, while_op_output)) # Process rest while block ops - for parent_id in while_sub_block_ids: + for forward_id in while_sub_block_ids: while_block_ops = [] - while_block = pdesc.block(parent_id) + while_block = pdesc.block(forward_id) while_block_op_size = while_block.op_size() for i in range(while_block_op_size): 
while_block_ops.append(while_block.op(i)) while_op_output = set() - while_op_output.update(while_op_dict[parent_id].output_arg_names()) + while_op_output.update(while_op_dict[forward_id].output_arg_names()) ops_list.append((while_block_ops, while_block_op_size, while_op_output)) -- GitLab From 65058cfb7ac07204cbd2dcdc05e845a447fc54f8 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 23 Feb 2018 12:58:32 +0800 Subject: [PATCH 146/217] Change DFS to BFS --- paddle/fluid/framework/block_desc.cc | 38 +++++++++++++++-------- python/paddle/v2/fluid/framework.py | 46 ++++++++++++++++------------ 2 files changed, 51 insertions(+), 33 deletions(-) diff --git a/paddle/fluid/framework/block_desc.cc b/paddle/fluid/framework/block_desc.cc index 996aefd047..1efb775cdc 100644 --- a/paddle/fluid/framework/block_desc.cc +++ b/paddle/fluid/framework/block_desc.cc @@ -16,6 +16,8 @@ limitations under the License. */ #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/program_desc.h" +#include + namespace paddle { namespace framework { @@ -45,23 +47,33 @@ bool BlockDesc::HasVar(const std::string &name) const { VarDesc *BlockDesc::FindVarRecursive(const std::string &name) const { if (name == kEmptyVarName) return nullptr; - auto it = vars_.find(name); - if (it != vars_.end()) { - return it->second.get(); - } + std::queue frontier; + std::unordered_set visited; - BlockDesc *tmp = ParentBlock(); + frontier.push(this); - if (tmp != nullptr) { - auto ptr = tmp->FindVarRecursive(name); - if (ptr != nullptr) { - return ptr; + while (!frontier.empty()) { // BFS + auto cur = frontier.front(); + frontier.pop(); + if (visited.count(cur) != 0) { + continue; + } + auto var = cur->FindVar(name); + if (var != nullptr) { + return var; + } + + auto fwd = cur->ForwardBlock(); + auto parent = cur->ParentBlock(); + + if (fwd != nullptr) { + frontier.push(fwd); + } + if (parent != nullptr) { + frontier.push(parent); } - } - tmp = ForwardBlock(); - if (tmp != nullptr) { - return tmp->FindVarRecursive(name); + visited.insert(cur); } return nullptr; diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/v2/fluid/framework.py index 7ec04013c9..3ec8d97814 100644 --- a/python/paddle/v2/fluid/framework.py +++ b/python/paddle/v2/fluid/framework.py @@ -698,26 +698,32 @@ class Block(object): return v def var_recursive(self, name): - if self.has_var(name): - return self.var(name) - else: - if self.idx == 0: - raise ValueError( - "var {0} is not in block({1}) nor its parents.".format( - name, self.idx)) - else: - # DFS - try: - parent_block = self.program.block(self.parent_idx) - return parent_block.var_recursive(name) - except ValueError: - fwd_block = self.program.block( - self.forward_block_idx - ) if self.forward_block_idx != -1 else None - if fwd_block is not None: - return fwd_block.var_recursive(name) - else: - raise + frontier = list() + visited = set() + + frontier.append(self) + + prog = self.program + + while len(frontier) != 0: # BFS + cur = frontier[0] + frontier = frontier[1:] + + if id(cur) in visited: + continue + + if cur.has_var(name): + return cur.var(name) + + if cur.parent_idx != -1: + frontier.append(prog.block(cur.parent_idx)) + + if cur.forward_block_idx != -1: + frontier.append(prog.block(cur.forward_block_idx)) + + visited.add(id(cur)) + + raise ValueError("Var {0} is not found recursively".format(name)) def all_parameters(self): return list(self.iter_parameters()) -- GitLab From 574bcdab42c8db34f0f082ffba69aacbea36c36d Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 23 Feb 2018 
13:13:38 +0800 Subject: [PATCH 147/217] Add comments --- paddle/fluid/operators/while_op.cc | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/paddle/fluid/operators/while_op.cc b/paddle/fluid/operators/while_op.cc index 5f51a273dd..8b62b242cf 100644 --- a/paddle/fluid/operators/while_op.cc +++ b/paddle/fluid/operators/while_op.cc @@ -261,35 +261,37 @@ class WhileGradOpDescMaker : public framework::SingleGradOpDescMaker { for (auto &o : Output(kOutputs)) { block_ins.insert(o); } - std::unordered_set extra_inputs; + std::unordered_set output_grads; for (const auto *op : grad_block->AllOps()) { for (auto &input_name : op->InputArgumentNames()) { // If the input of Op has been recorded or is generated by the forward // block, do not make it as input again. + // The input is located in I/O or other op's outputs or the variable is + // located in grad_block's parents if (block_ins.find(input_name) != block_ins.end() || - fwd_block->FindVar(input_name) != nullptr || - parent_block->FindVar(input_name) != nullptr) { + (fwd_block->FindVarRecursive(input_name) != nullptr || + parent_block->FindVarRecursive(input_name) != nullptr)) { continue; } - extra_inputs.insert(input_name); + output_grads.insert(input_name); } for (auto &output_name : op->OutputArgumentNames()) { block_ins.insert(output_name); } } - std::vector extra_inputs_list; - extra_inputs_list.resize(extra_inputs.size()); - std::copy(extra_inputs.begin(), extra_inputs.end(), - extra_inputs_list.begin()); - while_grad->SetInput(framework::GradVarName(kOutputs), extra_inputs_list); + std::vector output_grads_list; + output_grads_list.resize(output_grads.size()); + std::copy(output_grads.begin(), output_grads.end(), + output_grads_list.begin()); + while_grad->SetInput(framework::GradVarName(kOutputs), output_grads_list); while_grad->SetAttrMap(this->Attrs()); while_grad->SetBlockAttr(kStepBlock, *grad_block); // record the original output gradient names, since the gradient name of // while operator could be renamed. 
- while_grad->SetAttr("original_output_grad", extra_inputs_list); + while_grad->SetAttr("original_output_grad", output_grads_list); return std::unique_ptr(while_grad); } -- GitLab From 71053063a105bf64e08ae5826019c05cb7639b3b Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 23 Feb 2018 14:32:35 +0800 Subject: [PATCH 148/217] test Parallel.Do and DynRNN --- python/paddle/v2/fluid/layers/control_flow.py | 3 +- .../tests/book/test_understand_sentiment.py | 57 +++++++++++++++++++ 2 files changed, 59 insertions(+), 1 deletion(-) diff --git a/python/paddle/v2/fluid/layers/control_flow.py b/python/paddle/v2/fluid/layers/control_flow.py index b9ab28a86a..72056cc7cd 100644 --- a/python/paddle/v2/fluid/layers/control_flow.py +++ b/python/paddle/v2/fluid/layers/control_flow.py @@ -652,7 +652,8 @@ class While(object): parent_block.append_op( type='while', inputs={ - 'X': [parent_block.var(x_name) for x_name in x_name_list], + 'X': + [parent_block.var_recursive(x_name) for x_name in x_name_list], 'Condition': [self.cond_var] }, outputs={'Out': out_vars, diff --git a/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py index af917de8e3..61f46b51c4 100644 --- a/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py +++ b/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py @@ -47,6 +47,46 @@ def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32, return avg_cost, accuracy, prediction +def dyn_rnn_lstm(data, label, input_dim, class_dim=2, emb_dim=32, + lstm_size=128): + emb = fluid.layers.embedding( + input=data, size=[input_dim, emb_dim], is_sparse=True) + sentence = fluid.layers.fc(input=emb, size=lstm_size, act='tanh') + + rnn = fluid.layers.DynamicRNN() + with rnn.block(): + word = rnn.step_input(sentence) + prev_hidden = rnn.memory(value=0.0, shape=[lstm_size]) + prev_cell = rnn.memory(value=0.0, shape=[lstm_size]) + + def gate_common(ipt, hidden, size): + gate0 = fluid.layers.fc(input=ipt, size=size, bias_attr=True) + gate1 = fluid.layers.fc(input=hidden, size=size, bias_attr=False) + return gate0 + gate1 + + forget_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden, + lstm_size)) + input_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden, + lstm_size)) + output_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden, + lstm_size)) + cell_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden, + lstm_size)) + + cell = forget_gate * prev_cell + input_gate * cell_gate + hidden = output_gate * fluid.layers.tanh(x=cell) + rnn.update_memory(prev_cell, cell) + rnn.update_memory(prev_hidden, hidden) + rnn.output(hidden) + + last = fluid.layers.sequence_last_step(rnn()) + prediction = fluid.layers.fc(input=last, size=class_dim, act="softmax") + cost = fluid.layers.cross_entropy(input=prediction, label=label) + avg_cost = fluid.layers.mean(x=cost) + accuracy = fluid.layers.accuracy(input=prediction, label=label) + return avg_cost, accuracy, prediction + + def stacked_lstm_net(data, label, input_dim, @@ -270,6 +310,23 @@ class TestUnderstandSentiment(unittest.TestCase): use_cuda=True, parallel=True) + @unittest.skip(reason='make CI faster') + def test_dynrnn_lstm_gpu(self): + with self.new_program_scope(): + main( + self.word_dict, + net_method=dyn_rnn_lstm, + use_cuda=True, + parallel=False) + + def test_dynrnn_lstm_gpu_parallel(self): + with self.new_program_scope(): + main( + self.word_dict, + net_method=dyn_rnn_lstm, + use_cuda=True, + parallel=True) + if __name__ == 
'__main__': unittest.main() -- GitLab From bc0f04df4e84f1a79ca97fba362c581a26ac4c86 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=AD=A6=E6=AF=85?= Date: Fri, 23 Feb 2018 15:02:33 +0800 Subject: [PATCH 149/217] Fix fluid dist benchmark document errors (#8512) Fix fluid dist benchmark document errors --- benchmark/cluster/vgg16/README.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/benchmark/cluster/vgg16/README.md b/benchmark/cluster/vgg16/README.md index 11d00b8f85..201d38928c 100644 --- a/benchmark/cluster/vgg16/README.md +++ b/benchmark/cluster/vgg16/README.md @@ -8,10 +8,12 @@ - cpu MHz : 2101.000 - cache size : 20480 KB +### Blas settings + +Setting environment variable: `MKL_NUM_THREADS=1`. + ### Single Node Single Thread -- PServer Count: 10 -- Trainer Count: 20 - Metrics: samples / sec | Batch Size | 32 | 64 | 128 | 256 | @@ -24,7 +26,6 @@ - PServer Count: 10 - Trainer Count: 20 -- Per trainer CPU Core: 1 - Metrics: samples / sec | Batch Size | 32 | 64 | 128 | 256 | -- GitLab From 6e7fee0ee17e47786ca60f37a58133a612272893 Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Thu, 22 Feb 2018 23:51:49 -0800 Subject: [PATCH 150/217] Add unit tests for ChannelHolder (#8510) --- paddle/fluid/framework/channel.h | 5 ++-- paddle/fluid/framework/channel_test.cc | 34 ++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 3 deletions(-) diff --git a/paddle/fluid/framework/channel.h b/paddle/fluid/framework/channel.h index 8ca1f2aa47..be57805938 100644 --- a/paddle/fluid/framework/channel.h +++ b/paddle/fluid/framework/channel.h @@ -100,8 +100,7 @@ class ChannelHolder { virtual ~Placeholder() {} virtual const std::type_index Type() const = 0; virtual void* Ptr() const = 0; - virtual void Close() const = 0; - std::type_info type_; + virtual void Close() = 0; }; template <typename T> @@ -116,7 +115,7 @@ class ChannelHolder { if (channel_) channel_->Close(); } - std::unique_ptr<Channel<T>*> channel_; + std::unique_ptr<Channel<T>> channel_; const std::type_index type_; }; diff --git a/paddle/fluid/framework/channel_test.cc b/paddle/fluid/framework/channel_test.cc index 953fa40fec..2c4e622bd7 100644 --- a/paddle/fluid/framework/channel_test.cc +++ b/paddle/fluid/framework/channel_test.cc @@ -20,6 +20,7 @@ limitations under the License. */ #include "gtest/gtest.h" using paddle::framework::Channel; +using paddle::framework::ChannelHolder; using paddle::framework::MakeChannel; using paddle::framework::CloseChannel; using paddle::framework::details::Buffered; @@ -508,3 +509,36 @@ TEST(Channel, UnbufferedChannelDestroyUnblocksSendersTest) { auto ch = MakeChannel(0); ChannelDestroyUnblockSenders(ch); } + +void ChannelHolderSendReceive(ChannelHolder *ch) { + unsigned sum_send = 0; + std::thread t([&]() { + for (int i = 0; i < 5; i++) { + EXPECT_EQ(ch->Send(&i), true); + sum_send += i; + } + }); + for (int i = 0; i < 5; i++) { + int recv; + EXPECT_EQ(ch->Receive(&recv), true); + EXPECT_EQ(recv, i); + } + + ch->close(); + t.join(); + EXPECT_EQ(sum_send, 10U); } + +TEST(ChannelHolder, ChannelHolderBufferedSendReceiveTest) { + ChannelHolder *ch = new ChannelHolder(); + ch->Reset<int>(10); + ChannelHolderSendReceive(ch); + delete ch; +} + +TEST(ChannelHolder, ChannelHolderUnBufferedSendReceiveTest) { + ChannelHolder *ch = new ChannelHolder(); + ch->Reset<int>(0); + ChannelHolderSendReceive(ch); + delete ch; +} -- GitLab From 8c0434c318edcc129e3ca9861aab05e717fda098 Mon Sep 17 00:00:00 2001 From: gongweibao Date: Fri, 23 Feb 2018 16:07:32 +0800 Subject: [PATCH 151/217] Add single node tensorflow benchmark.
(#8513) Add single node tensorflow benchmark --- benchmark/cluster/vgg16/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmark/cluster/vgg16/README.md b/benchmark/cluster/vgg16/README.md index 201d38928c..cd681a1a28 100644 --- a/benchmark/cluster/vgg16/README.md +++ b/benchmark/cluster/vgg16/README.md @@ -20,7 +20,7 @@ Setting environment variable: `MKL_NUM_THREADS=1`. | -- | -- | -- | -- | -- | | PaddlePaddle Fluid | 15.44 | 16.32 | 16.74 | 16.79 | | PaddlePaddle v2 | 15.97 | 17.04 | 17.60 | 17.83 | -| TensorFlow | - | - | - | - | +| TensorFlow | 9.09 | 9.10 | 9.24 | 8.66 | ### Different Batch Size -- GitLab From 7a9098a60e6f65bc714640f1cc94b89f89cfd8fe Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sat, 24 Feb 2018 05:14:57 +0800 Subject: [PATCH 152/217] Add block.fwd_block_id (#8489) * Add block.fwd_block_id * fix bug in memory optimization transpiler * Change DFS to BFS * Add comments --- paddle/fluid/framework/block_desc.cc | 52 +++++++++++++++---- paddle/fluid/framework/block_desc.h | 8 ++- paddle/fluid/framework/framework.proto | 1 + paddle/fluid/framework/program_desc.h | 8 ++- paddle/fluid/operators/while_op.cc | 25 +++++---- paddle/fluid/pybind/protobuf.cc | 2 + python/paddle/v2/fluid/backward.py | 5 +- python/paddle/v2/fluid/framework.py | 42 +++++++++++---- .../fluid/memory_optimization_transpiler.py | 20 +++---- 9 files changed, 122 insertions(+), 41 deletions(-) diff --git a/paddle/fluid/framework/block_desc.cc b/paddle/fluid/framework/block_desc.cc index fbedd6c825..d72b64700f 100644 --- a/paddle/fluid/framework/block_desc.cc +++ b/paddle/fluid/framework/block_desc.cc @@ -16,6 +16,8 @@ limitations under the License. */ #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/program_desc.h" +#include + namespace paddle { namespace framework { @@ -64,12 +66,36 @@ VarDesc *BlockDesc::RenameVar(const std::string &old_name, VarDesc *BlockDesc::FindVarRecursive(const std::string &name) const { if (name == kEmptyVarName) return nullptr; - auto it = vars_.find(name); - if (it == vars_.end()) { - return Parent() == kNoneBlockIndex ? nullptr - : ParentBlock()->FindVarRecursive(name); + std::queue frontier; + std::unordered_set visited; + + frontier.push(this); + + while (!frontier.empty()) { // BFS + auto cur = frontier.front(); + frontier.pop(); + if (visited.count(cur) != 0) { + continue; + } + auto var = cur->FindVar(name); + if (var != nullptr) { + return var; + } + + auto fwd = cur->ForwardBlock(); + auto parent = cur->ParentBlock(); + + if (fwd != nullptr) { + frontier.push(fwd); + } + if (parent != nullptr) { + frontier.push(parent); + } + + visited.insert(cur); } - return it->second.get(); + + return nullptr; } VarDesc &BlockDesc::FindRecursiveOrCreateVar(const std::string &name_bytes) { @@ -155,10 +181,7 @@ void BlockDesc::Flush() { } BlockDesc *BlockDesc::ParentBlock() const { - if (this->desc_->parent_idx() == kNoneBlockIndex) { - return nullptr; - } - return prog_->MutableBlock(static_cast(this->desc_->parent_idx())); + return prog_->MutableBlock(static_cast(desc_->parent_idx())); } proto::BlockDesc *BlockDesc::Proto() { @@ -205,5 +228,16 @@ void BlockDesc::ClearPBVars() { } } +void BlockDesc::SetForwardBlockID(int32_t forward_block_id) { + PADDLE_ENFORCE(!desc_->has_forward_block_idx(), + "Parent block ID has been set to %d. 
Cannot set to %d", + desc_->forward_block_idx(), forward_block_id); + desc_->set_forward_block_idx(forward_block_id); +} + +BlockDesc *BlockDesc::ForwardBlock() const { + return prog_->MutableBlock(static_cast(desc_->forward_block_idx())); +} + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/block_desc.h b/paddle/fluid/framework/block_desc.h index b2375b53e3..3bd90f3890 100644 --- a/paddle/fluid/framework/block_desc.h +++ b/paddle/fluid/framework/block_desc.h @@ -49,6 +49,8 @@ class BlockDesc { int32_t Parent() const { return desc_->parent_idx(); } + int32_t ForwardBlockID() const { return desc_->forward_block_idx(); } + VarDesc *Var(const std::string &name_bytes); VarDesc *FindVar(const std::string &name_bytes) const; @@ -75,6 +77,10 @@ class BlockDesc { BlockDesc *ParentBlock() const; + BlockDesc *ForwardBlock() const; + + void SetForwardBlockID(int32_t forward_block_id); + OpDesc *AppendOp(); void AppendAllocatedOp(std::unique_ptr &&op_desc); @@ -93,7 +99,7 @@ class BlockDesc { proto::BlockDesc *Proto(); - ProgramDesc *Program() { return this->prog_; } + ProgramDesc *Program() const { return this->prog_; } private: void ClearPBOps(); diff --git a/paddle/fluid/framework/framework.proto b/paddle/fluid/framework/framework.proto index 4eb18b4e4d..5b43f5a8a4 100644 --- a/paddle/fluid/framework/framework.proto +++ b/paddle/fluid/framework/framework.proto @@ -158,6 +158,7 @@ message BlockDesc { required int32 parent_idx = 2; repeated VarDesc vars = 3; repeated OpDesc ops = 4; + optional int32 forward_block_idx = 5 [ default = -1 ]; } // Please refer to diff --git a/paddle/fluid/framework/program_desc.h b/paddle/fluid/framework/program_desc.h index 8d4b999ad2..538a037211 100644 --- a/paddle/fluid/framework/program_desc.h +++ b/paddle/fluid/framework/program_desc.h @@ -38,7 +38,13 @@ class ProgramDesc { BlockDesc *AppendBlock(const BlockDesc &parent); - BlockDesc *MutableBlock(size_t idx) { return blocks_[idx].get(); } + BlockDesc *MutableBlock(size_t idx) { + if (idx == static_cast(kNoneBlockIndex)) { + return nullptr; + } else { + return blocks_[idx].get(); + } + } const BlockDesc &Block(size_t idx) const { return *blocks_[idx]; } diff --git a/paddle/fluid/operators/while_op.cc b/paddle/fluid/operators/while_op.cc index 3d5cdeda26..8b62b242cf 100644 --- a/paddle/fluid/operators/while_op.cc +++ b/paddle/fluid/operators/while_op.cc @@ -231,7 +231,8 @@ class WhileGradOpDescMaker : public framework::SingleGradOpDescMaker { while_grad->SetInput(kStepScopes, Output(kStepScopes)); auto *grad_block = this->grad_block_[0]; - auto *fwd_block = grad_block->ParentBlock(); + auto *fwd_block = grad_block->ForwardBlock(); + auto *parent_block = grad_block->ParentBlock(); // Not all of IGs will be generated by inner gradient operators of while op. // Ignore IGs that is not generated by the inside block. @@ -260,33 +261,37 @@ class WhileGradOpDescMaker : public framework::SingleGradOpDescMaker { for (auto &o : Output(kOutputs)) { block_ins.insert(o); } - std::unordered_set extra_inputs; + std::unordered_set output_grads; for (const auto *op : grad_block->AllOps()) { for (auto &input_name : op->InputArgumentNames()) { // If the input of Op has been recorded or is generated by the forward // block, do not make it as input again. 
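
The BFS in BlockDesc::FindVarRecursive above is what this while_op change relies on: a while-grad block reaches variables defined in its forward while block only through the forward link, which is not on the parent chain. A toy model of the traversal, for illustration only (ToyBlock and the block ids below are hypothetical):

#include <queue>
#include <set>
#include <string>
#include <vector>

struct ToyBlock {
  int parent = -1;   // like BlockDesc::Parent()
  int forward = -1;  // like BlockDesc::ForwardBlockID()
  std::set<std::string> vars;
};

bool Reachable(const std::vector<ToyBlock>& blocks, int start,
               const std::string& name) {
  std::queue<int> frontier;
  std::set<int> visited;
  frontier.push(start);
  while (!frontier.empty()) {
    int cur = frontier.front();
    frontier.pop();
    if (!visited.insert(cur).second) continue;  // skip already-expanded blocks
    if (blocks[cur].vars.count(name)) return true;
    if (blocks[cur].forward != -1) frontier.push(blocks[cur].forward);
    if (blocks[cur].parent != -1) frontier.push(blocks[cur].parent);
  }
  return false;
}

// With blocks = {main{vars:{"x"}}, while{parent:0, vars:{"h"}},
// while_grad{parent:0, forward:1}}, Reachable(blocks, 2, "h") holds only
// because the forward link is followed; the parent chain alone misses it.
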
+ + // The input is located in I/O or other op's outputs or the variable is + // located in grad_block's parents if (block_ins.find(input_name) != block_ins.end() || - fwd_block->FindVar(input_name) != nullptr) { + (fwd_block->FindVarRecursive(input_name) != nullptr || + parent_block->FindVarRecursive(input_name) != nullptr)) { continue; } - extra_inputs.insert(input_name); + output_grads.insert(input_name); } for (auto &output_name : op->OutputArgumentNames()) { block_ins.insert(output_name); } } - std::vector extra_inputs_list; - extra_inputs_list.resize(extra_inputs.size()); - std::copy(extra_inputs.begin(), extra_inputs.end(), - extra_inputs_list.begin()); - while_grad->SetInput(framework::GradVarName(kOutputs), extra_inputs_list); + std::vector output_grads_list; + output_grads_list.resize(output_grads.size()); + std::copy(output_grads.begin(), output_grads.end(), + output_grads_list.begin()); + while_grad->SetInput(framework::GradVarName(kOutputs), output_grads_list); while_grad->SetAttrMap(this->Attrs()); while_grad->SetBlockAttr(kStepBlock, *grad_block); // record the original output gradient names, since the gradient name of // while operator could be renamed. - while_grad->SetAttr("original_output_grad", extra_inputs_list); + while_grad->SetAttr("original_output_grad", output_grads_list); return std::unique_ptr(while_grad); } diff --git a/paddle/fluid/pybind/protobuf.cc b/paddle/fluid/pybind/protobuf.cc index 1a9d7c421b..b725be7952 100644 --- a/paddle/fluid/pybind/protobuf.cc +++ b/paddle/fluid/pybind/protobuf.cc @@ -155,6 +155,8 @@ void BindBlockDesc(py::module &m) { py::class_(m, "BlockDesc", "") .def_property_readonly("id", &BlockDesc::ID) .def_property_readonly("parent", &BlockDesc::Parent) + .def("get_forward_block_idx", &BlockDesc::ForwardBlockID) + .def("set_forward_block_idx", &BlockDesc::SetForwardBlockID) .def("append_op", &BlockDesc::AppendOp, py::return_value_policy::reference) .def("prepend_op", &BlockDesc::PrependOp, diff --git a/python/paddle/v2/fluid/backward.py b/python/paddle/v2/fluid/backward.py index 33ff43f693..ba27aaa246 100644 --- a/python/paddle/v2/fluid/backward.py +++ b/python/paddle/v2/fluid/backward.py @@ -298,7 +298,8 @@ def _append_backward_ops_(block, # If the op has its own sub-block, deal with the sub-block first if op.has_attr("sub_block"): sub_block = program.block(op.block_attr("sub_block")) - grad_sub_block = program.create_block(parent_idx=sub_block.idx) + grad_sub_block = program.create_block() + grad_sub_block.set_forward_block_idx(sub_block.idx) cb = _callback_lookup_(op) if cb is not None: if callbacks is None: @@ -310,6 +311,8 @@ def _append_backward_ops_(block, else: _append_backward_ops_(sub_block, sub_block.ops, grad_sub_block, no_grad_dict, grad_to_var, callbacks) + + program.rollback() grad_sub_block_list.append(grad_sub_block.desc) # Getting op's corresponding grad_op diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/v2/fluid/framework.py index 1cb06c52a4..78318dc6d6 100644 --- a/python/paddle/v2/fluid/framework.py +++ b/python/paddle/v2/fluid/framework.py @@ -696,6 +696,13 @@ class Block(object): def parent_idx(self): return self.desc.parent + @property + def forward_block_idx(self): + return self.desc.get_forward_block_idx() + + def set_forward_block_idx(self, idx): + self.desc.set_forward_block_idx(idx) + @property def idx(self): return self.desc.id @@ -709,15 +716,32 @@ class Block(object): return v def var_recursive(self, name): - if self.has_var(name): - return self.var(name) - else: - if self.idx == 0: - raise 
ValueError("var %s is not in block(%d) nor its parents." % - name, self.idx) - else: - parent_block = self.program.block(self.parent_idx) - return parent_block.var_recursive(name) + frontier = list() + visited = set() + + frontier.append(self) + + prog = self.program + + while len(frontier) != 0: # BFS + cur = frontier[0] + frontier = frontier[1:] + + if id(cur) in visited: + continue + + if cur.has_var(name): + return cur.var(name) + + if cur.parent_idx != -1: + frontier.append(prog.block(cur.parent_idx)) + + if cur.forward_block_idx != -1: + frontier.append(prog.block(cur.forward_block_idx)) + + visited.add(id(cur)) + + raise ValueError("Var {0} is not found recursively".format(name)) def all_parameters(self): return list(self.iter_parameters()) diff --git a/python/paddle/v2/fluid/memory_optimization_transpiler.py b/python/paddle/v2/fluid/memory_optimization_transpiler.py index ee56ccdcf1..6952ca7fe4 100644 --- a/python/paddle/v2/fluid/memory_optimization_transpiler.py +++ b/python/paddle/v2/fluid/memory_optimization_transpiler.py @@ -223,15 +223,15 @@ def get_cfgs(input_program): # Find while/while_grad block pair for grad_id in while_grad_sub_block_ids: - parent_id = pdesc.block(grad_id).parent - if parent_id in while_sub_block_ids: - while_block_id_pair.append((parent_id, grad_id)) - while_sub_block_ids.remove(parent_id) + forward_id = pdesc.block(grad_id).get_forward_block_idx() + if forward_id in while_sub_block_ids: + while_block_id_pair.append((forward_id, grad_id)) + while_sub_block_ids.remove(forward_id) # Get while/while_grad block ops - for parent_id, grad_id in while_block_id_pair: + for forward_id, grad_id in while_block_id_pair: while_block_ops = [] - while_block = pdesc.block(parent_id) + while_block = pdesc.block(forward_id) while_block_op_size = while_block.op_size() for i in range(while_block_op_size): while_block_ops.append(while_block.op(i)) @@ -242,21 +242,21 @@ def get_cfgs(input_program): while_block_ops.append(while_grad_block.op(i)) while_op_output = set() - while_op_output.update(while_op_dict[parent_id].output_arg_names()) + while_op_output.update(while_op_dict[forward_id].output_arg_names()) while_op_output.update(while_op_dict[grad_id].output_arg_names()) ops_list.append((while_block_ops, while_block_op_size, while_op_output)) # Process rest while block ops - for parent_id in while_sub_block_ids: + for forward_id in while_sub_block_ids: while_block_ops = [] - while_block = pdesc.block(parent_id) + while_block = pdesc.block(forward_id) while_block_op_size = while_block.op_size() for i in range(while_block_op_size): while_block_ops.append(while_block.op(i)) while_op_output = set() - while_op_output.update(while_op_dict[parent_id].output_arg_names()) + while_op_output.update(while_op_dict[forward_id].output_arg_names()) ops_list.append((while_block_ops, while_block_op_size, while_op_output)) -- GitLab From d4dabe3e0bc3db35f8599aed5351a4c308014f1a Mon Sep 17 00:00:00 2001 From: "Yang Yang(Tony)" Date: Fri, 23 Feb 2018 15:37:32 -0800 Subject: [PATCH 153/217] framework.py enhancement (#8471) * framework.py enhancement * polish * clean up * enforce the inputs of Operator __init__ of type Variable * python2 assert * reverse assert --- python/paddle/v2/fluid/framework.py | 27 ++++++++++++++++----------- python/paddle/v2/fluid/layers/nn.py | 2 +- 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/v2/fluid/framework.py index 78318dc6d6..0f6cb90e27 100644 --- a/python/paddle/v2/fluid/framework.py +++ 
b/python/paddle/v2/fluid/framework.py @@ -152,7 +152,7 @@ class Variable(object): shape(tuple|list|None): The shape of variable. -1 means the batch size. Some kinds of variable do not contain shape, just set it to None. dtype(np.dtype|core.VarDesc.VarType|str): The data type of variable. - lod_level(int): The level of lod tensor. 0 means there is not a time + lod_level(int): The level of lod tensor. 0 means it is not a time series data. persistable(bool): True if the variable should be saved as check point. Defaults to False. @@ -346,7 +346,7 @@ class OpProtoHolder(object): def __init__(self): assert not hasattr( self.__class__, - '_instance'), 'Please use `instance()` to get OpProtoHolder opject!' + '_instance'), 'Please use `instance()` to get OpProtoHolder object!' op_protos = get_all_op_protos() self.op_proto_map = {} for proto in op_protos: @@ -368,8 +368,8 @@ class OpProtoHolder(object): class Operator(object): """ - Python Operator class. The operator represents the build in instructs in a - Block. Users can use the build in instructs to describe their neural + Python Operator class. The operator represents the build in instructions in a + Block. Users can use the build in instructions to describe their neural network. """ @@ -478,7 +478,7 @@ class Operator(object): raise TypeError("'attrs' should be a dict.") for attr in proto.attrs: attr_name = attr.name - if (not attr_name in attrs) or (attrs[attr_name] is None): + if (attr_name not in attrs) or (attrs[attr_name] is None): continue if isinstance(attrs[attr_name], Block): self.desc.set_block_attr(attr_name, attrs[attr_name].desc) @@ -751,7 +751,7 @@ class Block(object): if isinstance(item[1], Parameter)) def create_var(self, *args, **kwargs): - var = Variable(self, *args, **kwargs) + var = Variable(block=self, *args, **kwargs) if 'initializer' in kwargs: kwargs['initializer'](var, self) return var @@ -822,13 +822,13 @@ class Block(object): def append_op(self, *args, **kwargs): op_desc = self.desc.append_op() - op = Operator(self, op_desc, *args, **kwargs) + op = Operator(block=self, desc=op_desc, *args, **kwargs) self.ops.append(op) return op def delete_ops(self, ops): # remove from cpp - # FIXME(typhoonzero): remove only the first occuracy. + # FIXME(typhoonzero): remove only the first occurrence. try: start = list(self.ops).index(ops[0]) end = list(self.ops).index(ops[-1]) @@ -846,6 +846,11 @@ class Block(object): return op def sync_with_cpp(self): + """ + Sync with the desc on the c++ end. + + This method is used to synchronize the c++ desc instance generated by backward. + """ # sync variables from cpp for var in self.desc.all_vars(): if not self.has_var(var.name()): @@ -891,9 +896,9 @@ class Block(object): def copy_param_info_from(self, other): """ - Copy the information of parameters from other block + Copy the information of parameters from the other block Args: - other(Block): other block + other(Block): the other block Returns: None @@ -1239,6 +1244,6 @@ def get_var(name, program=None): if program is None: program = default_main_program() assert isinstance(name, str) - assert isinstance(name, Program) + assert isinstance(program, Program) return program.global_block().var(name) diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index c4baa62ccd..e8b4cec6ee 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -104,7 +104,7 @@ def fc(input, * :math:`X_i`: The input tensor. * :math:`W`: The weights created by this layer. 
* :math:`b`: The bias parameter created by this layer (if needed). - * :math:`Act`: The activation funtion. + * :math:`Act`: The activation function. * :math:`Out`: The output tensor. Args: -- GitLab From e8cb97b8a2b46b379571c5681359c68576fb3909 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sat, 24 Feb 2018 11:34:05 +0800 Subject: [PATCH 154/217] Moving unique_name to python * Add reset and guard to unique_name --- paddle/fluid/pybind/pybind.cc | 6 -- python/paddle/v2/fluid/__init__.py | 26 ++----- python/paddle/v2/fluid/backward.py | 3 +- python/paddle/v2/fluid/evaluator.py | 5 +- python/paddle/v2/fluid/framework.py | 23 +----- python/paddle/v2/fluid/layer_helper.py | 29 +++++--- python/paddle/v2/fluid/layers/control_flow.py | 36 +++++---- python/paddle/v2/fluid/layers/device.py | 3 +- .../paddle/v2/fluid/layers/math_op_patch.py | 2 +- python/paddle/v2/fluid/optimizer.py | 13 ++-- python/paddle/v2/fluid/unique_name.py | 74 +++++++++++++++++++ 11 files changed, 134 insertions(+), 86 deletions(-) create mode 100644 python/paddle/v2/fluid/unique_name.py diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index 56c1a935d9..ef495a0dea 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -48,11 +48,6 @@ PYBIND11_MAKE_OPAQUE(paddle::framework::LoDTensorArray); namespace paddle { namespace pybind { -static size_t UniqueIntegerGenerator(const std::string &prefix) { - static std::unordered_map> generators; - return generators[prefix].fetch_add(1); -} - bool IsCompiledWithCUDA() { #ifndef PADDLE_WITH_CUDA return false; @@ -409,7 +404,6 @@ All parameter, weight, gradient are variables in Paddle. (void (Executor::*)(const ProgramDesc &, Scope *, int, bool, bool)) & Executor::Run); - m.def("unique_integer", UniqueIntegerGenerator); m.def("init_gflags", framework::InitGflags); m.def("init_glog", framework::InitGLOG); m.def("init_devices", &framework::InitDevices); diff --git a/python/paddle/v2/fluid/__init__.py b/python/paddle/v2/fluid/__init__.py index 361fb3f5ad..032dd2683d 100644 --- a/python/paddle/v2/fluid/__init__.py +++ b/python/paddle/v2/fluid/__init__.py @@ -39,30 +39,16 @@ from concurrency import (Go, make_channel, channel_send, channel_recv, import clip from memory_optimization_transpiler import memory_optimize import profiler +import unique_name Tensor = LoDTensor __all__ = framework.__all__ + executor.__all__ + concurrency.__all__ + [ - 'io', - 'initializer', - 'layers', - 'nets', - 'optimizer', - 'learning_rate_decay', - 'backward', - 'regularizer', - 'LoDTensor', - 'CPUPlace', - 'CUDAPlace', - 'Tensor', - 'ParamAttr', - 'WeightNormParamAttr', - 'DataFeeder', - 'clip', - 'SimpleDistributeTranspiler', - 'DistributeTranspiler', - 'memory_optimize', - 'profiler', + 'io', 'initializer', 'layers', 'nets', 'optimizer', 'learning_rate_decay', + 'backward', 'regularizer', 'LoDTensor', 'CPUPlace', 'CUDAPlace', 'Tensor', + 'ParamAttr', 'WeightNormParamAttr', 'DataFeeder', 'clip', + 'SimpleDistributeTranspiler', 'DistributeTranspiler', 'memory_optimize', + 'profiler', 'unique_name' ] diff --git a/python/paddle/v2/fluid/backward.py b/python/paddle/v2/fluid/backward.py index 33ff43f693..f2e345a053 100644 --- a/python/paddle/v2/fluid/backward.py +++ b/python/paddle/v2/fluid/backward.py @@ -16,6 +16,7 @@ from paddle.v2.fluid import framework as framework from . 
import core import collections import copy +import unique_name __all__ = [ 'append_backward', @@ -388,7 +389,7 @@ def _rename_grad_(block, start_op_idx, grad_to_var, target_grad_map): for name in op_desc.output_arg_names(): if block.desc.find_var(name.encode("ascii")): - new_name = "%s_%s" % (name, core.unique_integer(name)) + new_name = unique_name.generate(name) op_desc.rename_output(name, new_name) var_map[name] = new_name diff --git a/python/paddle/v2/fluid/evaluator.py b/python/paddle/v2/fluid/evaluator.py index 1f4618310c..8cc4905333 100644 --- a/python/paddle/v2/fluid/evaluator.py +++ b/python/paddle/v2/fluid/evaluator.py @@ -15,7 +15,8 @@ import numpy as np import layers -from framework import Program, unique_name, Variable, program_guard +from framework import Program, Variable, program_guard +import unique_name from layer_helper import LayerHelper __all__ = [ @@ -96,7 +97,7 @@ class Evaluator(object): """ state = self.helper.create_variable( - name="_".join([unique_name(self.helper.name), suffix]), + name="_".join([unique_name.generate(self.helper.name), suffix]), persistable=True, dtype=dtype, shape=shape) diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/v2/fluid/framework.py index 0e11709296..f92e956028 100644 --- a/python/paddle/v2/fluid/framework.py +++ b/python/paddle/v2/fluid/framework.py @@ -20,6 +20,7 @@ import numpy as np import proto.framework_pb2 as framework_pb2 from . import core +import unique_name __all__ = [ 'Block', @@ -47,20 +48,6 @@ def grad_var_name(var_name): return var_name + GRAD_VAR_SUFFIX -def unique_name(prefix): - """ - Generate unique names with prefix - - Args: - prefix(str): The prefix of return string - - Returns(str): A unique string with the prefix - - """ - uid = core.unique_integer(prefix) # unique during whole process. - return "_".join([prefix, str(uid)]) - - def convert_np_dtype_to_dtype_(np_dtype): """ Convert the data type in numpy to the data type in Paddle @@ -175,7 +162,7 @@ class Variable(object): self.error_clip = error_clip if name is None: - name = Variable._unique_var_name_() + name = unique_name.generate('_generated_var') is_new_var = False self.desc = self.block.desc.find_var(name) @@ -303,12 +290,6 @@ class Variable(object): def type(self): return self.desc.type() - @staticmethod - def _unique_var_name_(): - prefix = "_generated_var" - uid = core.unique_integer(prefix) # unique during whole process. 
- return "_".join([prefix, str(uid)]) - def set_error_clip(self, error_clip): self.error_clip = error_clip diff --git a/python/paddle/v2/fluid/layer_helper.py b/python/paddle/v2/fluid/layer_helper.py index e7abc23f2f..dc4f992ddc 100644 --- a/python/paddle/v2/fluid/layer_helper.py +++ b/python/paddle/v2/fluid/layer_helper.py @@ -15,8 +15,8 @@ import copy import itertools -from framework import Variable, Parameter, default_main_program, default_startup_program, \ - unique_name, dtype_is_floating +from framework import Variable, Parameter, default_main_program, default_startup_program, dtype_is_floating +import unique_name from paddle.v2.fluid.initializer import Constant, Xavier from param_attr import ParamAttr, WeightNormParamAttr @@ -27,7 +27,7 @@ class LayerHelper(object): self.layer_type = layer_type name = self.kwargs.get('name', None) if name is None: - self.kwargs['name'] = unique_name(self.layer_type) + self.kwargs['name'] = unique_name.generate(self.layer_type) @property def name(self): @@ -117,17 +117,20 @@ class LayerHelper(object): block=self.startup_program.global_block()): if out is None: out = block.create_var( - name=unique_name(".".join([self.name, 'weight_norm_norm'])), + name=unique_name.generate(".".join( + [self.name, 'weight_norm_norm'])), dtype=dtype, persistable=False) abs_out = block.create_var( - name=unique_name(".".join([self.name, 'weight_norm_abs'])), + name=unique_name.generate(".".join( + [self.name, 'weight_norm_abs'])), dtype=dtype, persistable=False) block.append_op( type='abs', inputs={'X': x}, outputs={'Out': abs_out}) pow_out = block.create_var( - name=unique_name(".".join([self.name, 'weight_norm_pow'])), + name=unique_name.generate(".".join( + [self.name, 'weight_norm_pow'])), dtype=dtype, persistable=False) block.append_op( @@ -136,7 +139,8 @@ class LayerHelper(object): outputs={'Out': pow_out}, attrs={'factor': float(p)}) sum_out = block.create_var( - name=unique_name(".".join([self.name, 'weight_norm_sum'])), + name=unique_name.generate(".".join( + [self.name, 'weight_norm_sum'])), dtype=dtype, persistable=False) block.append_op( @@ -161,7 +165,7 @@ class LayerHelper(object): block=self.startup_program.global_block()): if out is None: out = block.create_var( - name=unique_name(".".join( + name=unique_name.generate(".".join( [self.name, 'weight_norm_reshape'])), dtype=dtype, persistable=False) @@ -178,7 +182,7 @@ class LayerHelper(object): block=self.startup_program.global_block()): if out is None: out = block.create_var( - name=unique_name(".".join( + name=unique_name.generate(".".join( [self.name, 'weight_norm_transpose'])), dtype=dtype, persistable=False) @@ -196,7 +200,8 @@ class LayerHelper(object): """Computes the norm over all dimensions except dim""" if out is None: out = block.create_var( - name=unique_name(".".join([self.name, 'weight_norm_norm'])), + name=unique_name.generate(".".join( + [self.name, 'weight_norm_norm'])), dtype=dtype, persistable=False) if dim is None: @@ -286,7 +291,7 @@ class LayerHelper(object): assert isinstance(attr, ParamAttr) suffix = 'b' if is_bias else 'w' if attr.name is None: - attr.name = unique_name(".".join([self.name, suffix])) + attr.name = unique_name.generate(".".join([self.name, suffix])) if default_initializer is None and attr.initializer is None: if is_bias: @@ -316,7 +321,7 @@ class LayerHelper(object): def create_tmp_variable(self, dtype, stop_gradient=False): return self.main_program.current_block().create_var( - name=unique_name(".".join([self.name, 'tmp'])), + 
name=unique_name.generate(".".join([self.name, 'tmp'])), dtype=dtype, persistable=False, stop_gradient=stop_gradient) diff --git a/python/paddle/v2/fluid/layers/control_flow.py b/python/paddle/v2/fluid/layers/control_flow.py index b9ab28a86a..29edf064ba 100644 --- a/python/paddle/v2/fluid/layers/control_flow.py +++ b/python/paddle/v2/fluid/layers/control_flow.py @@ -428,7 +428,8 @@ class StaticRNN(object): raise ValueError( "if init is None, memory at least need shape and batch_ref") parent_block = self.parent_block() - var_name = unique_name("@".join([self.helper.name, "memory_boot"])) + var_name = unique_name.generate("@".join( + [self.helper.name, "memory_boot"])) boot_var = parent_block.create_var( name=var_name, shape=shape, @@ -450,7 +451,7 @@ class StaticRNN(object): return self.memory(init=boot_var) else: pre_mem = self.helper.create_variable( - name=unique_name("@".join([self.helper.name, "mem"])), + name=unique_name.generate("@".join([self.helper.name, "mem"])), dtype=init.dtype, shape=init.shape) self.memories[pre_mem.name] = StaticRNNMemoryLink( @@ -709,7 +710,7 @@ def lod_rank_table(x, level=0): helper = LayerHelper("lod_rank_table", **locals()) table = helper.create_variable( type=core.VarDesc.VarType.LOD_RANK_TABLE, - name=unique_name("lod_rank_table")) + name=unique_name.generate("lod_rank_table")) helper.append_op( type='lod_rank_table', inputs={'X': x}, @@ -807,7 +808,7 @@ def lod_tensor_to_array(x, table): """ helper = LayerHelper("lod_tensor_to_array", **locals()) array = helper.create_variable( - name=unique_name("lod_tensor_to_array"), + name=unique_name.generate("lod_tensor_to_array"), type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, dtype=x.dtype) helper.append_op( @@ -1264,11 +1265,11 @@ class IfElse(object): if id(x) not in self.input_table: parent_block = self.parent_block() out_true = parent_block.create_var( - name=unique_name('ifelse_input' + self.helper.name), + name=unique_name.generate('ifelse_input' + self.helper.name), dtype=x.dtype) out_false = parent_block.create_var( - name=unique_name('ifelse_input' + self.helper.name), + name=unique_name.generate('ifelse_input' + self.helper.name), dtype=x.dtype) parent_block.append_op( type='split_lod_tensor', @@ -1310,7 +1311,8 @@ class IfElse(object): raise TypeError("Each output should be a variable") # create outside tensor outside_out = parent_block.create_var( - name=unique_name("_".join([self.helper.name, 'output'])), + name=unique_name.generate("_".join( + [self.helper.name, 'output'])), dtype=each_out.dtype) out_table.append(outside_out) @@ -1373,7 +1375,7 @@ class DynamicRNN(object): parent_block = self._parent_block_() if self.lod_rank_table is None: self.lod_rank_table = parent_block.create_var( - name=unique_name('lod_rank_table'), + name=unique_name.generate('lod_rank_table'), type=core.VarDesc.VarType.LOD_RANK_TABLE) self.lod_rank_table.stop_gradient = True parent_block.append_op( @@ -1381,7 +1383,8 @@ class DynamicRNN(object): inputs={"X": x}, outputs={"Out": self.lod_rank_table}) self.max_seq_len = parent_block.create_var( - name=unique_name('dynamic_rnn_max_seq_len'), dtype='int64') + name=unique_name.generate('dynamic_rnn_max_seq_len'), + dtype='int64') self.max_seq_len.stop_gradient = False parent_block.append_op( type='max_sequence_len', @@ -1395,7 +1398,7 @@ class DynamicRNN(object): outputs={'Out': self.cond}) input_array = parent_block.create_var( - name=unique_name('dynamic_rnn_input_array'), + name=unique_name.generate('dynamic_rnn_input_array'), type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, 
dtype=x.dtype) self.input_array.append((input_array, x.dtype)) @@ -1416,7 +1419,7 @@ class DynamicRNN(object): "static_input() must be called after step_input().") parent_block = self._parent_block_() x_reordered = parent_block.create_var( - name=unique_name("dynamic_rnn_static_input_reordered"), + name=unique_name.generate("dynamic_rnn_static_input_reordered"), type=core.VarDesc.VarType.LOD_TENSOR, dtype=x.dtype) parent_block.append_op( @@ -1478,7 +1481,7 @@ class DynamicRNN(object): 'invoked before ' 'memory(init=init, need_reordered=True, ...).') init_reordered = parent_block.create_var( - name=unique_name('dynamic_rnn_mem_init_reordered'), + name=unique_name.generate('dynamic_rnn_mem_init_reordered'), type=core.VarDesc.VarType.LOD_TENSOR, dtype=init.dtype) parent_block.append_op( @@ -1490,7 +1493,7 @@ class DynamicRNN(object): outputs={'Out': [init_reordered]}) init_tensor = init_reordered mem_array = parent_block.create_var( - name=unique_name('dynamic_rnn_mem_array'), + name=unique_name.generate('dynamic_rnn_mem_array'), type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, dtype=init.dtype) parent_block.append_op( @@ -1510,9 +1513,10 @@ class DynamicRNN(object): ) parent_block = self._parent_block_() init = parent_block.create_var( - name=unique_name('mem_init'), dtype=dtype) + name=unique_name.generate('mem_init'), dtype=dtype) arr, dtype = self.input_array[0] - in0 = parent_block.create_var(name=unique_name('in0'), dtype=dtype) + in0 = parent_block.create_var( + name=unique_name.generate('in0'), dtype=dtype) parent_block.append_op( type='read_from_array', inputs={'X': [arr], @@ -1551,7 +1555,7 @@ class DynamicRNN(object): parent_block = self._parent_block_() for each in outputs: outside_array = parent_block.create_var( - name=unique_name("_".join( + name=unique_name.generate("_".join( [self.helper.name, "output_array", each.name])), type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, dtype=each.dtype) diff --git a/python/paddle/v2/fluid/layers/device.py b/python/paddle/v2/fluid/layers/device.py index 3fee263ac0..e0c1aab230 100644 --- a/python/paddle/v2/fluid/layers/device.py +++ b/python/paddle/v2/fluid/layers/device.py @@ -25,7 +25,8 @@ __all__ = ['get_places'] @autodoc() def get_places(device_count=None, device_type=None): helper = LayerHelper('get_places', **locals()) - out_places = helper.create_variable(name=unique_name(helper.name + ".out")) + out_places = helper.create_variable( + name=unique_name.generate(helper.name + ".out")) attrs = dict() if device_count is not None: attrs['device_count'] = int(device_count) diff --git a/python/paddle/v2/fluid/layers/math_op_patch.py b/python/paddle/v2/fluid/layers/math_op_patch.py index 417a01b76f..beebc1a85f 100644 --- a/python/paddle/v2/fluid/layers/math_op_patch.py +++ b/python/paddle/v2/fluid/layers/math_op_patch.py @@ -21,7 +21,7 @@ __all__ = ['monkey_patch_variable'] def monkey_patch_variable(): def unique_tmp_name(): - return unique_name("tmp") + return unique_name.generate("tmp") def safe_get_dtype(var): try: diff --git a/python/paddle/v2/fluid/optimizer.py b/python/paddle/v2/fluid/optimizer.py index ecc42f6215..61febc4e38 100644 --- a/python/paddle/v2/fluid/optimizer.py +++ b/python/paddle/v2/fluid/optimizer.py @@ -17,7 +17,8 @@ from collections import defaultdict import framework import layers from backward import append_backward -from framework import unique_name, program_guard +from framework import program_guard +import unique_name from initializer import Constant from layer_helper import LayerHelper from regularizer import 
append_regularization_ops @@ -49,7 +50,7 @@ class Optimizer(object): def _create_global_learning_rate(self): if isinstance(self._global_learning_rate, float): self._global_learning_rate = layers.create_global_var( - name=unique_name("learning_rate"), + name=unique_name.generate("learning_rate"), shape=[1], value=float(self._global_learning_rate), dtype='float32', @@ -118,7 +119,7 @@ class Optimizer(object): assert isinstance(self.helper, LayerHelper) var = self.helper.create_global_variable( - name=unique_name(name), + name=unique_name.generate(name), persistable=True, dtype=dtype or param.dtype, type=param.type, @@ -379,7 +380,7 @@ class AdamOptimizer(Optimizer): # Create beta1 and beta2 power tensors beta_shape = [1] self._beta1_pow_acc = self.helper.create_global_variable( - name=unique_name('beta1_pow_acc'), + name=unique_name.generate('beta1_pow_acc'), dtype='float32', shape=beta_shape, lod_level=0, @@ -388,7 +389,7 @@ class AdamOptimizer(Optimizer): self._beta1_pow_acc, initializer=Constant(self._beta1)) self._beta2_pow_acc = self.helper.create_global_variable( - name=unique_name('beta2_pow_acc'), + name=unique_name.generate('beta2_pow_acc'), dtype='float32', shape=beta_shape, lod_level=0, @@ -481,7 +482,7 @@ class AdamaxOptimizer(Optimizer): # Create beta1 power accumulator tensor beta_shape = [1] self._beta1_pow_acc = self.helper.create_global_variable( - name=unique_name('beta1_pow_acc'), + name=unique_name.generate('beta1_pow_acc'), dtype='float32', shape=beta_shape, lod_level=0, diff --git a/python/paddle/v2/fluid/unique_name.py b/python/paddle/v2/fluid/unique_name.py new file mode 100644 index 0000000000..034caac962 --- /dev/null +++ b/python/paddle/v2/fluid/unique_name.py @@ -0,0 +1,74 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections +import contextlib +import sys + +__all__ = ['generate', 'switch', 'guard', 'UniqueNameGenerator'] + + +class UniqueNameGenerator(object): + """ + Generate unique names with a prefix. + + Args: + prefix(str): The generated name prefix. All generated names will + start with this prefix. + """ + + def __init__(self, prefix=None): + self.ids = collections.defaultdict(int) + if prefix is None: + prefix = "" + self.prefix = prefix + + def __call__(self, key): + """ + Generate a unique name with the prefix. + + Args: + key(str): The key of the returned string.
+ + Returns(str): A unique string with the prefix + """ + tmp = self.ids[key] + self.ids[key] += 1 + return self.prefix + "_".join([key, str(tmp)]) + + +generator = UniqueNameGenerator() + + +def generate(prefix): + return generator(prefix) + + +def switch(new_generator=None): + global generator + old = generator + if new_generator is None: + generator = UniqueNameGenerator() + else: + generator = new_generator + return old + + +@contextlib.contextmanager +def guard(new_generator=None): + if isinstance(new_generator, basestring): + new_generator = UniqueNameGenerator(new_generator) + old = switch(new_generator) + yield + switch(old) -- GitLab From bad01596f9bae92d2b3bd0b29be582d0ecad3705 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sat, 24 Feb 2018 12:47:57 +0800 Subject: [PATCH 155/217] rename register macro --- paddle/fluid/operators/compare_op.cc | 18 +++++++++--------- paddle/fluid/operators/compare_op.cu | 8 ++++---- paddle/fluid/operators/compare_op.h | 2 +- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/paddle/fluid/operators/compare_op.cc b/paddle/fluid/operators/compare_op.cc index cdeb28cc1d..46d6d0fd86 100644 --- a/paddle/fluid/operators/compare_op.cc +++ b/paddle/fluid/operators/compare_op.cc @@ -83,7 +83,7 @@ class CompareOp : public framework::OperatorWithKernel { } // namespace operators } // namespace paddle -#define REGISTER_LOGICAL_OP(op_type, _equation) \ +#define REGISTER_COMPARE_OP(op_type, _equation) \ struct _##op_type##Comment { \ static char type[]; \ static char equation[]; \ @@ -96,11 +96,11 @@ class CompareOp : public framework::OperatorWithKernel { ::paddle::operators::CompareOpInferShape<_##op_type##Comment>, \ ::paddle::framework::EmptyGradOpMaker); -REGISTER_LOGICAL_OP(less_than, "Out = X < Y"); -REGISTER_LOGICAL_KERNEL(less_than, CPU, paddle::operators::LessThanFunctor); -REGISTER_LOGICAL_OP(less_equal, "Out = X <= Y"); -REGISTER_LOGICAL_KERNEL(less_equal, CPU, paddle::operators::LessEqualFunctor); -REGISTER_LOGICAL_OP(equal, "Out = X == Y"); -REGISTER_LOGICAL_KERNEL(equal, CPU, paddle::operators::EqualFunctor); -REGISTER_LOGICAL_OP(not_equal, "Out = X != Y"); -REGISTER_LOGICAL_KERNEL(not_equal, CPU, paddle::operators::NotEqualFunctor); +REGISTER_COMPARE_OP(less_than, "Out = X < Y"); +REGISTER_COMPARE_KERNEL(less_than, CPU, paddle::operators::LessThanFunctor); +REGISTER_COMPARE_OP(less_equal, "Out = X <= Y"); +REGISTER_COMPARE_KERNEL(less_equal, CPU, paddle::operators::LessEqualFunctor); +REGISTER_COMPARE_OP(equal, "Out = X == Y"); +REGISTER_COMPARE_KERNEL(equal, CPU, paddle::operators::EqualFunctor); +REGISTER_COMPARE_OP(not_equal, "Out = X != Y"); +REGISTER_COMPARE_KERNEL(not_equal, CPU, paddle::operators::NotEqualFunctor); diff --git a/paddle/fluid/operators/compare_op.cu b/paddle/fluid/operators/compare_op.cu index 2cc0c7c572..c6c83b44b8 100644 --- a/paddle/fluid/operators/compare_op.cu +++ b/paddle/fluid/operators/compare_op.cu @@ -14,7 +14,7 @@ limitations under the License. 
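For readers who want a feel for the new `unique_name` module introduced just above, here is a minimal usage sketch. The printed values follow directly from `UniqueNameGenerator.__call__` as defined in this patch (default empty prefix, one counter per key); the keys themselves are illustrative.

```python
import paddle.v2.fluid.unique_name as unique_name

# Each key keeps its own counter, yielding "<prefix><key>_<n>".
print unique_name.generate('fc')    # 'fc_0'
print unique_name.generate('fc')    # 'fc_1'
print unique_name.generate('tmp')   # 'tmp_0'

# guard() installs a fresh generator, so counters restart inside the
# block, and the previous generator is restored on exit.
with unique_name.guard():
    print unique_name.generate('fc')  # 'fc_0' again

# A string argument builds UniqueNameGenerator(prefix='A'), so every
# name generated inside the block carries that prefix.
with unique_name.guard('A'):
    print unique_name.generate('fc')  # 'Afc_0'
```

`switch()` offers the same swap without the context manager: it installs a new generator and returns the old one so the caller can restore it later.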
*/ #include "paddle/fluid/operators/compare_op.h" -REGISTER_LOGICAL_KERNEL(less_than, CUDA, paddle::operators::LessThanFunctor); -REGISTER_LOGICAL_KERNEL(less_equal, CUDA, paddle::operators::LessEqualFunctor); -REGISTER_LOGICAL_KERNEL(equal, CUDA, paddle::operators::EqualFunctor); -REGISTER_LOGICAL_KERNEL(not_equal, CUDA, paddle::operators::NotEqualFunctor); +REGISTER_COMPARE_KERNEL(less_than, CUDA, paddle::operators::LessThanFunctor); +REGISTER_COMPARE_KERNEL(less_equal, CUDA, paddle::operators::LessEqualFunctor); +REGISTER_COMPARE_KERNEL(equal, CUDA, paddle::operators::EqualFunctor); +REGISTER_COMPARE_KERNEL(not_equal, CUDA, paddle::operators::NotEqualFunctor); diff --git a/paddle/fluid/operators/compare_op.h b/paddle/fluid/operators/compare_op.h index 7e78269cf4..6638e5ae9e 100644 --- a/paddle/fluid/operators/compare_op.h +++ b/paddle/fluid/operators/compare_op.h @@ -76,7 +76,7 @@ class CompareOpKernel } // namespace operators } // namespace paddle -#define REGISTER_LOGICAL_KERNEL(op_type, dev, functor) \ +#define REGISTER_COMPARE_KERNEL(op_type, dev, functor) \ REGISTER_OP_##dev##_KERNEL( \ op_type, ::paddle::operators::CompareOpKernel< \ ::paddle::platform::dev##DeviceContext, functor>, \ -- GitLab From ff544169318af1ade478602fccf977c34354f495 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sat, 24 Feb 2018 12:51:24 +0800 Subject: [PATCH 156/217] Add unittest of unique_name * Also follow comments, change prefix to key --- .../fluid/tests/unittests/test_unique_name.py | 43 +++++++++++++++++++ python/paddle/v2/fluid/unique_name.py | 4 +- 2 files changed, 45 insertions(+), 2 deletions(-) create mode 100644 python/paddle/v2/fluid/tests/unittests/test_unique_name.py diff --git a/python/paddle/v2/fluid/tests/unittests/test_unique_name.py b/python/paddle/v2/fluid/tests/unittests/test_unique_name.py new file mode 100644 index 0000000000..e28810c96b --- /dev/null +++ b/python/paddle/v2/fluid/tests/unittests/test_unique_name.py @@ -0,0 +1,43 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest +import paddle.v2.fluid as fluid + + +class TestUniqueName(unittest.TestCase): + def test_guard(self): + with fluid.unique_name.guard(): + name_1 = fluid.unique_name.generate('') + + with fluid.unique_name.guard(): + name_2 = fluid.unique_name.generate('') + + self.assertEqual(name_1, name_2) + + with fluid.unique_name.guard("A"): + name_1 = fluid.unique_name.generate('') + + with fluid.unique_name.guard('B'): + name_2 = fluid.unique_name.generate('') + + self.assertNotEqual(name_1, name_2) + + def test_generate(self): + with fluid.unique_name.guard(): + name1 = fluid.unique_name.generate('fc') + name2 = fluid.unique_name.generate('fc') + name3 = fluid.unique_name.generate('tmp') + self.assertNotEqual(name1, name2) + self.assertEqual(name1[-2:], name3[-2:]) diff --git a/python/paddle/v2/fluid/unique_name.py b/python/paddle/v2/fluid/unique_name.py index 034caac962..33c53113ae 100644 --- a/python/paddle/v2/fluid/unique_name.py +++ b/python/paddle/v2/fluid/unique_name.py @@ -51,8 +51,8 @@ class UniqueNameGenerator(object): generator = UniqueNameGenerator() -def generate(prefix): - return generator(prefix) +def generate(key): + return generator(key) def switch(new_generator=None): -- GitLab From 63563b2fec9be26af1c73c97e94d455a1f45f5d0 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sat, 24 Feb 2018 13:00:21 +0800 Subject: [PATCH 157/217] Follow comments --- python/paddle/v2/fluid/__init__.py | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/python/paddle/v2/fluid/__init__.py b/python/paddle/v2/fluid/__init__.py index 032dd2683d..39d13d3ab5 100644 --- a/python/paddle/v2/fluid/__init__.py +++ b/python/paddle/v2/fluid/__init__.py @@ -44,11 +44,27 @@ import unique_name Tensor = LoDTensor __all__ = framework.__all__ + executor.__all__ + concurrency.__all__ + [ - 'io', 'initializer', 'layers', 'nets', 'optimizer', 'learning_rate_decay', - 'backward', 'regularizer', 'LoDTensor', 'CPUPlace', 'CUDAPlace', 'Tensor', - 'ParamAttr', 'WeightNormParamAttr', 'DataFeeder', 'clip', - 'SimpleDistributeTranspiler', 'DistributeTranspiler', 'memory_optimize', - 'profiler', 'unique_name' + 'io', + 'initializer', + 'layers', + 'nets', + 'optimizer', + 'learning_rate_decay', + 'backward', + 'regularizer', + 'LoDTensor', + 'CPUPlace', + 'CUDAPlace', + 'Tensor', + 'ParamAttr', + 'WeightNormParamAttr', + 'DataFeeder', + 'clip', + 'SimpleDistributeTranspiler', + 'DistributeTranspiler', + 'memory_optimize', + 'profiler', + 'unique_name', ] -- GitLab From d4e3495cf5978fff9eb80b944c948598cdf48d3a Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sat, 24 Feb 2018 13:07:46 +0800 Subject: [PATCH 158/217] add larger_than and larger_equal op and kernel --- paddle/fluid/operators/compare_op.cc | 5 +++++ paddle/fluid/operators/compare_op.cu | 4 ++++ paddle/fluid/operators/compare_op.h | 12 ++++++++++++ 3 files changed, 21 insertions(+) diff --git a/paddle/fluid/operators/compare_op.cc b/paddle/fluid/operators/compare_op.cc index 46d6d0fd86..a5f40d5482 100644 --- a/paddle/fluid/operators/compare_op.cc +++ b/paddle/fluid/operators/compare_op.cc @@ -100,6 +100,11 @@ REGISTER_COMPARE_OP(less_than, "Out = X < Y"); REGISTER_COMPARE_KERNEL(less_than, CPU, paddle::operators::LessThanFunctor); REGISTER_COMPARE_OP(less_equal, "Out = X <= Y"); REGISTER_COMPARE_KERNEL(less_equal, CPU, paddle::operators::LessEqualFunctor); +REGISTER_COMPARE_OP(larger_than, "Out = X > Y"); +REGISTER_COMPARE_KERNEL(larger_than, CPU, paddle::operators::LargerThanFunctor); 
+REGISTER_COMPARE_OP(larger_equal, "Out = X >= Y"); +REGISTER_COMPARE_KERNEL(larger_equal, CPU, + paddle::operators::LargerEqualFunctor); REGISTER_COMPARE_OP(equal, "Out = X == Y"); REGISTER_COMPARE_KERNEL(equal, CPU, paddle::operators::EqualFunctor); REGISTER_COMPARE_OP(not_equal, "Out = X != Y"); diff --git a/paddle/fluid/operators/compare_op.cu b/paddle/fluid/operators/compare_op.cu index c6c83b44b8..32d92a9066 100644 --- a/paddle/fluid/operators/compare_op.cu +++ b/paddle/fluid/operators/compare_op.cu @@ -16,5 +16,9 @@ limitations under the License. */ REGISTER_COMPARE_KERNEL(less_than, CUDA, paddle::operators::LessThanFunctor); REGISTER_COMPARE_KERNEL(less_equal, CUDA, paddle::operators::LessEqualFunctor); +REGISTER_COMPARE_KERNEL(larger_than, CUDA, + paddle::operators::LargerThanFunctor); +REGISTER_COMPARE_KERNEL(larger_equal, CUDA, + paddle::operators::LargerEqualFunctor); REGISTER_COMPARE_KERNEL(equal, CUDA, paddle::operators::EqualFunctor); REGISTER_COMPARE_KERNEL(not_equal, CUDA, paddle::operators::NotEqualFunctor); diff --git a/paddle/fluid/operators/compare_op.h b/paddle/fluid/operators/compare_op.h index 6638e5ae9e..b4546f27b1 100644 --- a/paddle/fluid/operators/compare_op.h +++ b/paddle/fluid/operators/compare_op.h @@ -34,6 +34,18 @@ struct LessEqualFunctor { HOSTDEVICE bool operator()(const T& a, const T& b) const { return a <= b; } }; +template +struct LargerThanFunctor { + using ELEM_TYPE = T; + HOSTDEVICE bool operator()(const T& a, const T& b) const { return a > b; } +}; + +template +struct LargerEqualFunctor { + using ELEM_TYPE = T; + HOSTDEVICE bool operator()(const T& a, const T& b) const { return a >= b; } +}; + template struct EqualFunctor { using ELEM_TYPE = T; -- GitLab From 28d07e3cb88737a0abe95d3ed4b4660e3a832dc2 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sat, 24 Feb 2018 13:15:05 +0800 Subject: [PATCH 159/217] add python part of compare op --- paddle/fluid/operators/compare_op.cc | 11 ++++++----- paddle/fluid/operators/compare_op.cu | 8 ++++---- paddle/fluid/operators/compare_op.h | 4 ++-- python/paddle/v2/fluid/layers/math_op_patch.py | 4 +++- .../v2/fluid/tests/unittests/test_compare_op.py | 3 +++ 5 files changed, 18 insertions(+), 12 deletions(-) diff --git a/paddle/fluid/operators/compare_op.cc b/paddle/fluid/operators/compare_op.cc index a5f40d5482..86f7046058 100644 --- a/paddle/fluid/operators/compare_op.cc +++ b/paddle/fluid/operators/compare_op.cc @@ -100,11 +100,12 @@ REGISTER_COMPARE_OP(less_than, "Out = X < Y"); REGISTER_COMPARE_KERNEL(less_than, CPU, paddle::operators::LessThanFunctor); REGISTER_COMPARE_OP(less_equal, "Out = X <= Y"); REGISTER_COMPARE_KERNEL(less_equal, CPU, paddle::operators::LessEqualFunctor); -REGISTER_COMPARE_OP(larger_than, "Out = X > Y"); -REGISTER_COMPARE_KERNEL(larger_than, CPU, paddle::operators::LargerThanFunctor); -REGISTER_COMPARE_OP(larger_equal, "Out = X >= Y"); -REGISTER_COMPARE_KERNEL(larger_equal, CPU, - paddle::operators::LargerEqualFunctor); +REGISTER_COMPARE_OP(greater_than, "Out = X > Y"); +REGISTER_COMPARE_KERNEL(greater_than, CPU, + paddle::operators::GreaterThanFunctor); +REGISTER_COMPARE_OP(greater_equal, "Out = X >= Y"); +REGISTER_COMPARE_KERNEL(greater_equal, CPU, + paddle::operators::GreaterEqualFunctor); REGISTER_COMPARE_OP(equal, "Out = X == Y"); REGISTER_COMPARE_KERNEL(equal, CPU, paddle::operators::EqualFunctor); REGISTER_COMPARE_OP(not_equal, "Out = X != Y"); diff --git a/paddle/fluid/operators/compare_op.cu b/paddle/fluid/operators/compare_op.cu index 32d92a9066..1bf85c64fb 100644 --- 
a/paddle/fluid/operators/compare_op.cu +++ b/paddle/fluid/operators/compare_op.cu @@ -16,9 +16,9 @@ limitations under the License. */ REGISTER_COMPARE_KERNEL(less_than, CUDA, paddle::operators::LessThanFunctor); REGISTER_COMPARE_KERNEL(less_equal, CUDA, paddle::operators::LessEqualFunctor); -REGISTER_COMPARE_KERNEL(larger_than, CUDA, - paddle::operators::LargerThanFunctor); -REGISTER_COMPARE_KERNEL(larger_equal, CUDA, - paddle::operators::LargerEqualFunctor); +REGISTER_COMPARE_KERNEL(greater_than, CUDA, + paddle::operators::GreaterThanFunctor); +REGISTER_COMPARE_KERNEL(greater_equal, CUDA, + paddle::operators::GreaterEqualFunctor); REGISTER_COMPARE_KERNEL(equal, CUDA, paddle::operators::EqualFunctor); REGISTER_COMPARE_KERNEL(not_equal, CUDA, paddle::operators::NotEqualFunctor); diff --git a/paddle/fluid/operators/compare_op.h b/paddle/fluid/operators/compare_op.h index b4546f27b1..1cbabdaf67 100644 --- a/paddle/fluid/operators/compare_op.h +++ b/paddle/fluid/operators/compare_op.h @@ -35,13 +35,13 @@ struct LessEqualFunctor { }; template -struct LargerThanFunctor { +struct GreaterThanFunctor { using ELEM_TYPE = T; HOSTDEVICE bool operator()(const T& a, const T& b) const { return a > b; } }; template -struct LargerEqualFunctor { +struct GreaterEqualFunctor { using ELEM_TYPE = T; HOSTDEVICE bool operator()(const T& a, const T& b) const { return a >= b; } }; diff --git a/python/paddle/v2/fluid/layers/math_op_patch.py b/python/paddle/v2/fluid/layers/math_op_patch.py index 417a01b76f..c92eb94f09 100644 --- a/python/paddle/v2/fluid/layers/math_op_patch.py +++ b/python/paddle/v2/fluid/layers/math_op_patch.py @@ -157,7 +157,9 @@ def monkey_patch_variable(): ("__eq__", "equal", False), ("__ne__", "not_equal", False), ("__lt__", "less_than", False), - ("__le__", "less_equal", False)): + ("__le__", "less_equal", False), + ("__gt__", "greater_than", False), + ("__ge__", "greater_equal", False)): setattr(Variable, method_name, _elemwise_method_creator_(method_name, op_type, reverse)) diff --git a/python/paddle/v2/fluid/tests/unittests/test_compare_op.py b/python/paddle/v2/fluid/tests/unittests/test_compare_op.py index 83d57639ca..405afebae8 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_compare_op.py +++ b/python/paddle/v2/fluid/tests/unittests/test_compare_op.py @@ -38,7 +38,10 @@ def create_test_class(op_type, typename, callback): for _type_name in {'float32', 'float64', 'int32', 'int64'}: create_test_class('less_than', _type_name, lambda _a, _b: _a < _b) create_test_class('less_equal', _type_name, lambda _a, _b: _a <= _b) + create_test_class('greater_than', _type_name, lambda _a, _b: _a > _b) + create_test_class('greater_equal', _type_name, lambda _a, _b: _a >= _b) create_test_class('equal', _type_name, lambda _a, _b: _a == _b) + create_test_class('not_equal', _type_name, lambda _a, _b: _a != _b) if __name__ == '__main__': unittest.main() -- GitLab From cf7c745c486bfc89543c6c9c6abc19ce1374548b Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Sat, 24 Feb 2018 14:49:04 +0800 Subject: [PATCH 160/217] change type of BN's 'mean' and 'variance' from persistable variable to Parameter --- python/paddle/v2/fluid/layers/nn.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index e8b4cec6ee..3453dd945d 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -1519,21 +1519,21 @@ def batch_norm(input, bias = helper.create_parameter( 
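To make the compare-op commits above concrete, here is a hedged sketch of how they surface in Python once `monkey_patch_variable` maps `__gt__`/`__ge__` to the new `greater_than`/`greater_equal` ops. It assumes the monkey patch has been applied (fluid's layers package does this on import); the variable names are illustrative.

```python
import paddle.v2.fluid as fluid

x = fluid.layers.data(name='x', shape=[2], dtype='float32')
y = fluid.layers.data(name='y', shape=[2], dtype='float32')

# Each Python comparison lowers to the corresponding compare op and
# produces an elementwise bool tensor.
gt = x > y    # greater_than
ge = x >= y   # greater_equal
lt = x < y    # less_than
ne = x != y   # not_equal
```

The unit test added in this commit exercises the same mapping numerically, via `create_test_class('greater_than', _type_name, lambda _a, _b: _a > _b)` and friends.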
attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True) - mean = helper.create_global_variable( - name=moving_mean_name, - dtype=input.dtype, + mean = helper.create_parameter( + attr=ParamAttr( + name=moving_mean_name, initializer=Constant(0.0), trainable=False), shape=param_shape, - persistable=True, - stop_gradient=True) - helper.set_variable_initializer(var=mean, initializer=Constant(0.0)) + dtype=input.dtype) + mean.stop_gradient = True - variance = helper.create_global_variable( - name=moving_variance_name, - dtype=input.dtype, + variance = helper.create_parameter( + attr=ParamAttr( + name=moving_variance_name, + initializer=Constant(1.0), + trainable=False), shape=param_shape, - persistable=True, - stop_gradient=True) - helper.set_variable_initializer(var=variance, initializer=Constant(1.0)) + dtype=input.dtype) + variance.stop_gradient = True # create output # mean and mean_out share the same memory -- GitLab From 9b5be6ef43c31ccb6f9c306c0c44a8f19a72a24f Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Sat, 24 Feb 2018 15:13:18 +0800 Subject: [PATCH 161/217] fix short connection again --- paddle/fluid/framework/executor.cc | 6 +++--- paddle/fluid/framework/framework.proto | 5 ++++- paddle/fluid/operators/detail/grpc_client.cc | 4 ++-- paddle/fluid/operators/nccl_op.cc | 2 +- paddle/fluid/operators/send_op.cc | 20 ++++++++++++++++++- paddle/fluid/pybind/protobuf.cc | 2 +- .../paddle/v2/fluid/distribute_transpiler.py | 3 +-- 7 files changed, 31 insertions(+), 11 deletions(-) diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index 0d2691e811..88863ab99e 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -58,13 +58,13 @@ static void CreateTensor(Variable* var, proto::VarType::Type var_type) { var->GetMutable<ReaderHolder>(); } else if (var_type == proto::VarType::CHANNEL) { var->GetMutable<ChannelHolder>(); - } else if (var_type == proto::VarType::NCCL_COM) { - // GetMutable will be called in ncclInit + } else if (var_type == proto::VarType::RAW) { + // GetMutable will be called in the operator } else { PADDLE_THROW( "Variable type %d is not in " "[LOD_TENSOR, SELECTED_ROWS, FEED_MINIBATCH, FETCH_LIST, " - "LOD_RANK_TABLE, PLACE_LIST, READER, CHANNEL, NCCL_COM]", + "LOD_RANK_TABLE, PLACE_LIST, READER, CHANNEL, RAW]", var_type); } } diff --git a/paddle/fluid/framework/framework.proto b/paddle/fluid/framework/framework.proto index 5b43f5a8a4..23064541a0 100644 --- a/paddle/fluid/framework/framework.proto +++ b/paddle/fluid/framework/framework.proto @@ -113,7 +113,10 @@ message VarType { PLACE_LIST = 14; READER = 15; CHANNEL = 16; - NCCL_COM = 17; + // Any runtime-decided variable type is raw. + // Raw variables should manage their own allocations + // in operators like nccl_op + RAW = 17; } required Type type = 1; diff --git a/paddle/fluid/operators/detail/grpc_client.cc b/paddle/fluid/operators/detail/grpc_client.cc index ee9044b1f5..7266f32764 100644 --- a/paddle/fluid/operators/detail/grpc_client.cc +++ b/paddle/fluid/operators/detail/grpc_client.cc @@ -177,8 +177,8 @@ std::shared_ptr<grpc::Channel> RPCClient::GetChannel(const std::string& ep) { args.SetMaxSendMessageSize(std::numeric_limits<int>::max()); args.SetMaxReceiveMessageSize(std::numeric_limits<int>::max()); - auto ch = std::shared_ptr<grpc::Channel>( - grpc::CreateCustomChannel(ep, grpc::InsecureChannelCredentials(), args)); + auto ch = + grpc::CreateCustomChannel(ep, grpc::InsecureChannelCredentials(), args); channels_[ep] = ch; return ch; diff --git a/paddle/fluid/operators/nccl_op.cc
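As a quick illustration of the batch-norm change above: callers are unaffected, but the moving statistics now come out as non-trainable `Parameter`s (initialized to 0 and 1, with `stop_gradient` set) instead of bare global variables, so they are saved and located like any other parameter. A hedged sketch with illustrative names; `moving_mean_name`/`moving_variance_name` are the names used in the layer body shown in the diff:

```python
import paddle.v2.fluid as fluid

img = fluid.layers.data(name='img', shape=[3, 32, 32], dtype='float32')
conv = fluid.layers.conv2d(input=img, num_filters=16, filter_size=3)

# After this patch, 'bn_mean' and 'bn_var' show up as Parameters with
# trainable=False rather than plain persistable variables.
out = fluid.layers.batch_norm(
    input=conv,
    moving_mean_name='bn_mean',
    moving_variance_name='bn_var')
```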
b/paddle/fluid/operators/nccl_op.cc index 0994bba782..9185666c56 100644 --- a/paddle/fluid/operators/nccl_op.cc +++ b/paddle/fluid/operators/nccl_op.cc @@ -65,7 +65,7 @@ class NCCLInitOpVarTypeInference : public framework::VarTypeInference { framework::BlockDesc *block) const override { auto out_var_name = op_desc.Output("Communicator").front(); auto &out_var = block->FindRecursiveOrCreateVar(out_var_name); - auto var_type = framework::proto::VarType::NCCL_COM; + auto var_type = framework::proto::VarType::RAW; out_var.SetType(var_type); } }; diff --git a/paddle/fluid/operators/send_op.cc b/paddle/fluid/operators/send_op.cc index 58850bf566..178976f96f 100644 --- a/paddle/fluid/operators/send_op.cc +++ b/paddle/fluid/operators/send_op.cc @@ -121,9 +121,27 @@ This operator will send tensor to recv_op at the parameter server. } }; +class SendOpVarTypeInference : public framework::VarTypeInference { + public: + void operator()(const framework::OpDesc& op_desc, + framework::BlockDesc* block) const override { + auto out_var_name = op_desc.Output("RPCClient").front(); + auto& out_var = block->FindRecursiveOrCreateVar(out_var_name); + auto var_type = framework::proto::VarType::RAW; + out_var.SetType(var_type); + } +}; + +class SendOpShapeInference : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext* ctx) const override {} +}; + } // namespace operators } // namespace paddle namespace ops = paddle::operators; -REGISTER_OPERATOR(send, ops::SendOp, ops::SendOpMaker); +REGISTER_OPERATOR(send, ops::SendOp, paddle::framework::EmptyGradOpMaker, + ops::SendOpMaker, ops::SendOpVarTypeInference, + ops::SendOpShapeInference); diff --git a/paddle/fluid/pybind/protobuf.cc b/paddle/fluid/pybind/protobuf.cc index b725be7952..b0a2497d91 100644 --- a/paddle/fluid/pybind/protobuf.cc +++ b/paddle/fluid/pybind/protobuf.cc @@ -252,7 +252,7 @@ void BindVarDsec(py::module &m) { .value("CHANNEL", proto::VarType::CHANNEL) .value("PLACE_LIST", proto::VarType::PLACE_LIST) .value("READER", proto::VarType::READER) - .value("NCCL_COM", proto::VarType::NCCL_COM); + .value("RAW", proto::VarType::RAW); } void BindOpDesc(py::module &m) { diff --git a/python/paddle/v2/fluid/distribute_transpiler.py b/python/paddle/v2/fluid/distribute_transpiler.py index 2fcf3753c5..8da9ca290b 100644 --- a/python/paddle/v2/fluid/distribute_transpiler.py +++ b/python/paddle/v2/fluid/distribute_transpiler.py @@ -226,8 +226,7 @@ class DistributeTranspiler: rpc_client_var = program.global_block().create_var( name="RPC_CLIENT_VAR", persistable=True, - dtype='float32', # dtype and shape is not used in fact - shape=[0]) + type=core.VarDesc.VarType.RAW) # create send_op program.global_block().append_op( -- GitLab From c4e8f6c65ba655afa367ceaf8bd7a493c2780a89 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Sat, 24 Feb 2018 15:17:02 +0800 Subject: [PATCH 162/217] add an introduction of API --- doc/api/index_cn.rst | 10 ---------- doc/api/index_en.rst | 1 + doc/api/overview.rst | 4 ++++ 3 files changed, 5 insertions(+), 10 deletions(-) delete mode 100644 doc/api/index_cn.rst create mode 100644 doc/api/overview.rst diff --git a/doc/api/index_cn.rst b/doc/api/index_cn.rst deleted file mode 100644 index 84f9097a6c..0000000000 --- a/doc/api/index_cn.rst +++ /dev/null @@ -1,10 +0,0 @@ -API -=== - -.. 
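A short sketch of the new RAW variable type from the Python side, mirroring the `distribute_transpiler` hunk above. A RAW variable carries no tensor: `executor.cc` skips allocation for it, and the consuming operator (here send_op's RPC client; nccl_init's communicator is the other user) manages its own storage.

```python
import paddle.v2.fluid as fluid
import paddle.v2.fluid.core as core

program = fluid.Program()

# No dtype or shape: nothing is inferred about this variable, and the
# RPC client is initialized inside the operator itself.
rpc_client = program.global_block().create_var(
    name="RPC_CLIENT_VAR",
    persistable=True,
    type=core.VarDesc.VarType.RAW)
```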
toctree:: - :maxdepth: 1 - - 模型配置 - 数据访问 - 训练与应用 - v2/fluid.rst diff --git a/doc/api/index_en.rst b/doc/api/index_en.rst index e6f632e1a5..77337982be 100644 --- a/doc/api/index_en.rst +++ b/doc/api/index_en.rst @@ -4,6 +4,7 @@ API .. toctree:: :maxdepth: 1 + overview.rst v2/model_configs.rst v2/data.rst v2/run_logic.rst diff --git a/doc/api/overview.rst b/doc/api/overview.rst new file mode 100644 index 0000000000..953d2db2b3 --- /dev/null +++ b/doc/api/overview.rst @@ -0,0 +1,4 @@ +API Overview +============ + +TBD -- GitLab From 3959023099f25c590ec72f701976c7b4e1233174 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sat, 24 Feb 2018 16:28:13 +0800 Subject: [PATCH 163/217] Enhance layer_function_generator * Generated functions can take `*args` as inputs. --- .../v2/fluid/layers/layer_function_generator.py | 5 ++++- python/paddle/v2/fluid/layers/tensor.py | 4 ++-- .../v2/fluid/tests/book/notest_rnn_encoder_decoer.py | 2 +- python/paddle/v2/fluid/tests/book/test_fit_a_line.py | 2 +- .../v2/fluid/tests/book/test_image_classification.py | 2 +- .../v2/fluid/tests/book/test_label_semantic_roles.py | 2 +- .../v2/fluid/tests/book/test_machine_translation.py | 2 +- .../v2/fluid/tests/book/test_recognize_digits.py | 6 +++--- .../v2/fluid/tests/book/test_recommender_system.py | 2 +- .../v2/fluid/tests/book/test_understand_sentiment.py | 10 +++++----- python/paddle/v2/fluid/tests/book/test_word2vec.py | 4 ++-- .../tests/book_distribute/notest_dist_fit_a_line.py | 2 +- .../notest_dist_image_classification.py | 2 +- .../notest_dist_label_semantic_roles.py | 2 +- .../tests/book_distribute/notest_dist_word2vec.py | 2 +- .../book_distribute/notest_machine_translation.py | 2 +- .../notest_recognize_digits_conv_dist.py | 2 +- .../notest_recognize_digits_mlp_dist.py | 2 +- .../notest_recommender_system_dist.py | 2 +- .../notest_understand_sentiment_conv_dist.py | 2 +- .../notest_understand_sentiment_dynamic_lstm.py | 2 +- .../test_memopt_fit_a_line.py | 2 +- .../test_memopt_image_classification_train.py | 2 +- .../test_memopt_machine_translation.py | 2 +- python/paddle/v2/fluid/tests/demo/fc_gan.py | 4 ++-- python/paddle/v2/fluid/tests/test_error_clip.py | 2 +- python/paddle/v2/fluid/tests/test_gradient_clip.py | 2 +- .../paddle/v2/fluid/tests/test_mnist_if_else_op.py | 4 ++-- .../tests/unittests/test_array_read_write_op.py | 12 ++++++------ .../v2/fluid/tests/unittests/test_calc_gradient.py | 2 +- .../fluid/tests/unittests/test_conditional_block.py | 2 +- .../paddle/v2/fluid/tests/unittests/test_dyn_rnn.py | 4 ++-- .../tests/unittests/test_dynrnn_gradient_check.py | 4 ++-- .../tests/unittests/test_dynrnn_static_input.py | 2 +- .../fluid/tests/unittests/test_inference_model_io.py | 2 +- .../paddle/v2/fluid/tests/unittests/test_layers.py | 10 +++++----- .../tests/unittests/test_lod_tensor_array_ops.py | 2 +- .../unittests/test_memory_optimization_transpiler.py | 2 +- .../v2/fluid/tests/unittests/test_parallel_op.py | 6 +++--- .../paddle/v2/fluid/tests/unittests/test_print_op.py | 2 +- .../paddle/v2/fluid/tests/unittests/test_profiler.py | 2 +- .../v2/fluid/tests/unittests/test_recurrent_op.py | 8 ++++---- .../paddle/v2/fluid/tests/unittests/test_registry.py | 2 +- .../fluid/tests/unittests/test_shrink_rnn_memory.py | 2 +- .../unittests/test_split_and_merge_lod_tensor_op.py | 2 +- .../paddle/v2/fluid/tests/unittests/test_while_op.py | 2 +- 46 files changed, 75 insertions(+), 72 deletions(-) diff --git a/python/paddle/v2/fluid/layers/layer_function_generator.py 
b/python/paddle/v2/fluid/layers/layer_function_generator.py index 88c9ae31b7..16a401dc7b 100644 --- a/python/paddle/v2/fluid/layers/layer_function_generator.py +++ b/python/paddle/v2/fluid/layers/layer_function_generator.py @@ -155,7 +155,7 @@ def generate_layer_fn(op_type): return dtype - def func(**kwargs): + def func(*args, **kwargs): helper = LayerHelper(op_type, **kwargs) dtype = infer_and_check_dtype(op_proto, **kwargs) @@ -166,6 +166,9 @@ def generate_layer_fn(op_type): val = kwargs.pop(name, []) if not isinstance(val, list) and not isinstance(val, tuple): val = [val] + if len(val) == 0 and len(args) != 0: + val = args[0] + args = args[1:] inputs[ipt.name] = val outputs = dict() diff --git a/python/paddle/v2/fluid/layers/tensor.py b/python/paddle/v2/fluid/layers/tensor.py index 97e8f082cf..8100e8f034 100644 --- a/python/paddle/v2/fluid/layers/tensor.py +++ b/python/paddle/v2/fluid/layers/tensor.py @@ -160,8 +160,8 @@ def sums(input, out=None): a0 = layers.array_read(array=tmp, i=i) i = layers.increment(x=i) a1 = layers.array_read(array=tmp, i=i) - mean_a0 = layers.mean(x=a0) - mean_a1 = layers.mean(x=a1) + mean_a0 = layers.mean(a0) + mean_a1 = layers.mean(a1) a_sum = layers.sums(input=[mean_a0, mean_a1]) """ helper = LayerHelper('sum', **locals()) diff --git a/python/paddle/v2/fluid/tests/book/notest_rnn_encoder_decoer.py b/python/paddle/v2/fluid/tests/book/notest_rnn_encoder_decoer.py index c7db70f1b1..0054bb6bec 100644 --- a/python/paddle/v2/fluid/tests/book/notest_rnn_encoder_decoer.py +++ b/python/paddle/v2/fluid/tests/book/notest_rnn_encoder_decoer.py @@ -147,7 +147,7 @@ def seq_to_seq_net(): label = fluid.layers.data( name='label_sequence', shape=[1], dtype='int64', lod_level=1) cost = fluid.layers.cross_entropy(input=prediction, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) return avg_cost, prediction diff --git a/python/paddle/v2/fluid/tests/book/test_fit_a_line.py b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py index a66c2c3c2f..77cffd4de9 100644 --- a/python/paddle/v2/fluid/tests/book/test_fit_a_line.py +++ b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py @@ -29,7 +29,7 @@ def train(use_cuda, save_dirname): y = fluid.layers.data(name='y', shape=[1], dtype='float32') cost = fluid.layers.square_error_cost(input=y_predict, label=y) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) sgd_optimizer.minimize(avg_cost) diff --git a/python/paddle/v2/fluid/tests/book/test_image_classification.py b/python/paddle/v2/fluid/tests/book/test_image_classification.py index 734ab3e4fb..b99a4285aa 100644 --- a/python/paddle/v2/fluid/tests/book/test_image_classification.py +++ b/python/paddle/v2/fluid/tests/book/test_image_classification.py @@ -110,7 +110,7 @@ def train(net_type, use_cuda, save_dirname): predict = fluid.layers.fc(input=net, size=classdim, act='softmax') cost = fluid.layers.cross_entropy(input=predict, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) acc = fluid.layers.accuracy(input=predict, label=label) # Test program diff --git a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py index b790246ec1..e513a658ff 100644 --- a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py +++ b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py @@ -164,7 +164,7 @@ def train(use_cuda, save_dirname=None): label=target, 
param_attr=fluid.ParamAttr( name='crfw', learning_rate=mix_hidden_lr)) - avg_cost = fluid.layers.mean(x=crf_cost) + avg_cost = fluid.layers.mean(crf_cost) # TODO(qiao) # check other optimizers and check why out will be NAN diff --git a/python/paddle/v2/fluid/tests/book/test_machine_translation.py b/python/paddle/v2/fluid/tests/book/test_machine_translation.py index d3405a9601..ee6a5d61df 100644 --- a/python/paddle/v2/fluid/tests/book/test_machine_translation.py +++ b/python/paddle/v2/fluid/tests/book/test_machine_translation.py @@ -178,7 +178,7 @@ def train_main(use_cuda, is_sparse): label = pd.data( name="target_language_next_word", shape=[1], dtype='int64', lod_level=1) cost = pd.cross_entropy(input=rnn_out, label=label) - avg_cost = pd.mean(x=cost) + avg_cost = pd.mean(cost) optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4) optimizer.minimize(avg_cost) diff --git a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py index 2462d425e1..aeeea1177e 100644 --- a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py +++ b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py @@ -48,7 +48,7 @@ BATCH_SIZE = 64 def loss_net(hidden, label): prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') loss = fluid.layers.cross_entropy(input=prediction, label=label) - avg_loss = fluid.layers.mean(x=loss) + avg_loss = fluid.layers.mean(loss) acc = fluid.layers.accuracy(input=prediction, label=label) return prediction, avg_loss, acc @@ -101,8 +101,8 @@ def train(nn_type, use_cuda, parallel, save_dirname, save_param_filename): avg_loss, acc = pd() # get mean loss and acc through every devices. - avg_loss = fluid.layers.mean(x=avg_loss) - acc = fluid.layers.mean(x=acc) + avg_loss = fluid.layers.mean(avg_loss) + acc = fluid.layers.mean(acc) else: prediction, avg_loss, acc = net_conf(img, label) diff --git a/python/paddle/v2/fluid/tests/book/test_recommender_system.py b/python/paddle/v2/fluid/tests/book/test_recommender_system.py index 1a7d8d57ff..a5adc3507b 100644 --- a/python/paddle/v2/fluid/tests/book/test_recommender_system.py +++ b/python/paddle/v2/fluid/tests/book/test_recommender_system.py @@ -147,7 +147,7 @@ def model(): label = layers.data(name='score', shape=[1], dtype='float32') square_cost = layers.square_error_cost(input=scale_infer, label=label) - avg_cost = layers.mean(x=square_cost) + avg_cost = layers.mean(square_cost) return scale_infer, avg_cost diff --git a/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py index 61f46b51c4..cdd233a5b6 100644 --- a/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py +++ b/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py @@ -42,7 +42,7 @@ def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32, size=class_dim, act="softmax") cost = fluid.layers.cross_entropy(input=prediction, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) accuracy = fluid.layers.accuracy(input=prediction, label=label) return avg_cost, accuracy, prediction @@ -82,7 +82,7 @@ def dyn_rnn_lstm(data, label, input_dim, class_dim=2, emb_dim=32, last = fluid.layers.sequence_last_step(rnn()) prediction = fluid.layers.fc(input=last, size=class_dim, act="softmax") cost = fluid.layers.cross_entropy(input=prediction, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) accuracy = fluid.layers.accuracy(input=prediction, 
label=label) return avg_cost, accuracy, prediction @@ -119,7 +119,7 @@ def stacked_lstm_net(data, size=class_dim, act='softmax') cost = fluid.layers.cross_entropy(input=prediction, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) accuracy = fluid.layers.accuracy(input=prediction, label=label) return avg_cost, accuracy, prediction @@ -158,8 +158,8 @@ def train(word_dict, net_method, use_cuda, parallel=False, save_dirname=None): pd.write_output(acc) cost, acc = pd() - cost = fluid.layers.mean(x=cost) - acc_out = fluid.layers.mean(x=acc) + cost = fluid.layers.mean(cost) + acc_out = fluid.layers.mean(acc) prediction = None assert save_dirname is None diff --git a/python/paddle/v2/fluid/tests/book/test_word2vec.py b/python/paddle/v2/fluid/tests/book/test_word2vec.py index 9bd8f90c5e..ac23bd7284 100644 --- a/python/paddle/v2/fluid/tests/book/test_word2vec.py +++ b/python/paddle/v2/fluid/tests/book/test_word2vec.py @@ -118,7 +118,7 @@ def train(use_cuda, is_sparse, parallel, save_dirname): size=dict_size, act='softmax') cost = fluid.layers.cross_entropy(input=predict_word, label=words[4]) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) return avg_cost, predict_word word_dict = paddle.dataset.imikolov.build_dict() @@ -143,7 +143,7 @@ def train(use_cuda, is_sparse, parallel, save_dirname): ])) pd.write_output(avg_cost) - avg_cost = fluid.layers.mean(x=pd()) + avg_cost = fluid.layers.mean(pd()) sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) sgd_optimizer.minimize(avg_cost) diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_fit_a_line.py b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_fit_a_line.py index c443c4e0b7..164327d8f0 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_fit_a_line.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_fit_a_line.py @@ -24,7 +24,7 @@ y_predict = fluid.layers.fc(input=x, size=1, act=None) y = fluid.layers.data(name='y', shape=[1], dtype='float32') cost = fluid.layers.square_error_cost(input=y_predict, label=y) -avg_cost = fluid.layers.mean(x=cost) +avg_cost = fluid.layers.mean(cost) sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_image_classification.py b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_image_classification.py index 298ecfc386..6ba06a6038 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_image_classification.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_image_classification.py @@ -114,7 +114,7 @@ else: predict = fluid.layers.fc(input=net, size=classdim, act='softmax') cost = fluid.layers.cross_entropy(input=predict, label=label) -avg_cost = fluid.layers.mean(x=cost) +avg_cost = fluid.layers.mean(cost) optimizer = fluid.optimizer.Adam(learning_rate=0.001) optimize_ops, params_grads = optimizer.minimize(avg_cost) diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py index 1210bf1d84..fa4bf33cea 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py @@ -154,7 +154,7 @@ def main(): label=target, param_attr=fluid.ParamAttr( name='crfw', learning_rate=mix_hidden_lr)) - avg_cost = 
fluid.layers.mean(x=crf_cost) + avg_cost = fluid.layers.mean(crf_cost) # TODO(qiao) # check other optimizers and check why out will be NAN diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_word2vec.py b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_word2vec.py index 0d5ad98850..aff4c53ebc 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/notest_dist_word2vec.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_dist_word2vec.py @@ -65,7 +65,7 @@ concat_embed = fluid.layers.concat( hidden1 = fluid.layers.fc(input=concat_embed, size=HIDDEN_SIZE, act='sigmoid') predict_word = fluid.layers.fc(input=hidden1, size=dict_size, act='softmax') cost = fluid.layers.cross_entropy(input=predict_word, label=next_word) -avg_cost = fluid.layers.mean(x=cost) +avg_cost = fluid.layers.mean(cost) sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) train_reader = paddle.batch( diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_machine_translation.py b/python/paddle/v2/fluid/tests/book_distribute/notest_machine_translation.py index 15d2d40979..5406bd9113 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/notest_machine_translation.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_machine_translation.py @@ -94,7 +94,7 @@ def main(): label = layers.data( name="target_language_next_word", shape=[1], dtype='int64', lod_level=1) cost = layers.cross_entropy(input=rnn_out, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4) optimize_ops, params_grads = optimizer.minimize(avg_cost) diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py b/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py index 1c1fffc589..f6623099cb 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py @@ -37,7 +37,7 @@ conv_pool_2 = fluid.nets.simple_img_conv_pool( predict = fluid.layers.fc(input=conv_pool_2, size=10, act="softmax") cost = fluid.layers.cross_entropy(input=predict, label=label) -avg_cost = fluid.layers.mean(x=cost) +avg_cost = fluid.layers.mean(cost) optimizer = fluid.optimizer.Adam(learning_rate=0.01) optimize_ops, params_grads = optimizer.minimize(avg_cost) diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_mlp_dist.py b/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_mlp_dist.py index c442ada6e3..f2d32cb99d 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_mlp_dist.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_mlp_dist.py @@ -32,7 +32,7 @@ predict = fluid.layers.fc(input=hidden2, size=10, act='softmax') label = fluid.layers.data(name='y', shape=[1], dtype='int64') cost = fluid.layers.cross_entropy(input=predict, label=label) -avg_cost = fluid.layers.mean(x=cost) +avg_cost = fluid.layers.mean(cost) optimizer = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9) optimize_ops, params_grads = optimizer.minimize(avg_cost) diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_recommender_system_dist.py b/python/paddle/v2/fluid/tests/book_distribute/notest_recommender_system_dist.py index 363c7102c7..907b09a38b 100644 --- 
a/python/paddle/v2/fluid/tests/book_distribute/notest_recommender_system_dist.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_recommender_system_dist.py @@ -117,7 +117,7 @@ def model(): label = layers.data(name='score', shape=[1], dtype='float32') square_cost = layers.square_error_cost(input=scale_infer, label=label) - avg_cost = layers.mean(x=square_cost) + avg_cost = layers.mean(square_cost) return avg_cost diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py b/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py index c5c0856c31..f95b4a9a02 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py @@ -38,7 +38,7 @@ def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32, size=class_dim, act="softmax") cost = fluid.layers.cross_entropy(input=prediction, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002) optimize_ops, params_grads = adam_optimizer.minimize(avg_cost) accuracy = fluid.evaluator.Accuracy(input=prediction, label=label) diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py b/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py index 99e2c2bbac..5212319435 100644 --- a/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py @@ -49,7 +49,7 @@ def stacked_lstm_net(data, size=class_dim, act='softmax') cost = fluid.layers.cross_entropy(input=prediction, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002) optimize_ops, params_grads = adam_optimizer.minimize(avg_cost) accuracy = fluid.evaluator.Accuracy(input=prediction, label=label) diff --git a/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py b/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py index 944f8af086..04ab2d1d07 100644 --- a/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py +++ b/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py @@ -30,7 +30,7 @@ y_predict = fluid.layers.fc(input=x, size=1, act=None) y = fluid.layers.data(name='y', shape=[1], dtype='float32') cost = fluid.layers.square_error_cost(input=y_predict, label=y) -avg_cost = fluid.layers.mean(x=cost) +avg_cost = fluid.layers.mean(cost) sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1) sgd_optimizer.minimize(avg_cost) diff --git a/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py b/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py index a556904107..307e6035f4 100644 --- a/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py +++ b/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py @@ -117,7 +117,7 @@ else: predict = fluid.layers.fc(input=net, size=classdim, act='softmax') cost = fluid.layers.cross_entropy(input=predict, label=label) -avg_cost = fluid.layers.mean(x=cost) +avg_cost = fluid.layers.mean(cost) optimizer = 
fluid.optimizer.Adam(learning_rate=0.001) opts = optimizer.minimize(avg_cost) diff --git a/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py b/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py index 4c1eae861b..3de46e8c27 100644 --- a/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py +++ b/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py @@ -100,7 +100,7 @@ def main(): label = layers.data( name="target_language_next_word", shape=[1], dtype='int64', lod_level=1) cost = layers.cross_entropy(input=rnn_out, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4) optimizer.minimize(avg_cost) diff --git a/python/paddle/v2/fluid/tests/demo/fc_gan.py b/python/paddle/v2/fluid/tests/demo/fc_gan.py index 67921db04a..a0d3721ea4 100644 --- a/python/paddle/v2/fluid/tests/demo/fc_gan.py +++ b/python/paddle/v2/fluid/tests/demo/fc_gan.py @@ -96,7 +96,7 @@ def main(): x=D(img), label=fluid.layers.data( name='label', shape=[1], dtype='float32')) - d_loss = fluid.layers.mean(x=d_loss) + d_loss = fluid.layers.mean(d_loss) with fluid.program_guard(dg_program, startup_program): noise = fluid.layers.data( @@ -107,7 +107,7 @@ def main(): x=D(g_img), label=fluid.layers.fill_constant_batch_size_like( input=noise, dtype='float32', shape=[-1, 1], value=1.0)) - dg_loss = fluid.layers.mean(x=dg_loss) + dg_loss = fluid.layers.mean(dg_loss) opt = fluid.optimizer.Adam(learning_rate=LEARNING_RATE) diff --git a/python/paddle/v2/fluid/tests/test_error_clip.py b/python/paddle/v2/fluid/tests/test_error_clip.py index d577d0014d..99b69c1625 100644 --- a/python/paddle/v2/fluid/tests/test_error_clip.py +++ b/python/paddle/v2/fluid/tests/test_error_clip.py @@ -33,7 +33,7 @@ with fluid.program_guard(main_program=prog): label = fluid.layers.data(name='y', shape=[1], dtype='int64') cost = fluid.layers.cross_entropy(input=predict, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) prog_clip = prog.clone() prog_clip.block(0).var(hidden1.name).set_error_clip( diff --git a/python/paddle/v2/fluid/tests/test_gradient_clip.py b/python/paddle/v2/fluid/tests/test_gradient_clip.py index 792262df84..c20863ddb2 100644 --- a/python/paddle/v2/fluid/tests/test_gradient_clip.py +++ b/python/paddle/v2/fluid/tests/test_gradient_clip.py @@ -30,7 +30,7 @@ with fluid.program_guard(main_program=prog): label = fluid.layers.data(name='y', shape=[1], dtype='int64') cost = fluid.layers.cross_entropy(input=predict, label=label) - avg_cost = fluid.layers.mean(x=cost) + avg_cost = fluid.layers.mean(cost) prog_clip = prog.clone() diff --git a/python/paddle/v2/fluid/tests/test_mnist_if_else_op.py b/python/paddle/v2/fluid/tests/test_mnist_if_else_op.py index 75a651cf27..e5a1406b93 100644 --- a/python/paddle/v2/fluid/tests/test_mnist_if_else_op.py +++ b/python/paddle/v2/fluid/tests/test_mnist_if_else_op.py @@ -56,7 +56,7 @@ class TestMNISTIfElseOp(unittest.TestCase): prob = layers.merge_lod_tensor( in_true=true_out, in_false=false_out, mask=cond, x=image) loss = layers.cross_entropy(input=prob, label=label) - avg_loss = layers.mean(x=loss) + avg_loss = layers.mean(loss) optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9) optimizer.minimize(avg_loss, startup_prog) @@ -113,7 +113,7 @@ class TestMNISTIfElseOp(unittest.TestCase): prob = ie() loss = 
layers.cross_entropy(input=prob[0], label=label)
-        avg_loss = layers.mean(x=loss)
+        avg_loss = layers.mean(loss)
         optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
         optimizer.minimize(avg_loss, startup_prog)

diff --git a/python/paddle/v2/fluid/tests/unittests/test_array_read_write_op.py b/python/paddle/v2/fluid/tests/unittests/test_array_read_write_op.py
index 8917b9b906..e04f682ece 100644
--- a/python/paddle/v2/fluid/tests/unittests/test_array_read_write_op.py
+++ b/python/paddle/v2/fluid/tests/unittests/test_array_read_write_op.py
@@ -49,15 +49,15 @@ class TestArrayReadWrite(unittest.TestCase):
             i = layers.increment(x=i)
             a2 = layers.array_read(array=arr, i=i)
-            mean_a0 = layers.mean(x=a0)
-            mean_a1 = layers.mean(x=a1)
-            mean_a2 = layers.mean(x=a2)
+            mean_a0 = layers.mean(a0)
+            mean_a1 = layers.mean(a1)
+            mean_a2 = layers.mean(a2)
             a_sum = layers.sums(input=[mean_a0, mean_a1, mean_a2])
-            mean_x0 = layers.mean(x=x[0])
-            mean_x1 = layers.mean(x=x[1])
-            mean_x2 = layers.mean(x=x[2])
+            mean_x0 = layers.mean(x[0])
+            mean_x1 = layers.mean(x[1])
+            mean_x2 = layers.mean(x[2])
             x_sum = layers.sums(input=[mean_x0, mean_x1, mean_x2])

diff --git a/python/paddle/v2/fluid/tests/unittests/test_calc_gradient.py b/python/paddle/v2/fluid/tests/unittests/test_calc_gradient.py
index 1b38dcf343..1b0de31ae0 100644
--- a/python/paddle/v2/fluid/tests/unittests/test_calc_gradient.py
+++ b/python/paddle/v2/fluid/tests/unittests/test_calc_gradient.py
@@ -26,7 +26,7 @@ class TestCalcGradient(unittest.TestCase):
         x = layers.create_parameter(dtype="float32", shape=[5, 10])
         y = layers.create_parameter(dtype="float32", shape=[10, 8])
         mul_out = layers.mul(x=x, y=y)
-        mean_out = layers.mean(x=mul_out)
+        mean_out = layers.mean(mul_out)
         a = calc_gradient(mean_out, mul_out)
         b = calc_gradient(mean_out, x)
         place = fluid.CPUPlace()

diff --git a/python/paddle/v2/fluid/tests/unittests/test_conditional_block.py b/python/paddle/v2/fluid/tests/unittests/test_conditional_block.py
index 58ac267203..f605e13d21 100644
--- a/python/paddle/v2/fluid/tests/unittests/test_conditional_block.py
+++ b/python/paddle/v2/fluid/tests/unittests/test_conditional_block.py
@@ -39,7 +39,7 @@ class ConditionalBlock(unittest.TestCase):
         outs = exe.run(feed={'X': x}, fetch_list=[out])[0]
         print outs
-        loss = layers.mean(x=out)
+        loss = layers.mean(out)
         append_backward(loss=loss)
         outs = exe.run(
             feed={'X': x},

diff --git a/python/paddle/v2/fluid/tests/unittests/test_dyn_rnn.py b/python/paddle/v2/fluid/tests/unittests/test_dyn_rnn.py
index 1571572fc6..23a1555208 100644
--- a/python/paddle/v2/fluid/tests/unittests/test_dyn_rnn.py
+++ b/python/paddle/v2/fluid/tests/unittests/test_dyn_rnn.py
@@ -81,7 +81,7 @@ class TestDynRNN(unittest.TestCase):
         logits = fluid.layers.fc(input=last, size=1, act=None)
         loss = fluid.layers.sigmoid_cross_entropy_with_logits(
             x=logits, label=label)
-        loss = fluid.layers.mean(x=loss)
+        loss = fluid.layers.mean(loss)
         sgd = fluid.optimizer.SGD(1e-4)
         sgd.minimize(loss=loss)
         cpu = fluid.CPUPlace()
@@ -119,7 +119,7 @@ class TestDynRNN(unittest.TestCase):
         label = fluid.layers.data(name='label', shape=[1], dtype='float32')
         loss = fluid.layers.sigmoid_cross_entropy_with_logits(
             x=logits, label=label)
-        loss = fluid.layers.mean(x=loss)
+        loss = fluid.layers.mean(loss)
         sgd = fluid.optimizer.Adam(1e-3)
         sgd.minimize(loss=loss)

diff --git a/python/paddle/v2/fluid/tests/unittests/test_dynrnn_gradient_check.py b/python/paddle/v2/fluid/tests/unittests/test_dynrnn_gradient_check.py
index 8b01ec730a..182b025be5 100644
--- a/python/paddle/v2/fluid/tests/unittests/test_dynrnn_gradient_check.py
+++ b/python/paddle/v2/fluid/tests/unittests/test_dynrnn_gradient_check.py
@@ -272,7 +272,7 @@ class TestSimpleMul(SeedFixedTestCase):
         out = rnn()
         out = fluid.layers.sequence_pool(out, pool_type='last')
-        loss = fluid.layers.mean(x=out)
+        loss = fluid.layers.mean(out)
         fluid.backward.append_backward(loss)

         cpu = fluid.CPUPlace()
@@ -348,7 +348,7 @@ class TestSimpleMulWithMemory(SeedFixedTestCase):
         out = rnn()
         last = fluid.layers.sequence_pool(input=out, pool_type='last')
-        loss = fluid.layers.mean(x=last)
+        loss = fluid.layers.mean(last)
         fluid.backward.append_backward(loss)

         cpu = fluid.CPUPlace()

diff --git a/python/paddle/v2/fluid/tests/unittests/test_dynrnn_static_input.py b/python/paddle/v2/fluid/tests/unittests/test_dynrnn_static_input.py
index d2f05dcd14..b21ac8e800 100644
--- a/python/paddle/v2/fluid/tests/unittests/test_dynrnn_static_input.py
+++ b/python/paddle/v2/fluid/tests/unittests/test_dynrnn_static_input.py
@@ -125,7 +125,7 @@ class TestDyRnnStaticInput(unittest.TestCase):
             return static_input_step_outs

         last = fluid.layers.sequence_pool(input=rnn(), pool_type='last')
-        loss = fluid.layers.mean(x=last)
+        loss = fluid.layers.mean(last)
         append_backward(loss)
         static_input_grad = self._program.global_block().var(
             framework.grad_var_name('static_input_tensor'))

diff --git a/python/paddle/v2/fluid/tests/unittests/test_inference_model_io.py b/python/paddle/v2/fluid/tests/unittests/test_inference_model_io.py
index e381312ccc..62abe99aa2 100644
--- a/python/paddle/v2/fluid/tests/unittests/test_inference_model_io.py
+++ b/python/paddle/v2/fluid/tests/unittests/test_inference_model_io.py
@@ -38,7 +38,7 @@ class TestBook(unittest.TestCase):
         y_predict = layers.fc(input=x, size=1, act=None)

         cost = layers.square_error_cost(input=y_predict, label=y)
-        avg_cost = layers.mean(x=cost)
+        avg_cost = layers.mean(cost)

         sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
         sgd_optimizer.minimize(avg_cost, init_program)

diff --git a/python/paddle/v2/fluid/tests/unittests/test_layers.py b/python/paddle/v2/fluid/tests/unittests/test_layers.py
index e757598bba..149ac347ce 100644
--- a/python/paddle/v2/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/v2/fluid/tests/unittests/test_layers.py
@@ -30,7 +30,7 @@ class TestBook(unittest.TestCase):
         y_predict = layers.fc(input=x, size=1, act=None)
         y = layers.data(name='y', shape=[1], dtype='float32')
         cost = layers.square_error_cost(input=y_predict, label=y)
-        avg_cost = layers.mean(x=cost)
+        avg_cost = layers.mean(cost)
         self.assertIsNotNone(avg_cost)
         program.append_backward(avg_cost)
@@ -49,7 +49,7 @@ class TestBook(unittest.TestCase):
             act='softmax',
             param_attr=["sftmax.w1", "sftmax.w2"])
         cost = layers.cross_entropy(input=predict, label=label)
-        avg_cost = layers.mean(x=cost)
+        avg_cost = layers.mean(cost)
         self.assertIsNotNone(avg_cost)
         print(str(program))
@@ -92,7 +92,7 @@ class TestBook(unittest.TestCase):
         predict = layers.fc(input=conv_pool_2, size=10, act="softmax")
         cost = layers.cross_entropy(input=predict, label=label)
-        avg_cost = layers.mean(x=cost)
+        avg_cost = layers.mean(cost)
         program.append_backward(avg_cost)
@@ -140,7 +140,7 @@ class TestBook(unittest.TestCase):
             size=dict_size,
             act='softmax')
         cost = layers.cross_entropy(input=predict_word, label=next_word)
-        avg_cost = layers.mean(x=cost)
+        avg_cost = layers.mean(cost)
         self.assertIsNotNone(avg_cost)
         print(str(program))
@@ -287,7 +287,7 @@ class TestBook(unittest.TestCase):
             num_total_classes=dict_size,
             param_attr='nce.w',
             bias_attr='nce.b')
-        avg_loss = layers.mean(x=loss)
+        avg_loss = layers.mean(loss)
         self.assertIsNotNone(avg_loss)
         print(str(default_main_program()))

diff --git a/python/paddle/v2/fluid/tests/unittests/test_lod_tensor_array_ops.py b/python/paddle/v2/fluid/tests/unittests/test_lod_tensor_array_ops.py
index ebc0a2f714..8c59bbb407 100644
--- a/python/paddle/v2/fluid/tests/unittests/test_lod_tensor_array_ops.py
+++ b/python/paddle/v2/fluid/tests/unittests/test_lod_tensor_array_ops.py
@@ -182,7 +182,7 @@ class TestCPULoDTensorArrayOpGrad(unittest.TestCase):
         array = layers.lod_tensor_to_array(x, table)
         result = layers.array_to_lod_tensor(array, table)

-        mean = layers.mean(x=result)
+        mean = layers.mean(result)

         append_backward(mean)

diff --git a/python/paddle/v2/fluid/tests/unittests/test_memory_optimization_transpiler.py b/python/paddle/v2/fluid/tests/unittests/test_memory_optimization_transpiler.py
index a276db581e..9d5f90c627 100644
--- a/python/paddle/v2/fluid/tests/unittests/test_memory_optimization_transpiler.py
+++ b/python/paddle/v2/fluid/tests/unittests/test_memory_optimization_transpiler.py
@@ -29,7 +29,7 @@ class TestControlFlowGraph(unittest.TestCase):
             y_predict = layers.fc(input=x, size=1, act=None)
             y = layers.data(name='y', shape=[1], dtype='float32')
             cost = layers.square_error_cost(input=y_predict, label=y)
-            avg_cost = layers.mean(x=cost)
+            avg_cost = layers.mean(cost)
             opt = optimizer.SGD(learning_rate=0.001)
             opt = opt.minimize(avg_cost)

diff --git a/python/paddle/v2/fluid/tests/unittests/test_parallel_op.py b/python/paddle/v2/fluid/tests/unittests/test_parallel_op.py
index d65752608b..8ace41020e 100644
--- a/python/paddle/v2/fluid/tests/unittests/test_parallel_op.py
+++ b/python/paddle/v2/fluid/tests/unittests/test_parallel_op.py
@@ -127,7 +127,7 @@ class BaseParallelForTest(unittest.TestCase):
             data = next(generator)
             loss = generator.send(data)
             self.assertIsNotNone(loss)
-            avg_loss = fluid.layers.mean(x=loss)
+            avg_loss = fluid.layers.mean(loss)
             fluid.backward.append_backward(loss=avg_loss)

             exe = fluid.Executor(place)
@@ -170,7 +170,7 @@ class ParallelOpTest(BaseParallelForTest):
         x = fluid.layers.data(shape=[784], dtype='float32', name='img')
         x = yield x
         hidden = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
-        loss = fluid.layers.mean(x=hidden)
+        loss = fluid.layers.mean(hidden)
         yield loss

     def test_simple_fc(self):
@@ -200,7 +200,7 @@ class ParallelOpTestMultipleInput(BaseParallelForTest):
         hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
         hidden2 = fluid.layers.fc(input=hidden1, size=200, param_attr='fc2.w')
         hidden3 = fluid.layers.fc(input=hidden2, size=200, param_attr='fc3.w')
-        loss = fluid.layers.mean(x=hidden3)
+        loss = fluid.layers.mean(hidden3)
         yield loss

     def test_simple_fc(self):

diff --git a/python/paddle/v2/fluid/tests/unittests/test_print_op.py b/python/paddle/v2/fluid/tests/unittests/test_print_op.py
index 1e49ce994b..d11e3aeddf 100644
--- a/python/paddle/v2/fluid/tests/unittests/test_print_op.py
+++ b/python/paddle/v2/fluid/tests/unittests/test_print_op.py
@@ -35,7 +35,7 @@ class TestPrintOpCPU(unittest.TestCase):
         x.stop_gradient = False
         printed = layers.Print(input=x, **kargs)
         if only_forward: return printed
-        loss = layers.mean(x=printed)
+        loss = layers.mean(printed)
         append_backward(loss=loss)
         return loss

diff --git a/python/paddle/v2/fluid/tests/unittests/test_profiler.py b/python/paddle/v2/fluid/tests/unittests/test_profiler.py
index 62bfb2b8e2..b4b8a58286 100644
--- a/python/paddle/v2/fluid/tests/unittests/test_profiler.py
+++ b/python/paddle/v2/fluid/tests/unittests/test_profiler.py
@@ -54,7 +54,7 @@ class TestProfiler(unittest.TestCase):
         predict = fluid.layers.fc(input=hidden2, size=10, act='softmax')
         label = fluid.layers.data(name='y', shape=[1], dtype='int64')
         cost = fluid.layers.cross_entropy(input=predict, label=label)
-        avg_cost = fluid.layers.mean(x=cost)
+        avg_cost = fluid.layers.mean(cost)
         accuracy = fluid.evaluator.Accuracy(input=predict, label=label)

         optimizer = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9)

diff --git a/python/paddle/v2/fluid/tests/unittests/test_recurrent_op.py b/python/paddle/v2/fluid/tests/unittests/test_recurrent_op.py
index 177d8fc65f..0e747936fd 100644
--- a/python/paddle/v2/fluid/tests/unittests/test_recurrent_op.py
+++ b/python/paddle/v2/fluid/tests/unittests/test_recurrent_op.py
@@ -127,7 +127,7 @@ class RecurrentOpTest1(unittest.TestCase):
         self.output_shape = (self.sent_len, self.batch_size, self.input_dim)
         self.py_rnn = PySimpleRNN1(self.input_shape, self.output_shape)

-        self.output = layers.mean(x=self.create_rnn_op(), **self.p_info)
+        self.output = layers.mean(self.create_rnn_op(), **self.p_info)

     def create_rnn_op(self):
         x = layers.data(
@@ -261,7 +261,7 @@ class RecurrentOpTest2(RecurrentOpTest1):
         self.output_shape = (self.sent_len, self.batch_size, self.input_dim)
         self.py_rnn = PySimpleRNN2(self.input_shape, self.output_shape)

-        self.output = layers.mean(x=self.create_rnn_op(), **self.p_info)
+        self.output = layers.mean(self.create_rnn_op(), **self.p_info)

     def create_rnn_op(self):
         x = layers.data(
@@ -360,7 +360,7 @@ class RecurrentOpMultipleMemoryTest(RecurrentOpTest1):
         self.py_rnn = RecurrentOpMultipleMemoryTest.PySimpleRNN3(
             self.input_shape, self.output_shape)

-        self.output = layers.mean(x=self.create_rnn_op(), **self.p_info)
+        self.output = layers.mean(self.create_rnn_op(), **self.p_info)

     def create_rnn_op(self):
         x = layers.data(
@@ -444,7 +444,7 @@ class RecurrentOpNoMemBootTest(RecurrentOpTest1):
         self.output_shape = (self.sent_len, self.batch_size, self.input_dim)
         self.py_rnn = RecurrentOpNoMemBootTest.PySimpleRNN4(self.input_shape,
                                                             self.output_shape)
-        self.output = layers.mean(x=self.create_rnn_op(), **self.p_info)
+        self.output = layers.mean(self.create_rnn_op(), **self.p_info)
         print self.main_program

     def create_rnn_op(self):

diff --git a/python/paddle/v2/fluid/tests/unittests/test_registry.py b/python/paddle/v2/fluid/tests/unittests/test_registry.py
index 82527a6ec7..b0ec218ab3 100644
--- a/python/paddle/v2/fluid/tests/unittests/test_registry.py
+++ b/python/paddle/v2/fluid/tests/unittests/test_registry.py
@@ -22,7 +22,7 @@ class TestRegistry(unittest.TestCase):
     @decorators.prog_scope()
     def test_registry_layer(self):
         x = fluid.layers.data(name='X', shape=[10, 10], dtype='float32')
-        output = fluid.layers.mean(x=x)
+        output = fluid.layers.mean(x)
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)

diff --git a/python/paddle/v2/fluid/tests/unittests/test_shrink_rnn_memory.py b/python/paddle/v2/fluid/tests/unittests/test_shrink_rnn_memory.py
index 48874ba8a5..c991fbbaa2 100644
--- a/python/paddle/v2/fluid/tests/unittests/test_shrink_rnn_memory.py
+++ b/python/paddle/v2/fluid/tests/unittests/test_shrink_rnn_memory.py
@@ -39,7 +39,7 @@ class TestShrinkRNNMemoryBase(unittest.TestCase):
         i = layers.increment(x=i)
         i.stop_gradient = True
         self.mem3 = layers.shrink_memory(x=self.mem2, i=i, table=table)
-        mem3_mean = layers.mean(x=self.mem3)
+        mem3_mean = layers.mean(self.mem3)
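Every hunk in the patch above makes the same mechanical change: layers.mean now takes its input tensor as the first positional argument instead of through the x= keyword. A minimal sketch of the migrated call style, assuming the fluid import layout used throughout this series (the small regression network is illustrative, not taken from any single test):

    import paddle.v2.fluid as fluid

    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    y_predict = fluid.layers.fc(input=x, size=1, act=None)
    cost = fluid.layers.square_error_cost(input=y_predict, label=y)

    # before this patch: avg_cost = fluid.layers.mean(x=cost)
    avg_cost = fluid.layers.mean(cost)

    sgd = fluid.optimizer.SGD(learning_rate=0.001)
    sgd.minimize(avg_cost)

Callers that mixed the x= keyword with other keywords, such as layers.mean(x=self.create_rnn_op(), **self.p_info) in test_recurrent_op.py, move the former x argument to the front and keep the remaining keywords unchanged.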
         append_backward(loss=mem3_mean)
         self.x_grad = self.main_program.global_block().var('x@GRAD')

diff --git a/python/paddle/v2/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py b/python/paddle/v2/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py
index 48e6756a86..8cfbd7881a 100644
--- a/python/paddle/v2/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py
+++ b/python/paddle/v2/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py
@@ -145,7 +145,7 @@ class TestCPUSplitMergeLoDTensorGrad(unittest.TestCase):
             input=x, mask=y, level=level)
         out = layers.merge_lod_tensor(
             in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
-        mean = layers.mean(x=out)
+        mean = layers.mean(out)

         append_backward(mean)

diff --git a/python/paddle/v2/fluid/tests/unittests/test_while_op.py b/python/paddle/v2/fluid/tests/unittests/test_while_op.py
index 3fa1d5e0ed..3d2a9faf32 100644
--- a/python/paddle/v2/fluid/tests/unittests/test_while_op.py
+++ b/python/paddle/v2/fluid/tests/unittests/test_while_op.py
@@ -58,7 +58,7 @@ class TestWhileOp(unittest.TestCase):
             layers.less_than(x=i, y=array_len, cond=cond)

         sum_result = layers.array_read(array=mem_array, i=i)
-        loss = layers.mean(x=sum_result)
+        loss = layers.mean(sum_result)
         append_backward(loss)
--
GitLab

From 44e301541256b966104bc1c67cab8638a0dafcfe Mon Sep 17 00:00:00 2001
From: QI JUN
Date: Sat, 24 Feb 2018 16:29:59 +0800
Subject: [PATCH 164/217] fix nccl version (#8540)

* fix nccl version
* enable nccl test
---
 Dockerfile                         | 3 ++-
 paddle/fluid/platform/nccl_test.cu | 3 ---
 paddle/scripts/docker/build.sh     | 2 +-
 3 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 6ac9901ac6..60e76c7f2e 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -22,7 +22,8 @@ COPY ./paddle/scripts/docker/root/ /root/

 RUN apt-get update && \
     apt-get install -y \
-    git python-pip python-dev openssh-server bison libnccl-dev \
+    git python-pip python-dev openssh-server bison \
+    libnccl2=2.1.2-1+cuda8.0 libnccl-dev=2.1.2-1+cuda8.0 \
     wget unzip unrar tar xz-utils bzip2 gzip coreutils ntp \
     curl sed grep graphviz libjpeg-dev zlib1g-dev \
     python-matplotlib gcc-4.8 g++-4.8 \

diff --git a/paddle/fluid/platform/nccl_test.cu b/paddle/fluid/platform/nccl_test.cu
index 212ea8517e..32a293796c 100644
--- a/paddle/fluid/platform/nccl_test.cu
+++ b/paddle/fluid/platform/nccl_test.cu
@@ -129,9 +129,6 @@ TEST(NCCL, all_reduce) {
 }  // namespace paddle

 int main(int argc, char** argv) {
-  // FIXME(tonyyang-svail):
-  //    Due to the driver issue on our CI, disable for now
-  return 0;
   dev_count = paddle::platform::GetCUDADeviceCount();
   if (dev_count <= 1) {
     LOG(WARNING)

diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh
index 56fa138786..8ec3d0c657 100644
--- a/paddle/scripts/docker/build.sh
+++ b/paddle/scripts/docker/build.sh
@@ -171,7 +171,7 @@ EOF
 EOF

     if [[ ${WITH_GPU} == "ON" ]]; then
-      NCCL_DEPS="apt-get install -y libnccl-dev &&"
+      NCCL_DEPS="apt-get install -y libnccl2=2.1.2-1+cuda8.0 libnccl-dev=2.1.2-1+cuda8.0 &&"
     else
       NCCL_DEPS=""
     fi
--
GitLab

From 28ff1cdaa6d0d6f6dabb0668218d99fc88260008 Mon Sep 17 00:00:00 2001
From: qiaolongfei
Date: Sat, 24 Feb 2018 16:42:17 +0800
Subject: [PATCH 165/217] create learning rate for each program

---
 python/paddle/v2/fluid/optimizer.py | 50 ++++++++++++++++++-----------
 1 file changed, 31 insertions(+), 19 deletions(-)

diff --git a/python/paddle/v2/fluid/optimizer.py b/python/paddle/v2/fluid/optimizer.py
index 61febc4e38..0b3e019d80 100644
--- a/python/paddle/v2/fluid/optimizer.py
+++ b/python/paddle/v2/fluid/optimizer.py
@@ -36,10 +36,15 @@ class Optimizer(object):
     """

     def __init__(self, learning_rate, global_step=None, regularization=None):
-        assert learning_rate is not None
+        if not isinstance(learning_rate, float) and \
+                not isinstance(learning_rate, framework.Variable):
+            raise ValueError("learning rate should be float or Variable")
         self._global_step = global_step
         self.regularization = regularization
-        self._global_learning_rate = learning_rate
+        self._learning_rate = learning_rate
+        # each program should have an independent learning rate
+        # program -> Variable(learning_rate)
+        self._learning_rate_map = defaultdict(lambda: None)
         # Dictionary of accumulators. Some optimizer subclasses need to
         # allocate and manage extra variables associated with the parameters
         # to train. These variables are called accumulators.
@@ -48,26 +53,33 @@ class Optimizer(object):
         self.helper = None

     def _create_global_learning_rate(self):
-        if isinstance(self._global_learning_rate, float):
-            self._global_learning_rate = layers.create_global_var(
-                name=unique_name.generate("learning_rate"),
-                shape=[1],
-                value=float(self._global_learning_rate),
-                dtype='float32',
-                persistable=True)
-
-        if not isinstance(self._global_learning_rate, framework.Variable):
-            raise ValueError("learning rate should be a Variable, "
-                             "actual type is %s",
-                             type(self._global_learning_rate))
-
-    @property
-    def global_learning_rate(self):
+        lr = self.global_learning_rate()
+
+        if isinstance(lr, framework.Variable):
+            return
+        else:
+            if not isinstance(self._learning_rate, float):
+                raise ValueError(
+                    "learning rate variable is created outside optimizer,"
+                    "cannot create new learning rate variable for new program")
+
+            # create learning rate in the current main program
+            self._learning_rate_map[framework.default_main_program(
+            )] = layers.create_global_var(
+                name=unique_name.generate("learning_rate"),
+                shape=[1],
+                value=float(self._learning_rate),
+                dtype='float32',
+                persistable=True)
+
+    def global_learning_rate(self, program=None):
         """
         get global decayed learning rate
        :return:
        """
-        return self._global_learning_rate
+        if program is None:
+            program = framework.default_main_program()
+        return self._learning_rate_map[program]

     def _append_optimize_op(self, block, param_and_grad):
         """ append optimize operator to block and return all the added optimize_op
@@ -78,7 +90,7 @@ class Optimizer(object):
         # create learning rate variable for every parameter
         param = param_and_grad[0]
         param_lr = param.optimize_attr['learning_rate']
-        return self._global_learning_rate * param_lr
+        return self.global_learning_rate() * param_lr

     def _create_accumulators(self, block, parameters):
         """Create all accumulators needed by the parameters
--
GitLab
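The defaultdict introduced above keys learning-rate variables by program, so one optimizer instance can be minimized against several programs without its learning_rate variables colliding, and global_learning_rate(program) resolves the variable belonging to each program. A rough usage sketch under that reading of the patch (the two single-layer programs are illustrative only):

    import paddle.v2.fluid as fluid

    # one optimizer reused across two separately built programs
    sgd = fluid.optimizer.SGD(learning_rate=0.01)

    def build(main_prog, startup_prog):
        with fluid.program_guard(main_prog, startup_prog):
            x = fluid.layers.data(name='x', shape=[4], dtype='float32')
            cost = fluid.layers.mean(fluid.layers.fc(input=x, size=1))
            # minimize() creates a learning_rate variable inside the
            # program that is current at this point
            sgd.minimize(cost)

    prog_a, prog_b = fluid.Program(), fluid.Program()
    build(prog_a, fluid.Program())
    build(prog_b, fluid.Program())

    # each program resolves to its own learning-rate variable
    assert sgd.global_learning_rate(prog_a) is not sgd.global_learning_rate(prog_b)

Passing a Variable as learning_rate still works for a single program; the new ValueError fires only when such an optimizer is reused on a second program, where no fresh learning-rate variable can be created on its behalf.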
From b11956a0b56a293cfcf96e9810e1490c33ea7b07 Mon Sep 17 00:00:00 2001
From: Luo Tao
Date: Sat, 24 Feb 2018 17:10:24 +0800
Subject: [PATCH 166/217] move Fluid API code out of V2 API code

---
 python/paddle/{v2 => }/fluid/.gitignore | 0 python/paddle/{v2 => }/fluid/__init__.py | 0 python/paddle/{v2 => }/fluid/backward.py | 0 python/paddle/{v2 => }/fluid/clip.py | 0 python/paddle/{v2 => }/fluid/concurrency.py | 0 python/paddle/{v2 => }/fluid/data_feeder.py | 0 python/paddle/{v2 => }/fluid/debuger.py | 0 python/paddle/{v2 => }/fluid/default_scope_funcs.py | 0 python/paddle/{v2 => }/fluid/distribute_transpiler.py | 0 python/paddle/{v2 => }/fluid/distribute_transpiler_simple.py | 0 python/paddle/{v2 => }/fluid/distributed_spliter.py | 0 python/paddle/{v2
=> }/fluid/evaluator.py | 0 python/paddle/{v2 => }/fluid/executor.py | 0 python/paddle/{v2 => }/fluid/framework.py | 0 python/paddle/{v2 => }/fluid/graphviz.py | 0 python/paddle/{v2 => }/fluid/initializer.py | 0 python/paddle/{v2 => }/fluid/io.py | 0 python/paddle/{v2 => }/fluid/layer_helper.py | 0 python/paddle/{v2 => }/fluid/layers/__init__.py | 0 python/paddle/{v2 => }/fluid/layers/control_flow.py | 0 python/paddle/{v2 => }/fluid/layers/detection.py | 0 python/paddle/{v2 => }/fluid/layers/device.py | 0 python/paddle/{v2 => }/fluid/layers/io.py | 0 python/paddle/{v2 => }/fluid/layers/layer_function_generator.py | 0 python/paddle/{v2 => }/fluid/layers/math_op_patch.py | 0 python/paddle/{v2 => }/fluid/layers/nn.py | 0 python/paddle/{v2 => }/fluid/layers/ops.py | 0 python/paddle/{v2 => }/fluid/layers/tensor.py | 0 python/paddle/{v2 => }/fluid/learning_rate_decay.py | 0 python/paddle/{v2 => }/fluid/memory_optimization_transpiler.py | 0 python/paddle/{v2 => }/fluid/net_drawer.py | 0 python/paddle/{v2 => }/fluid/nets.py | 0 python/paddle/{v2 => }/fluid/op.py | 0 python/paddle/{v2 => }/fluid/optimizer.py | 0 python/paddle/{v2 => }/fluid/param_attr.py | 0 python/paddle/{v2 => }/fluid/profiler.py | 0 python/paddle/{v2 => }/fluid/regularizer.py | 0 python/paddle/{v2 => }/fluid/tests/.gitignore | 0 python/paddle/{v2 => }/fluid/tests/CMakeLists.txt | 0 python/paddle/{v2 => }/fluid/tests/__init__.py | 0 python/paddle/{v2 => }/fluid/tests/book/.gitignore | 0 python/paddle/{v2 => }/fluid/tests/book/CMakeLists.txt | 0 python/paddle/{v2 => }/fluid/tests/book/__init__.py | 0 .../paddle/{v2 => }/fluid/tests/book/notest_rnn_encoder_decoer.py | 0 python/paddle/{v2 => }/fluid/tests/book/test_fit_a_line.py | 0 .../paddle/{v2 => }/fluid/tests/book/test_image_classification.py | 0 .../paddle/{v2 => }/fluid/tests/book/test_label_semantic_roles.py | 0 .../paddle/{v2 => }/fluid/tests/book/test_machine_translation.py | 0 python/paddle/{v2 => }/fluid/tests/book/test_recognize_digits.py | 0 .../paddle/{v2 => }/fluid/tests/book/test_recommender_system.py | 0 .../paddle/{v2 => }/fluid/tests/book/test_understand_sentiment.py | 0 python/paddle/{v2 => }/fluid/tests/book/test_word2vec.py | 0 python/paddle/{v2 => }/fluid/tests/book_distribute/CMakeLists.txt | 0 .../fluid/tests/book_distribute/notest_dist_fit_a_line.py | 0 .../tests/book_distribute/notest_dist_image_classification.py | 0 .../tests/book_distribute/notest_dist_label_semantic_roles.py | 0 .../{v2 => }/fluid/tests/book_distribute/notest_dist_word2vec.py | 0 .../fluid/tests/book_distribute/notest_machine_translation.py | 0 .../tests/book_distribute/notest_recognize_digits_conv_dist.py | 0 .../tests/book_distribute/notest_recognize_digits_mlp_dist.py | 0 .../fluid/tests/book_distribute/notest_recommender_system_dist.py | 0 .../book_distribute/notest_understand_sentiment_conv_dist.py | 0 .../book_distribute/notest_understand_sentiment_dynamic_lstm.py | 0 .../{v2 => }/fluid/tests/book_memory_optimization/CMakeLists.txt | 0 .../tests/book_memory_optimization/test_memopt_fit_a_line.py | 0 .../test_memopt_image_classification_train.py | 0 .../book_memory_optimization/test_memopt_machine_translation.py | 0 python/paddle/{v2 => }/fluid/tests/demo/fc_gan.py | 0 python/paddle/{v2 => }/fluid/tests/notest_concurrency.py | 0 python/paddle/{v2 => }/fluid/tests/notest_csp.py | 0 python/paddle/{v2 => }/fluid/tests/test_cpp_reader.py | 0 python/paddle/{v2 => }/fluid/tests/test_data_feeder.py | 0 python/paddle/{v2 => }/fluid/tests/test_detection.py | 0 python/paddle/{v2 => 
}/fluid/tests/test_error_clip.py | 0 python/paddle/{v2 => }/fluid/tests/test_gradient_clip.py | 0 python/paddle/{v2 => }/fluid/tests/test_mnist_if_else_op.py | 0 .../{v2 => }/fluid/tests/test_python_operator_overriding.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/CMakeLists.txt | 0 python/paddle/{v2 => }/fluid/tests/unittests/__init__.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/decorators.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/op_test.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_accuracy_op.py | 0 .../paddle/{v2 => }/fluid/tests/unittests/test_activation_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_adadelta_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_adagrad_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_adam_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_adamax_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_array_read_write_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_assign_op.py | 0 .../paddle/{v2 => }/fluid/tests/unittests/test_assign_value_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_auc_op.py | 0 .../paddle/{v2 => }/fluid/tests/unittests/test_batch_norm_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_beam_search_decode_op.py | 0 .../paddle/{v2 => }/fluid/tests/unittests/test_beam_search_op.py | 0 .../fluid/tests/unittests/test_bilinear_tensor_product_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_bipartite_match_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_box_coder_op.py | 0 .../paddle/{v2 => }/fluid/tests/unittests/test_calc_gradient.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_cast_op.py | 0 .../paddle/{v2 => }/fluid/tests/unittests/test_chunk_eval_op.py | 0 .../paddle/{v2 => }/fluid/tests/unittests/test_clip_by_norm_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_clip_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_compare_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_concat_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_cond_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_conditional_block.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_const_value.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_conv2d_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_conv2d_transpose_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_conv3d_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_conv3d_transpose_op.py | 0 .../paddle/{v2 => }/fluid/tests/unittests/test_conv_shift_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_cos_sim_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_create_op_doc_string.py | 0 .../paddle/{v2 => }/fluid/tests/unittests/test_crf_decoding_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_crop_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_cross_entropy_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_ctc_align.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_cumsum_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_decayed_adagrad_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_default_scope_funcs.py | 0 .../{v2 => }/fluid/tests/unittests/test_detection_map_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_detection_output_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_dropout_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_dyn_rnn.py | 0 .../{v2 => }/fluid/tests/unittests/test_dynrnn_gradient_check.py | 0 .../{v2 => 
}/fluid/tests/unittests/test_dynrnn_static_input.py | 0 .../{v2 => }/fluid/tests/unittests/test_edit_distance_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_elementwise_add_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_elementwise_div_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_elementwise_max_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_elementwise_min_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_elementwise_mul_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_elementwise_pow_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_elementwise_sub_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_exception.py | 0 .../{v2 => }/fluid/tests/unittests/test_executor_and_mul.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_expand_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_feed_fetch_method.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_fetch_var.py | 0 .../tests/unittests/test_fill_constant_batch_size_like_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_fill_constant_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_fill_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_fill_zeros_like_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_framework_debug_str.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_ftrl_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_gather_op.py | 0 .../tests/unittests/test_gaussian_random_batch_size_like_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_gaussian_random_op.py | 0 .../paddle/{v2 => }/fluid/tests/unittests/test_get_places_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_gru_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_gru_unit_op.py | 0 .../paddle/{v2 => }/fluid/tests/unittests/test_hinge_loss_op.py | 0 .../paddle/{v2 => }/fluid/tests/unittests/test_huber_loss_op.py | 0 .../paddle/{v2 => }/fluid/tests/unittests/test_im2sequence_op.py | 0 .../fluid/tests/unittests/test_image_classification_layer.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_infer_shape.py | 0 .../{v2 => }/fluid/tests/unittests/test_inference_model_io.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_initializer.py | 0 .../{v2 => }/fluid/tests/unittests/test_iou_similarity_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_is_empty_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_l1_norm_op.py | 0 .../paddle/{v2 => }/fluid/tests/unittests/test_label_smooth_op.py | 0 .../paddle/{v2 => }/fluid/tests/unittests/test_layer_norm_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_layers.py | 0 .../{v2 => }/fluid/tests/unittests/test_learning_rate_decay.py | 0 .../{v2 => }/fluid/tests/unittests/test_linear_chain_crf_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_lod_array_length_op.py | 0 .../paddle/{v2 => }/fluid/tests/unittests/test_lod_rank_table.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_lod_reset_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_lod_tensor_array.py | 0 .../{v2 => }/fluid/tests/unittests/test_lod_tensor_array_ops.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_log_loss_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_logical_op.py | 0 .../paddle/{v2 => }/fluid/tests/unittests/test_lookup_table_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_lrn_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_lstm_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_lstm_unit_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_lstmp_op.py | 0 .../{v2 => 
}/fluid/tests/unittests/test_margin_rank_loss_op.py | 0 .../paddle/{v2 => }/fluid/tests/unittests/test_math_op_patch.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_matmul_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_maxout_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_mean_op.py | 0 .../fluid/tests/unittests/test_memory_optimization_transpiler.py | 0 .../{v2 => }/fluid/tests/unittests/test_mine_hard_examples_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_minus_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_modified_huber_loss_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_momentum_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_mul_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_multiclass_nms_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_multihead_attention.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_multiplex_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_nce.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_net.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_norm_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_normalization_wrapper.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_one_hot_op.py | 0 .../paddle/{v2 => }/fluid/tests/unittests/test_op_support_gpu.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_operator.py | 0 .../paddle/{v2 => }/fluid/tests/unittests/test_operator_desc.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_optimizer.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_pad_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_parallel_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_parameter.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_pool2d_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_pool3d_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_pool_max_op.py | 0 .../fluid/tests/unittests/test_positive_negative_pair_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_precision_recall_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_prelu_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_print_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_prior_box_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_profiler.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_program.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_protobuf.py | 0 .../paddle/{v2 => }/fluid/tests/unittests/test_protobuf_descs.py | 0 .../{v2 => }/fluid/tests/unittests/test_proximal_adagrad_op.py | 0 .../paddle/{v2 => }/fluid/tests/unittests/test_proximal_gd_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_rank_loss_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_recurrent_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_recv_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_reduce_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_registry.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_regularizer.py | 0 .../{v2 => }/fluid/tests/unittests/test_reorder_lod_tensor.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_reshape_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_rmsprop_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_rnn_memory_helper_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_roi_pool_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_row_conv_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_scale_op.py | 0 
python/paddle/{v2 => }/fluid/tests/unittests/test_scatter_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_scope.py | 0 .../paddle/{v2 => }/fluid/tests/unittests/test_selected_rows.py | 0 .../paddle/{v2 => }/fluid/tests/unittests/test_seq_concat_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_seq_conv.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_seq_pool.py | 0 .../{v2 => }/fluid/tests/unittests/test_sequence_erase_op.py | 0 .../paddle/{v2 => }/fluid/tests/unittests/test_sequence_expand.py | 0 .../{v2 => }/fluid/tests/unittests/test_sequence_reshape.py | 0 .../{v2 => }/fluid/tests/unittests/test_sequence_slice_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_sequence_softmax_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_sgd_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_shrink_rnn_memory.py | 0 .../tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_sign_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_smooth_l1_loss_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_softmax_op.py | 0 .../fluid/tests/unittests/test_softmax_with_cross_entropy_op.py | 0 .../fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_split_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_split_selected_rows_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_split_var.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_spp_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_squared_l2_distance_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_squared_l2_norm_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_sum_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_switch.py | 0 .../{v2 => }/fluid/tests/unittests/test_target_assign_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_tensor.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_top_k_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_transpose_op.py | 0 .../tests/unittests/test_uniform_random_batch_size_like_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_uniform_random_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_unique_name.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_unpool_op.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_variable.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_warpctc_op.py | 0 .../{v2 => }/fluid/tests/unittests/test_weight_normalization.py | 0 python/paddle/{v2 => }/fluid/tests/unittests/test_while_op.py | 0 python/paddle/{v2 => }/fluid/unique_name.py | 0 272 files changed, 0 insertions(+), 0 deletions(-) rename python/paddle/{v2 => }/fluid/.gitignore (100%) rename python/paddle/{v2 => }/fluid/__init__.py (100%) rename python/paddle/{v2 => }/fluid/backward.py (100%) rename python/paddle/{v2 => }/fluid/clip.py (100%) rename python/paddle/{v2 => }/fluid/concurrency.py (100%) rename python/paddle/{v2 => }/fluid/data_feeder.py (100%) rename python/paddle/{v2 => }/fluid/debuger.py (100%) rename python/paddle/{v2 => }/fluid/default_scope_funcs.py (100%) rename python/paddle/{v2 => }/fluid/distribute_transpiler.py (100%) rename python/paddle/{v2 => }/fluid/distribute_transpiler_simple.py (100%) rename python/paddle/{v2 => }/fluid/distributed_spliter.py (100%) rename python/paddle/{v2 => }/fluid/evaluator.py (100%) rename python/paddle/{v2 => }/fluid/executor.py (100%) rename python/paddle/{v2 => }/fluid/framework.py (100%) rename python/paddle/{v2 => 
}/fluid/graphviz.py (100%) rename python/paddle/{v2 => }/fluid/initializer.py (100%) rename python/paddle/{v2 => }/fluid/io.py (100%) rename python/paddle/{v2 => }/fluid/layer_helper.py (100%) rename python/paddle/{v2 => }/fluid/layers/__init__.py (100%) rename python/paddle/{v2 => }/fluid/layers/control_flow.py (100%) rename python/paddle/{v2 => }/fluid/layers/detection.py (100%) rename python/paddle/{v2 => }/fluid/layers/device.py (100%) rename python/paddle/{v2 => }/fluid/layers/io.py (100%) rename python/paddle/{v2 => }/fluid/layers/layer_function_generator.py (100%) rename python/paddle/{v2 => }/fluid/layers/math_op_patch.py (100%) rename python/paddle/{v2 => }/fluid/layers/nn.py (100%) rename python/paddle/{v2 => }/fluid/layers/ops.py (100%) rename python/paddle/{v2 => }/fluid/layers/tensor.py (100%) rename python/paddle/{v2 => }/fluid/learning_rate_decay.py (100%) rename python/paddle/{v2 => }/fluid/memory_optimization_transpiler.py (100%) rename python/paddle/{v2 => }/fluid/net_drawer.py (100%) rename python/paddle/{v2 => }/fluid/nets.py (100%) rename python/paddle/{v2 => }/fluid/op.py (100%) rename python/paddle/{v2 => }/fluid/optimizer.py (100%) rename python/paddle/{v2 => }/fluid/param_attr.py (100%) rename python/paddle/{v2 => }/fluid/profiler.py (100%) rename python/paddle/{v2 => }/fluid/regularizer.py (100%) rename python/paddle/{v2 => }/fluid/tests/.gitignore (100%) rename python/paddle/{v2 => }/fluid/tests/CMakeLists.txt (100%) rename python/paddle/{v2 => }/fluid/tests/__init__.py (100%) rename python/paddle/{v2 => }/fluid/tests/book/.gitignore (100%) rename python/paddle/{v2 => }/fluid/tests/book/CMakeLists.txt (100%) rename python/paddle/{v2 => }/fluid/tests/book/__init__.py (100%) rename python/paddle/{v2 => }/fluid/tests/book/notest_rnn_encoder_decoer.py (100%) rename python/paddle/{v2 => }/fluid/tests/book/test_fit_a_line.py (100%) rename python/paddle/{v2 => }/fluid/tests/book/test_image_classification.py (100%) rename python/paddle/{v2 => }/fluid/tests/book/test_label_semantic_roles.py (100%) rename python/paddle/{v2 => }/fluid/tests/book/test_machine_translation.py (100%) rename python/paddle/{v2 => }/fluid/tests/book/test_recognize_digits.py (100%) rename python/paddle/{v2 => }/fluid/tests/book/test_recommender_system.py (100%) rename python/paddle/{v2 => }/fluid/tests/book/test_understand_sentiment.py (100%) rename python/paddle/{v2 => }/fluid/tests/book/test_word2vec.py (100%) rename python/paddle/{v2 => }/fluid/tests/book_distribute/CMakeLists.txt (100%) rename python/paddle/{v2 => }/fluid/tests/book_distribute/notest_dist_fit_a_line.py (100%) rename python/paddle/{v2 => }/fluid/tests/book_distribute/notest_dist_image_classification.py (100%) rename python/paddle/{v2 => }/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py (100%) rename python/paddle/{v2 => }/fluid/tests/book_distribute/notest_dist_word2vec.py (100%) rename python/paddle/{v2 => }/fluid/tests/book_distribute/notest_machine_translation.py (100%) rename python/paddle/{v2 => }/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py (100%) rename python/paddle/{v2 => }/fluid/tests/book_distribute/notest_recognize_digits_mlp_dist.py (100%) rename python/paddle/{v2 => }/fluid/tests/book_distribute/notest_recommender_system_dist.py (100%) rename python/paddle/{v2 => }/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py (100%) rename python/paddle/{v2 => }/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py (100%) rename python/paddle/{v2 => 
}/fluid/tests/book_memory_optimization/CMakeLists.txt (100%) rename python/paddle/{v2 => }/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py (100%) rename python/paddle/{v2 => }/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py (100%) rename python/paddle/{v2 => }/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py (100%) rename python/paddle/{v2 => }/fluid/tests/demo/fc_gan.py (100%) rename python/paddle/{v2 => }/fluid/tests/notest_concurrency.py (100%) rename python/paddle/{v2 => }/fluid/tests/notest_csp.py (100%) rename python/paddle/{v2 => }/fluid/tests/test_cpp_reader.py (100%) rename python/paddle/{v2 => }/fluid/tests/test_data_feeder.py (100%) rename python/paddle/{v2 => }/fluid/tests/test_detection.py (100%) rename python/paddle/{v2 => }/fluid/tests/test_error_clip.py (100%) rename python/paddle/{v2 => }/fluid/tests/test_gradient_clip.py (100%) rename python/paddle/{v2 => }/fluid/tests/test_mnist_if_else_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/test_python_operator_overriding.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/CMakeLists.txt (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/__init__.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/decorators.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/op_test.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_accuracy_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_activation_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_adadelta_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_adagrad_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_adam_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_adamax_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_array_read_write_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_assign_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_assign_value_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_auc_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_batch_norm_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_beam_search_decode_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_beam_search_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_bilinear_tensor_product_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_bipartite_match_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_box_coder_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_calc_gradient.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_cast_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_chunk_eval_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_clip_by_norm_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_clip_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_compare_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_concat_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_cond_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_conditional_block.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_const_value.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_conv2d_op.py (100%) rename 
python/paddle/{v2 => }/fluid/tests/unittests/test_conv2d_transpose_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_conv3d_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_conv3d_transpose_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_conv_shift_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_cos_sim_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_create_op_doc_string.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_crf_decoding_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_crop_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_cross_entropy_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_ctc_align.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_cumsum_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_decayed_adagrad_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_default_scope_funcs.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_detection_map_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_detection_output_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_dropout_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_dyn_rnn.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_dynrnn_gradient_check.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_dynrnn_static_input.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_edit_distance_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_elementwise_add_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_elementwise_div_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_elementwise_max_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_elementwise_min_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_elementwise_mul_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_elementwise_pow_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_elementwise_sub_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_exception.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_executor_and_mul.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_expand_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_feed_fetch_method.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_fetch_var.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_fill_constant_batch_size_like_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_fill_constant_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_fill_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_fill_zeros_like_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_framework_debug_str.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_ftrl_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_gather_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_gaussian_random_batch_size_like_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_gaussian_random_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_get_places_op.py (100%) rename python/paddle/{v2 => 
}/fluid/tests/unittests/test_gru_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_gru_unit_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_hinge_loss_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_huber_loss_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_im2sequence_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_image_classification_layer.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_infer_shape.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_inference_model_io.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_initializer.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_iou_similarity_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_is_empty_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_l1_norm_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_label_smooth_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_layer_norm_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_layers.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_learning_rate_decay.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_linear_chain_crf_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_lod_array_length_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_lod_rank_table.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_lod_reset_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_lod_tensor_array.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_lod_tensor_array_ops.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_log_loss_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_logical_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_lookup_table_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_lrn_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_lstm_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_lstm_unit_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_lstmp_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_margin_rank_loss_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_math_op_patch.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_matmul_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_maxout_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_mean_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_memory_optimization_transpiler.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_mine_hard_examples_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_minus_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_modified_huber_loss_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_momentum_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_mul_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_multiclass_nms_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_multihead_attention.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_multiplex_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_nce.py (100%) 
rename python/paddle/{v2 => }/fluid/tests/unittests/test_net.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_norm_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_normalization_wrapper.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_one_hot_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_op_support_gpu.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_operator.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_operator_desc.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_optimizer.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_pad_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_parallel_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_parameter.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_pool2d_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_pool3d_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_pool_max_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_positive_negative_pair_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_precision_recall_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_prelu_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_print_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_prior_box_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_profiler.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_program.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_protobuf.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_protobuf_descs.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_proximal_adagrad_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_proximal_gd_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_rank_loss_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_recurrent_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_recv_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_reduce_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_registry.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_regularizer.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_reorder_lod_tensor.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_reshape_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_rmsprop_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_rnn_memory_helper_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_roi_pool_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_row_conv_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_scale_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_scatter_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_scope.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_selected_rows.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_seq_concat_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_seq_conv.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_seq_pool.py (100%) rename python/paddle/{v2 => 
}/fluid/tests/unittests/test_sequence_erase_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_sequence_expand.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_sequence_reshape.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_sequence_slice_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_sequence_softmax_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_sgd_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_shrink_rnn_memory.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_sign_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_smooth_l1_loss_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_softmax_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_split_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_split_selected_rows_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_split_var.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_spp_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_squared_l2_distance_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_squared_l2_norm_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_sum_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_switch.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_target_assign_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_tensor.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_top_k_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_transpose_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_uniform_random_batch_size_like_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_uniform_random_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_unique_name.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_unpool_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_variable.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_warpctc_op.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_weight_normalization.py (100%) rename python/paddle/{v2 => }/fluid/tests/unittests/test_while_op.py (100%) rename python/paddle/{v2 => }/fluid/unique_name.py (100%) diff --git a/python/paddle/v2/fluid/.gitignore b/python/paddle/fluid/.gitignore similarity index 100% rename from python/paddle/v2/fluid/.gitignore rename to python/paddle/fluid/.gitignore diff --git a/python/paddle/v2/fluid/__init__.py b/python/paddle/fluid/__init__.py similarity index 100% rename from python/paddle/v2/fluid/__init__.py rename to python/paddle/fluid/__init__.py diff --git a/python/paddle/v2/fluid/backward.py b/python/paddle/fluid/backward.py similarity index 100% rename from python/paddle/v2/fluid/backward.py rename to python/paddle/fluid/backward.py diff --git a/python/paddle/v2/fluid/clip.py b/python/paddle/fluid/clip.py similarity index 100% rename from python/paddle/v2/fluid/clip.py rename to python/paddle/fluid/clip.py diff --git 
a/python/paddle/v2/fluid/concurrency.py b/python/paddle/fluid/concurrency.py similarity index 100% rename from python/paddle/v2/fluid/concurrency.py rename to python/paddle/fluid/concurrency.py diff --git a/python/paddle/v2/fluid/data_feeder.py b/python/paddle/fluid/data_feeder.py similarity index 100% rename from python/paddle/v2/fluid/data_feeder.py rename to python/paddle/fluid/data_feeder.py diff --git a/python/paddle/v2/fluid/debuger.py b/python/paddle/fluid/debuger.py similarity index 100% rename from python/paddle/v2/fluid/debuger.py rename to python/paddle/fluid/debuger.py diff --git a/python/paddle/v2/fluid/default_scope_funcs.py b/python/paddle/fluid/default_scope_funcs.py similarity index 100% rename from python/paddle/v2/fluid/default_scope_funcs.py rename to python/paddle/fluid/default_scope_funcs.py diff --git a/python/paddle/v2/fluid/distribute_transpiler.py b/python/paddle/fluid/distribute_transpiler.py similarity index 100% rename from python/paddle/v2/fluid/distribute_transpiler.py rename to python/paddle/fluid/distribute_transpiler.py diff --git a/python/paddle/v2/fluid/distribute_transpiler_simple.py b/python/paddle/fluid/distribute_transpiler_simple.py similarity index 100% rename from python/paddle/v2/fluid/distribute_transpiler_simple.py rename to python/paddle/fluid/distribute_transpiler_simple.py diff --git a/python/paddle/v2/fluid/distributed_spliter.py b/python/paddle/fluid/distributed_spliter.py similarity index 100% rename from python/paddle/v2/fluid/distributed_spliter.py rename to python/paddle/fluid/distributed_spliter.py diff --git a/python/paddle/v2/fluid/evaluator.py b/python/paddle/fluid/evaluator.py similarity index 100% rename from python/paddle/v2/fluid/evaluator.py rename to python/paddle/fluid/evaluator.py diff --git a/python/paddle/v2/fluid/executor.py b/python/paddle/fluid/executor.py similarity index 100% rename from python/paddle/v2/fluid/executor.py rename to python/paddle/fluid/executor.py diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/fluid/framework.py similarity index 100% rename from python/paddle/v2/fluid/framework.py rename to python/paddle/fluid/framework.py diff --git a/python/paddle/v2/fluid/graphviz.py b/python/paddle/fluid/graphviz.py similarity index 100% rename from python/paddle/v2/fluid/graphviz.py rename to python/paddle/fluid/graphviz.py diff --git a/python/paddle/v2/fluid/initializer.py b/python/paddle/fluid/initializer.py similarity index 100% rename from python/paddle/v2/fluid/initializer.py rename to python/paddle/fluid/initializer.py diff --git a/python/paddle/v2/fluid/io.py b/python/paddle/fluid/io.py similarity index 100% rename from python/paddle/v2/fluid/io.py rename to python/paddle/fluid/io.py diff --git a/python/paddle/v2/fluid/layer_helper.py b/python/paddle/fluid/layer_helper.py similarity index 100% rename from python/paddle/v2/fluid/layer_helper.py rename to python/paddle/fluid/layer_helper.py diff --git a/python/paddle/v2/fluid/layers/__init__.py b/python/paddle/fluid/layers/__init__.py similarity index 100% rename from python/paddle/v2/fluid/layers/__init__.py rename to python/paddle/fluid/layers/__init__.py diff --git a/python/paddle/v2/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py similarity index 100% rename from python/paddle/v2/fluid/layers/control_flow.py rename to python/paddle/fluid/layers/control_flow.py diff --git a/python/paddle/v2/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py similarity index 100% rename from 
python/paddle/v2/fluid/layers/detection.py
rename to python/paddle/fluid/layers/detection.py

rename python/paddle/{v2/fluid => fluid}/layers/device.py (100%)
rename python/paddle/{v2/fluid => fluid}/layers/io.py (100%)
rename python/paddle/{v2/fluid => fluid}/layers/layer_function_generator.py (100%)
rename python/paddle/{v2/fluid => fluid}/layers/math_op_patch.py (100%)
rename python/paddle/{v2/fluid => fluid}/layers/nn.py (100%)
rename python/paddle/{v2/fluid => fluid}/layers/ops.py (100%)
rename python/paddle/{v2/fluid => fluid}/layers/tensor.py (100%)
rename python/paddle/{v2/fluid => fluid}/learning_rate_decay.py (100%)
rename python/paddle/{v2/fluid => fluid}/memory_optimization_transpiler.py (100%)
rename python/paddle/{v2/fluid => fluid}/net_drawer.py (100%)
rename python/paddle/{v2/fluid => fluid}/nets.py (100%)
rename python/paddle/{v2/fluid => fluid}/op.py (100%)
rename python/paddle/{v2/fluid => fluid}/optimizer.py (100%)
rename python/paddle/{v2/fluid => fluid}/param_attr.py (100%)
rename python/paddle/{v2/fluid => fluid}/profiler.py (100%)
rename python/paddle/{v2/fluid => fluid}/regularizer.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/.gitignore (100%)
rename python/paddle/{v2/fluid => fluid}/tests/CMakeLists.txt (100%)
rename python/paddle/{v2/fluid => fluid}/tests/__init__.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book/.gitignore (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book/CMakeLists.txt (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book/__init__.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book/notest_rnn_encoder_decoer.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book/test_fit_a_line.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book/test_image_classification.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book/test_label_semantic_roles.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book/test_machine_translation.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book/test_recognize_digits.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book/test_recommender_system.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book/test_understand_sentiment.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book/test_word2vec.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book_distribute/CMakeLists.txt (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book_distribute/notest_dist_fit_a_line.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book_distribute/notest_dist_image_classification.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book_distribute/notest_dist_label_semantic_roles.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book_distribute/notest_dist_word2vec.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book_distribute/notest_machine_translation.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book_distribute/notest_recognize_digits_conv_dist.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book_distribute/notest_recognize_digits_mlp_dist.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book_distribute/notest_recommender_system_dist.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book_distribute/notest_understand_sentiment_conv_dist.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book_memory_optimization/CMakeLists.txt (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book_memory_optimization/test_memopt_fit_a_line.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book_memory_optimization/test_memopt_image_classification_train.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/book_memory_optimization/test_memopt_machine_translation.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/demo/fc_gan.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/notest_concurrency.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/notest_csp.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/test_cpp_reader.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/test_data_feeder.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/test_detection.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/test_error_clip.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/test_gradient_clip.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/test_mnist_if_else_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/test_python_operator_overriding.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/CMakeLists.txt (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/__init__.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/decorators.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/op_test.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_accuracy_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_activation_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_adadelta_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_adagrad_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_adam_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_adamax_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_array_read_write_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_assign_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_assign_value_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_auc_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_batch_norm_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_beam_search_decode_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_beam_search_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_bilinear_tensor_product_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_bipartite_match_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_box_coder_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_calc_gradient.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_cast_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_chunk_eval_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_clip_by_norm_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_clip_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_compare_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_concat_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_cond_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_conditional_block.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_const_value.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_conv2d_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_conv2d_transpose_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_conv3d_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_conv3d_transpose_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_conv_shift_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_cos_sim_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_create_op_doc_string.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_crf_decoding_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_crop_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_cross_entropy_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_ctc_align.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_cumsum_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_decayed_adagrad_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_default_scope_funcs.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_detection_map_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_detection_output_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_dropout_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_dyn_rnn.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_dynrnn_gradient_check.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_dynrnn_static_input.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_edit_distance_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_elementwise_add_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_elementwise_div_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_elementwise_max_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_elementwise_min_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_elementwise_mul_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_elementwise_pow_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_elementwise_sub_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_exception.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_executor_and_mul.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_expand_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_feed_fetch_method.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_fetch_var.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_fill_constant_batch_size_like_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_fill_constant_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_fill_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_fill_zeros_like_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_framework_debug_str.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_ftrl_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_gather_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_gaussian_random_batch_size_like_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_gaussian_random_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_get_places_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_gru_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_gru_unit_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_hinge_loss_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_huber_loss_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_im2sequence_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_image_classification_layer.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_infer_shape.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_inference_model_io.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_initializer.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_iou_similarity_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_is_empty_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_l1_norm_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_label_smooth_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_layer_norm_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_layers.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_learning_rate_decay.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_linear_chain_crf_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_lod_array_length_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_lod_rank_table.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_lod_reset_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_lod_tensor_array.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_lod_tensor_array_ops.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_log_loss_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_logical_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_lookup_table_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_lrn_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_lstm_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_lstm_unit_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_lstmp_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_margin_rank_loss_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_math_op_patch.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_matmul_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_maxout_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_mean_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_memory_optimization_transpiler.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_mine_hard_examples_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_minus_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_modified_huber_loss_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_momentum_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_mul_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_multiclass_nms_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_multihead_attention.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_multiplex_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_nce.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_net.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_norm_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_normalization_wrapper.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_one_hot_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_op_support_gpu.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_operator.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_operator_desc.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_optimizer.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_pad_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_parallel_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_parameter.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_pool2d_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_pool3d_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_pool_max_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_positive_negative_pair_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_precision_recall_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_prelu_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_print_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_prior_box_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_profiler.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_program.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_protobuf.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_protobuf_descs.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_proximal_adagrad_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_proximal_gd_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_rank_loss_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_recurrent_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_recv_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_reduce_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_registry.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_regularizer.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_reorder_lod_tensor.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_reshape_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_rmsprop_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_rnn_memory_helper_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_roi_pool_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_row_conv_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_scale_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_scatter_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_scope.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_selected_rows.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_seq_concat_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_seq_conv.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_seq_pool.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_sequence_erase_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_sequence_expand.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_sequence_reshape.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_sequence_slice_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_sequence_softmax_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_sgd_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_shrink_rnn_memory.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_sign_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_smooth_l1_loss_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_softmax_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_softmax_with_cross_entropy_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_split_and_merge_lod_tensor_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_split_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_split_selected_rows_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_split_var.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_spp_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_squared_l2_distance_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_squared_l2_norm_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_sum_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_switch.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_target_assign_op.py (100%)
rename python/paddle/{v2/fluid => fluid}/tests/unittests/test_tensor.py (100%)

diff --git a/python/paddle/v2/fluid/tests/unittests/test_top_k_op.py b/python/paddle/fluid/tests/unittests/test_top_k_op.py
similarity index 100%
rename from
python/paddle/v2/fluid/tests/unittests/test_top_k_op.py rename to python/paddle/fluid/tests/unittests/test_top_k_op.py diff --git a/python/paddle/v2/fluid/tests/unittests/test_transpose_op.py b/python/paddle/fluid/tests/unittests/test_transpose_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/unittests/test_transpose_op.py rename to python/paddle/fluid/tests/unittests/test_transpose_op.py diff --git a/python/paddle/v2/fluid/tests/unittests/test_uniform_random_batch_size_like_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_batch_size_like_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/unittests/test_uniform_random_batch_size_like_op.py rename to python/paddle/fluid/tests/unittests/test_uniform_random_batch_size_like_op.py diff --git a/python/paddle/v2/fluid/tests/unittests/test_uniform_random_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/unittests/test_uniform_random_op.py rename to python/paddle/fluid/tests/unittests/test_uniform_random_op.py diff --git a/python/paddle/v2/fluid/tests/unittests/test_unique_name.py b/python/paddle/fluid/tests/unittests/test_unique_name.py similarity index 100% rename from python/paddle/v2/fluid/tests/unittests/test_unique_name.py rename to python/paddle/fluid/tests/unittests/test_unique_name.py diff --git a/python/paddle/v2/fluid/tests/unittests/test_unpool_op.py b/python/paddle/fluid/tests/unittests/test_unpool_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/unittests/test_unpool_op.py rename to python/paddle/fluid/tests/unittests/test_unpool_op.py diff --git a/python/paddle/v2/fluid/tests/unittests/test_variable.py b/python/paddle/fluid/tests/unittests/test_variable.py similarity index 100% rename from python/paddle/v2/fluid/tests/unittests/test_variable.py rename to python/paddle/fluid/tests/unittests/test_variable.py diff --git a/python/paddle/v2/fluid/tests/unittests/test_warpctc_op.py b/python/paddle/fluid/tests/unittests/test_warpctc_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/unittests/test_warpctc_op.py rename to python/paddle/fluid/tests/unittests/test_warpctc_op.py diff --git a/python/paddle/v2/fluid/tests/unittests/test_weight_normalization.py b/python/paddle/fluid/tests/unittests/test_weight_normalization.py similarity index 100% rename from python/paddle/v2/fluid/tests/unittests/test_weight_normalization.py rename to python/paddle/fluid/tests/unittests/test_weight_normalization.py diff --git a/python/paddle/v2/fluid/tests/unittests/test_while_op.py b/python/paddle/fluid/tests/unittests/test_while_op.py similarity index 100% rename from python/paddle/v2/fluid/tests/unittests/test_while_op.py rename to python/paddle/fluid/tests/unittests/test_while_op.py diff --git a/python/paddle/v2/fluid/unique_name.py b/python/paddle/fluid/unique_name.py similarity index 100% rename from python/paddle/v2/fluid/unique_name.py rename to python/paddle/fluid/unique_name.py -- GitLab From 3b8bade61776d38686944ce9ebdf9b50e6bb9b9c Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sat, 24 Feb 2018 17:27:33 +0800 Subject: [PATCH 167/217] init learning_rate_map when input learning rate is a Variable --- python/paddle/v2/fluid/optimizer.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/python/paddle/v2/fluid/optimizer.py b/python/paddle/v2/fluid/optimizer.py index 0b3e019d80..9309ec3916 100644 --- a/python/paddle/v2/fluid/optimizer.py +++ b/python/paddle/v2/fluid/optimizer.py @@ 
-45,6 +45,9 @@ class Optimizer(object): # each program should have a independent learning rate # program -> Variable(learning_rate) self._learning_rate_map = defaultdict(lambda: None) + if isinstance(self._learning_rate, framework.Variable): + self._learning_rate_map[framework.default_main_program( + )] = self._learning_rate # Dictionary of accumulators. Some optimizer subclasses need to # allocate and manage extra variables associated with the parameters # to train. These variables are called accumulators. -- GitLab From f7fa7c57c812b59ece75f5143e12298aa843ad93 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Sat, 24 Feb 2018 17:32:13 +0800 Subject: [PATCH 168/217] modify related build code after the move --- python/CMakeLists.txt | 12 +++++++----- python/setup.py.in | 12 ++++++------ 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 36919ab00b..0d497dcfce 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -3,12 +3,14 @@ file(GLOB TRAINER_PY_FILES . ./paddle/trainer/*.py) file(GLOB HELPERS_PY_FILES . ./paddle/trainer_config_helpers/*.py) file(GLOB UTILS_PY_FILES . ./paddle/utils/*.py) file(GLOB_RECURSE V2_PY_FILES ./paddle/v2/ *.py) +file(GLOB_RECURSE FLUID_PY_FILES ./paddle/fluid/ *.py) set(PY_FILES paddle/__init__.py ${TRAINER_PY_FILES} ${HELPERS_PY_FILES} ${UTILS_PY_FILES} - ${V2_PY_FILES}) + ${V2_PY_FILES} + ${FLUID_PY_FILES}) add_custom_target(copy_paddle_master) @@ -43,10 +45,10 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in ${CMAKE_CURRENT_BINARY_DIR}/setup.py) -add_custom_command(OUTPUT ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/core.so - COMMAND cmake -E copy $ ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/core.so +add_custom_command(OUTPUT ${PADDLE_SOURCE_DIR}/python/paddle/fluid/core.so + COMMAND cmake -E copy $ ${PADDLE_SOURCE_DIR}/python/paddle/fluid/core.so DEPENDS paddle_pybind) -add_custom_target(copy_paddle_pybind ALL DEPENDS ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/core.so) +add_custom_target(copy_paddle_pybind ALL DEPENDS ${PADDLE_SOURCE_DIR}/python/paddle/fluid/core.so) add_custom_command(OUTPUT ${PADDLE_PYTHON_BUILD_DIR}/.timestamp @@ -72,7 +74,7 @@ if (WITH_TESTING) add_subdirectory(paddle/v2/tests) add_subdirectory(paddle/v2/reader/tests) add_subdirectory(paddle/v2/plot/tests) - add_subdirectory(paddle/v2/fluid/tests) + add_subdirectory(paddle/fluid/tests) endif() endif() install(DIRECTORY ${PADDLE_PYTHON_PACKAGE_DIR} diff --git a/python/setup.py.in b/python/setup.py.in index 5a0d999954..6fff1bb09e 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -71,9 +71,9 @@ packages=['paddle', 'paddle.v2.reader', 'paddle.v2.master', 'paddle.v2.plot', - 'paddle.v2.fluid', - 'paddle.v2.fluid.proto', - 'paddle.v2.fluid.layers', + 'paddle.fluid', + 'paddle.fluid.proto', + 'paddle.fluid.layers', 'py_paddle'] with open('@PADDLE_SOURCE_DIR@/python/requirements.txt') as f: @@ -102,14 +102,14 @@ setup(name='${PACKAGE_NAME}', ext_modules=[Extension('_foo', ['stub.cc'])], package_data={ 'paddle.v2.master': ['libpaddle_master.so'], - 'paddle.v2.fluid': ['core.so'], + 'paddle.fluid': ['core.so'], 'py_paddle':['*.py','_swig_paddle.so'] }, package_dir={ '': '${CMAKE_CURRENT_SOURCE_DIR}', - # The paddle.v2.fluid.proto will be generated while compiling. + # The paddle.fluid.proto will be generated while compiling. # So that package points to other directory. 
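Patch 167 above is the one behavioral change in this stretch of the series: when an Optimizer is constructed with a learning rate that is already a framework.Variable (for example, the output of a learning-rate decay schedule) rather than a Python float, the constructor now seeds _learning_rate_map with an entry for the default main program, instead of leaving that entry as the defaultdict's None. A minimal sketch of the case the patch covers, using the post-rename import path from the surrounding patches; the create_global_var call stands in for any schedule that yields a Variable and is an illustrative assumption, not code from the patch:

import paddle.fluid as fluid
import paddle.fluid.framework as framework

# A scheduled learning rate is a Variable, not a float. create_global_var
# is used here only as a stand-in for a real decay schedule (assumption).
lr = fluid.layers.create_global_var(
    shape=[1], value=0.01, dtype='float32', persistable=True, name='lr')

assert isinstance(lr, framework.Variable)

# With patch 167, construction registers lr under the default main
# program, so later per-program lookups return it instead of None.
opt = fluid.optimizer.SGDOptimizer(learning_rate=lr)
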
- 'paddle.v2.fluid.proto': '${PADDLE_BINARY_DIR}/paddle/fluid/framework', + 'paddle.fluid.proto': '${PADDLE_BINARY_DIR}/paddle/fluid/framework', 'py_paddle': '${PADDLE_SOURCE_DIR}/paddle/py_paddle' }, scripts=paddle_bins, -- GitLab From bde090a97564b9c61a6aaa38b72ccc4889d102d9 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Sat, 24 Feb 2018 18:00:01 +0800 Subject: [PATCH 169/217] replace paddle.v2.fluid by paddle.fluid in tests --- python/paddle/fluid/backward.py | 2 +- python/paddle/fluid/default_scope_funcs.py | 6 +++--- python/paddle/fluid/io.py | 4 ++-- python/paddle/fluid/layer_helper.py | 2 +- python/paddle/fluid/net_drawer.py | 4 ++-- python/paddle/fluid/op.py | 4 ++-- .../fluid/tests/book/notest_rnn_encoder_decoer.py | 10 +++++----- python/paddle/fluid/tests/book/test_fit_a_line.py | 2 +- .../fluid/tests/book/test_image_classification.py | 2 +- .../fluid/tests/book/test_label_semantic_roles.py | 4 ++-- .../fluid/tests/book/test_machine_translation.py | 8 ++++---- .../paddle/fluid/tests/book/test_recognize_digits.py | 2 +- .../fluid/tests/book/test_recommender_system.py | 12 ++++++------ .../fluid/tests/book/test_understand_sentiment.py | 2 +- python/paddle/fluid/tests/book/test_word2vec.py | 2 +- .../tests/book_distribute/notest_dist_fit_a_line.py | 2 +- .../notest_dist_image_classification.py | 2 +- .../notest_dist_label_semantic_roles.py | 2 +- .../tests/book_distribute/notest_dist_word2vec.py | 2 +- .../book_distribute/notest_machine_translation.py | 10 +++++----- .../notest_recognize_digits_conv_dist.py | 2 +- .../notest_recognize_digits_mlp_dist.py | 2 +- .../notest_recommender_system_dist.py | 10 +++++----- .../notest_understand_sentiment_conv_dist.py | 2 +- .../notest_understand_sentiment_dynamic_lstm.py | 2 +- .../test_memopt_fit_a_line.py | 2 +- .../test_memopt_image_classification_train.py | 2 +- .../test_memopt_machine_translation.py | 10 +++++----- python/paddle/fluid/tests/demo/fc_gan.py | 2 +- python/paddle/fluid/tests/notest_concurrency.py | 6 +++--- python/paddle/fluid/tests/notest_csp.py | 2 +- python/paddle/fluid/tests/test_cpp_reader.py | 2 +- python/paddle/fluid/tests/test_data_feeder.py | 2 +- python/paddle/fluid/tests/test_detection.py | 6 +++--- python/paddle/fluid/tests/test_error_clip.py | 2 +- python/paddle/fluid/tests/test_gradient_clip.py | 2 +- python/paddle/fluid/tests/test_mnist_if_else_op.py | 10 +++++----- .../fluid/tests/test_python_operator_overriding.py | 6 +++--- python/paddle/fluid/tests/unittests/decorators.py | 2 +- python/paddle/fluid/tests/unittests/op_test.py | 10 +++++----- .../paddle/fluid/tests/unittests/test_adagrad_op.py | 4 ++-- python/paddle/fluid/tests/unittests/test_adam_op.py | 4 ++-- .../tests/unittests/test_array_read_write_op.py | 10 +++++----- .../fluid/tests/unittests/test_assign_value_op.py | 6 +++--- .../fluid/tests/unittests/test_batch_norm_op.py | 6 +++--- .../tests/unittests/test_beam_search_decode_op.py | 4 ++-- .../fluid/tests/unittests/test_beam_search_op.py | 4 ++-- .../fluid/tests/unittests/test_calc_gradient.py | 10 +++++----- python/paddle/fluid/tests/unittests/test_cast_op.py | 2 +- python/paddle/fluid/tests/unittests/test_cond_op.py | 4 ++-- .../fluid/tests/unittests/test_conditional_block.py | 10 +++++----- .../paddle/fluid/tests/unittests/test_const_value.py | 2 +- .../paddle/fluid/tests/unittests/test_conv2d_op.py | 2 +- .../tests/unittests/test_conv2d_transpose_op.py | 2 +- .../paddle/fluid/tests/unittests/test_conv3d_op.py | 2 +- .../tests/unittests/test_conv3d_transpose_op.py | 2 +- 
.../tests/unittests/test_create_op_doc_string.py | 2 +- .../tests/unittests/test_default_scope_funcs.py | 2 +- python/paddle/fluid/tests/unittests/test_dyn_rnn.py | 2 +- .../tests/unittests/test_dynrnn_gradient_check.py | 2 +- .../tests/unittests/test_dynrnn_static_input.py | 10 +++++----- .../paddle/fluid/tests/unittests/test_exception.py | 2 +- .../fluid/tests/unittests/test_executor_and_mul.py | 6 +++--- .../fluid/tests/unittests/test_feed_fetch_method.py | 2 +- .../paddle/fluid/tests/unittests/test_fetch_var.py | 4 ++-- python/paddle/fluid/tests/unittests/test_fill_op.py | 2 +- .../tests/unittests/test_framework_debug_str.py | 2 +- .../fluid/tests/unittests/test_gaussian_random_op.py | 8 ++++---- .../fluid/tests/unittests/test_get_places_op.py | 2 +- .../unittests/test_image_classification_layer.py | 6 +++--- .../paddle/fluid/tests/unittests/test_infer_shape.py | 2 +- .../fluid/tests/unittests/test_inference_model_io.py | 12 ++++++------ .../paddle/fluid/tests/unittests/test_initializer.py | 4 ++-- .../paddle/fluid/tests/unittests/test_is_empty_op.py | 4 ++-- .../fluid/tests/unittests/test_layer_norm_op.py | 6 +++--- python/paddle/fluid/tests/unittests/test_layers.py | 8 ++++---- .../tests/unittests/test_learning_rate_decay.py | 8 ++++---- .../tests/unittests/test_lod_array_length_op.py | 6 +++--- .../fluid/tests/unittests/test_lod_rank_table.py | 6 +++--- .../fluid/tests/unittests/test_lod_tensor_array.py | 2 +- .../tests/unittests/test_lod_tensor_array_ops.py | 10 +++++----- .../fluid/tests/unittests/test_math_op_patch.py | 2 +- .../unittests/test_memory_optimization_transpiler.py | 8 ++++---- .../tests/unittests/test_multihead_attention.py | 4 ++-- python/paddle/fluid/tests/unittests/test_net.py | 4 ++-- .../tests/unittests/test_normalization_wrapper.py | 4 ++-- .../paddle/fluid/tests/unittests/test_one_hot_op.py | 8 ++++---- .../fluid/tests/unittests/test_op_support_gpu.py | 2 +- python/paddle/fluid/tests/unittests/test_operator.py | 4 ++-- .../fluid/tests/unittests/test_operator_desc.py | 4 ++-- .../paddle/fluid/tests/unittests/test_optimizer.py | 6 +++--- .../paddle/fluid/tests/unittests/test_parallel_op.py | 2 +- .../paddle/fluid/tests/unittests/test_parameter.py | 10 +++++----- .../paddle/fluid/tests/unittests/test_pool2d_op.py | 2 +- .../paddle/fluid/tests/unittests/test_pool3d_op.py | 2 +- python/paddle/fluid/tests/unittests/test_print_op.py | 12 ++++++------ python/paddle/fluid/tests/unittests/test_profiler.py | 8 ++++---- python/paddle/fluid/tests/unittests/test_program.py | 4 ++-- python/paddle/fluid/tests/unittests/test_protobuf.py | 2 +- .../fluid/tests/unittests/test_protobuf_descs.py | 2 +- .../fluid/tests/unittests/test_recurrent_op.py | 10 +++++----- python/paddle/fluid/tests/unittests/test_recv_op.py | 4 ++-- python/paddle/fluid/tests/unittests/test_registry.py | 2 +- .../paddle/fluid/tests/unittests/test_regularizer.py | 8 ++++---- .../fluid/tests/unittests/test_reorder_lod_tensor.py | 4 ++-- .../tests/unittests/test_rnn_memory_helper_op.py | 8 ++++---- python/paddle/fluid/tests/unittests/test_scope.py | 10 +++++----- .../fluid/tests/unittests/test_selected_rows.py | 2 +- python/paddle/fluid/tests/unittests/test_sgd_op.py | 4 ++-- .../fluid/tests/unittests/test_shrink_rnn_memory.py | 12 ++++++------ .../unittests/test_split_and_merge_lod_tensor_op.py | 10 +++++----- .../tests/unittests/test_split_selected_rows_op.py | 4 ++-- .../paddle/fluid/tests/unittests/test_split_var.py | 6 +++--- python/paddle/fluid/tests/unittests/test_switch.py | 10 +++++----- 
python/paddle/fluid/tests/unittests/test_tensor.py | 2 +- .../paddle/fluid/tests/unittests/test_unique_name.py | 2 +- python/paddle/fluid/tests/unittests/test_variable.py | 4 ++-- .../tests/unittests/test_weight_normalization.py | 8 ++++---- python/paddle/fluid/tests/unittests/test_while_op.py | 8 ++++---- 119 files changed, 285 insertions(+), 285 deletions(-) diff --git a/python/paddle/fluid/backward.py b/python/paddle/fluid/backward.py index 4da73bb996..58fa7f1beb 100644 --- a/python/paddle/fluid/backward.py +++ b/python/paddle/fluid/backward.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from paddle.v2.fluid import framework as framework +from paddle.fluid import framework as framework from . import core import collections import copy diff --git a/python/paddle/fluid/default_scope_funcs.py b/python/paddle/fluid/default_scope_funcs.py index eeb9fb2043..f8faf69425 100644 --- a/python/paddle/fluid/default_scope_funcs.py +++ b/python/paddle/fluid/default_scope_funcs.py @@ -26,7 +26,7 @@ A `scoped_function` will take a `function` as input. That function will be invoked in a new local scope. """ -import paddle.v2.fluid.core +import paddle.fluid.core import threading __tl_scope__ = threading.local() @@ -44,13 +44,13 @@ __all__ = [ def get_cur_scope(): """ Get current scope. - :rtype: paddle.v2.fluid.core.Scope + :rtype: paddle.fluid.core.Scope """ cur_scope_stack = getattr(__tl_scope__, 'cur_scope', None) if cur_scope_stack is None: __tl_scope__.cur_scope = list() if len(__tl_scope__.cur_scope) == 0: - __tl_scope__.cur_scope.append(paddle.v2.fluid.core.Scope()) + __tl_scope__.cur_scope.append(paddle.fluid.core.Scope()) return __tl_scope__.cur_scope[-1] diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py index 8a8bd089b5..33f709ece4 100644 --- a/python/paddle/fluid/io.py +++ b/python/paddle/fluid/io.py @@ -14,8 +14,8 @@ import os -from paddle.v2.fluid.evaluator import Evaluator -from paddle.v2.fluid.framework import Program, Parameter, default_main_program, Variable +from paddle.fluid.evaluator import Evaluator +from paddle.fluid.framework import Program, Parameter, default_main_program, Variable from . 
import core __all__ = [ diff --git a/python/paddle/fluid/layer_helper.py b/python/paddle/fluid/layer_helper.py index dc4f992ddc..6437dbb446 100644 --- a/python/paddle/fluid/layer_helper.py +++ b/python/paddle/fluid/layer_helper.py @@ -17,7 +17,7 @@ import itertools from framework import Variable, Parameter, default_main_program, default_startup_program, dtype_is_floating import unique_name -from paddle.v2.fluid.initializer import Constant, Xavier +from paddle.fluid.initializer import Constant, Xavier from param_attr import ParamAttr, WeightNormParamAttr diff --git a/python/paddle/fluid/net_drawer.py b/python/paddle/fluid/net_drawer.py index 66793a5785..73946a0721 100644 --- a/python/paddle/fluid/net_drawer.py +++ b/python/paddle/fluid/net_drawer.py @@ -17,8 +17,8 @@ import json import logging from collections import defaultdict -import paddle.v2.fluid.core as core -import paddle.v2.fluid.proto.framework_pb2 as framework_pb2 +import paddle.fluid.core as core +import paddle.fluid.proto.framework_pb2 as framework_pb2 logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) diff --git a/python/paddle/fluid/op.py b/python/paddle/fluid/op.py index 6a41370458..0b76e94157 100644 --- a/python/paddle/fluid/op.py +++ b/python/paddle/fluid/op.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import paddle.v2.fluid.core as core -import paddle.v2.fluid.proto.framework_pb2 as framework_pb2 +import paddle.fluid.core as core +import paddle.fluid.proto.framework_pb2 as framework_pb2 def get_all_op_protos(): diff --git a/python/paddle/fluid/tests/book/notest_rnn_encoder_decoer.py b/python/paddle/fluid/tests/book/notest_rnn_encoder_decoer.py index c7db70f1b1..adf38ea289 100644 --- a/python/paddle/fluid/tests/book/notest_rnn_encoder_decoer.py +++ b/python/paddle/fluid/tests/book/notest_rnn_encoder_decoer.py @@ -14,15 +14,15 @@ import numpy as np import paddle.v2 as paddle -import paddle.v2.fluid as fluid -import paddle.v2.fluid.core as core -import paddle.v2.fluid.framework as framework -import paddle.v2.fluid.layers as layers +import paddle.fluid as fluid +import paddle.fluid.core as core +import paddle.fluid.framework as framework +import paddle.fluid.layers as layers import contextlib import math import sys import unittest -from paddle.v2.fluid.executor import Executor +from paddle.fluid.executor import Executor dict_size = 30000 source_dict_dim = target_dict_dim = dict_size diff --git a/python/paddle/fluid/tests/book/test_fit_a_line.py b/python/paddle/fluid/tests/book/test_fit_a_line.py index a66c2c3c2f..0d6fb872d7 100644 --- a/python/paddle/fluid/tests/book/test_fit_a_line.py +++ b/python/paddle/fluid/tests/book/test_fit_a_line.py @@ -13,7 +13,7 @@ # limitations under the License. 
import paddle.v2 as paddle -import paddle.v2.fluid as fluid +import paddle.fluid as fluid import contextlib import numpy import unittest diff --git a/python/paddle/fluid/tests/book/test_image_classification.py b/python/paddle/fluid/tests/book/test_image_classification.py index 734ab3e4fb..2f6c7d1a70 100644 --- a/python/paddle/fluid/tests/book/test_image_classification.py +++ b/python/paddle/fluid/tests/book/test_image_classification.py @@ -15,7 +15,7 @@ from __future__ import print_function import paddle.v2 as paddle -import paddle.v2.fluid as fluid +import paddle.fluid as fluid import contextlib import math import sys diff --git a/python/paddle/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/fluid/tests/book/test_label_semantic_roles.py index b790246ec1..fcc9dbf8bb 100644 --- a/python/paddle/fluid/tests/book/test_label_semantic_roles.py +++ b/python/paddle/fluid/tests/book/test_label_semantic_roles.py @@ -17,8 +17,8 @@ import math import numpy as np import paddle.v2 as paddle import paddle.v2.dataset.conll05 as conll05 -import paddle.v2.fluid as fluid -from paddle.v2.fluid.initializer import init_on_cpu +import paddle.fluid as fluid +from paddle.fluid.initializer import init_on_cpu import contextlib import time import unittest diff --git a/python/paddle/fluid/tests/book/test_machine_translation.py b/python/paddle/fluid/tests/book/test_machine_translation.py index d3405a9601..287ef7a752 100644 --- a/python/paddle/fluid/tests/book/test_machine_translation.py +++ b/python/paddle/fluid/tests/book/test_machine_translation.py @@ -15,10 +15,10 @@ import contextlib import numpy as np import paddle.v2 as paddle -import paddle.v2.fluid as fluid -import paddle.v2.fluid.framework as framework -import paddle.v2.fluid.layers as pd -from paddle.v2.fluid.executor import Executor +import paddle.fluid as fluid +import paddle.fluid.framework as framework +import paddle.fluid.layers as pd +from paddle.fluid.executor import Executor import unittest dict_size = 30000 diff --git a/python/paddle/fluid/tests/book/test_recognize_digits.py b/python/paddle/fluid/tests/book/test_recognize_digits.py index 2462d425e1..2a778b0453 100644 --- a/python/paddle/fluid/tests/book/test_recognize_digits.py +++ b/python/paddle/fluid/tests/book/test_recognize_digits.py @@ -13,7 +13,7 @@ # limitations under the License. 
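The book tests above change nothing but their import lines; the program bodies are untouched. For orientation, this is the shape of user code across the move; the layer calls are modeled on the fit-a-line examples in this same series, and the fragment is a sketch rather than a complete training script:

# Old layout: import paddle.v2.fluid as fluid
# New layout after this series:
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y_predict = fluid.layers.fc(input=x, size=1, act=None)
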
from __future__ import print_function import argparse -import paddle.v2.fluid as fluid +import paddle.fluid as fluid import paddle.v2 as paddle import sys import numpy diff --git a/python/paddle/fluid/tests/book/test_recommender_system.py b/python/paddle/fluid/tests/book/test_recommender_system.py index 1a7d8d57ff..81318823e5 100644 --- a/python/paddle/fluid/tests/book/test_recommender_system.py +++ b/python/paddle/fluid/tests/book/test_recommender_system.py @@ -16,12 +16,12 @@ import math import sys import numpy as np import paddle.v2 as paddle -import paddle.v2.fluid as fluid -import paddle.v2.fluid.framework as framework -import paddle.v2.fluid.layers as layers -import paddle.v2.fluid.nets as nets -from paddle.v2.fluid.executor import Executor -from paddle.v2.fluid.optimizer import SGDOptimizer +import paddle.fluid as fluid +import paddle.fluid.framework as framework +import paddle.fluid.layers as layers +import paddle.fluid.nets as nets +from paddle.fluid.executor import Executor +from paddle.fluid.optimizer import SGDOptimizer IS_SPARSE = True USE_GPU = False diff --git a/python/paddle/fluid/tests/book/test_understand_sentiment.py b/python/paddle/fluid/tests/book/test_understand_sentiment.py index 61f46b51c4..8bb4bf92de 100644 --- a/python/paddle/fluid/tests/book/test_understand_sentiment.py +++ b/python/paddle/fluid/tests/book/test_understand_sentiment.py @@ -14,7 +14,7 @@ from __future__ import print_function import unittest -import paddle.v2.fluid as fluid +import paddle.fluid as fluid import paddle.v2 as paddle import contextlib import math diff --git a/python/paddle/fluid/tests/book/test_word2vec.py b/python/paddle/fluid/tests/book/test_word2vec.py index 9bd8f90c5e..965f1ff851 100644 --- a/python/paddle/fluid/tests/book/test_word2vec.py +++ b/python/paddle/fluid/tests/book/test_word2vec.py @@ -12,7 +12,7 @@ # limitations under the License. 
import paddle.v2 as paddle -import paddle.v2.fluid as fluid +import paddle.fluid as fluid import unittest import os import numpy as np diff --git a/python/paddle/fluid/tests/book_distribute/notest_dist_fit_a_line.py b/python/paddle/fluid/tests/book_distribute/notest_dist_fit_a_line.py index c443c4e0b7..b5fbffea36 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_dist_fit_a_line.py +++ b/python/paddle/fluid/tests/book_distribute/notest_dist_fit_a_line.py @@ -14,7 +14,7 @@ import numpy as np import paddle.v2 as paddle -import paddle.v2.fluid as fluid +import paddle.fluid as fluid import os x = fluid.layers.data(name='x', shape=[13], dtype='float32') diff --git a/python/paddle/fluid/tests/book_distribute/notest_dist_image_classification.py b/python/paddle/fluid/tests/book_distribute/notest_dist_image_classification.py index 298ecfc386..9f807df882 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_dist_image_classification.py +++ b/python/paddle/fluid/tests/book_distribute/notest_dist_image_classification.py @@ -15,7 +15,7 @@ from __future__ import print_function import paddle.v2 as paddle -import paddle.v2.fluid as fluid +import paddle.fluid as fluid import os import sys diff --git a/python/paddle/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py b/python/paddle/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py index 1210bf1d84..533cdb5a9b 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py +++ b/python/paddle/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py @@ -17,7 +17,7 @@ import math import numpy as np import paddle.v2 as paddle import paddle.v2.dataset.conll05 as conll05 -import paddle.v2.fluid as fluid +import paddle.fluid as fluid import time import os diff --git a/python/paddle/fluid/tests/book_distribute/notest_dist_word2vec.py b/python/paddle/fluid/tests/book_distribute/notest_dist_word2vec.py index 0d5ad98850..bee022bf4e 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_dist_word2vec.py +++ b/python/paddle/fluid/tests/book_distribute/notest_dist_word2vec.py @@ -15,7 +15,7 @@ from __future__ import print_function import numpy as np import paddle.v2 as paddle -import paddle.v2.fluid as fluid +import paddle.fluid as fluid import os PASS_NUM = 100 diff --git a/python/paddle/fluid/tests/book_distribute/notest_machine_translation.py b/python/paddle/fluid/tests/book_distribute/notest_machine_translation.py index 15d2d40979..243d7f5073 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_machine_translation.py +++ b/python/paddle/fluid/tests/book_distribute/notest_machine_translation.py @@ -14,11 +14,11 @@ import numpy as np import paddle.v2 as paddle -import paddle.v2.fluid as fluid -import paddle.v2.fluid.core as core -import paddle.v2.fluid.framework as framework -import paddle.v2.fluid.layers as layers -from paddle.v2.fluid.executor import Executor +import paddle.fluid as fluid +import paddle.fluid.core as core +import paddle.fluid.framework as framework +import paddle.fluid.layers as layers +from paddle.fluid.executor import Executor import os dict_size = 30000 diff --git a/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py b/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py index 1c1fffc589..223428f820 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py +++ b/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py @@ -15,7 +15,7 @@ from 
__future__ import print_function import numpy as np import paddle.v2 as paddle -import paddle.v2.fluid as fluid +import paddle.fluid as fluid import os images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype='float32') diff --git a/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_mlp_dist.py b/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_mlp_dist.py index c442ada6e3..9fb356e78d 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_mlp_dist.py +++ b/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_mlp_dist.py @@ -15,7 +15,7 @@ from __future__ import print_function import numpy as np import paddle.v2 as paddle -import paddle.v2.fluid as fluid +import paddle.fluid as fluid import os BATCH_SIZE = 128 diff --git a/python/paddle/fluid/tests/book_distribute/notest_recommender_system_dist.py b/python/paddle/fluid/tests/book_distribute/notest_recommender_system_dist.py index 363c7102c7..ea05fbb457 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_recommender_system_dist.py +++ b/python/paddle/fluid/tests/book_distribute/notest_recommender_system_dist.py @@ -15,11 +15,11 @@ import numpy as np import os import paddle.v2 as paddle -import paddle.v2.fluid as fluid -import paddle.v2.fluid.core as core -import paddle.v2.fluid.layers as layers -import paddle.v2.fluid.nets as nets -from paddle.v2.fluid.optimizer import SGDOptimizer +import paddle.fluid as fluid +import paddle.fluid.core as core +import paddle.fluid.layers as layers +import paddle.fluid.nets as nets +from paddle.fluid.optimizer import SGDOptimizer IS_SPARSE = True BATCH_SIZE = 256 diff --git a/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py b/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py index c5c0856c31..3b057c5d96 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py +++ b/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py @@ -16,7 +16,7 @@ from __future__ import print_function import os import numpy as np import paddle.v2 as paddle -import paddle.v2.fluid as fluid +import paddle.fluid as fluid def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32, diff --git a/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py b/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py index 99e2c2bbac..b6c85ebb40 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py +++ b/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py @@ -15,7 +15,7 @@ import numpy as np import os import paddle.v2 as paddle -import paddle.v2.fluid as fluid +import paddle.fluid as fluid def stacked_lstm_net(data, diff --git a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py index 944f8af086..11250db9cb 100644 --- a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py +++ b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py @@ -14,7 +14,7 @@ import numpy as np import paddle.v2 as paddle -import paddle.v2.fluid as fluid +import paddle.fluid as fluid import math import sys diff --git a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py 
b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py index a556904107..64f55e0853 100644 --- a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py +++ b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py @@ -17,7 +17,7 @@ from __future__ import print_function import sys import paddle.v2 as paddle -import paddle.v2.fluid as fluid +import paddle.fluid as fluid import math import sys diff --git a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py index 4c1eae861b..c439401ea9 100644 --- a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py +++ b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py @@ -14,11 +14,11 @@ import numpy as np import paddle.v2 as paddle -import paddle.v2.fluid as fluid -import paddle.v2.fluid.core as core -import paddle.v2.fluid.framework as framework -import paddle.v2.fluid.layers as layers -from paddle.v2.fluid.executor import Executor +import paddle.fluid as fluid +import paddle.fluid.core as core +import paddle.fluid.framework as framework +import paddle.fluid.layers as layers +from paddle.fluid.executor import Executor import math import sys diff --git a/python/paddle/fluid/tests/demo/fc_gan.py b/python/paddle/fluid/tests/demo/fc_gan.py index 67921db04a..fd9d80fa94 100644 --- a/python/paddle/fluid/tests/demo/fc_gan.py +++ b/python/paddle/fluid/tests/demo/fc_gan.py @@ -20,7 +20,7 @@ import matplotlib import numpy import paddle.v2 as paddle -import paddle.v2.fluid as fluid +import paddle.fluid as fluid matplotlib.use('Agg') import matplotlib.pyplot as plt diff --git a/python/paddle/fluid/tests/notest_concurrency.py b/python/paddle/fluid/tests/notest_concurrency.py index 9d87ed9c07..602d5f31eb 100644 --- a/python/paddle/fluid/tests/notest_concurrency.py +++ b/python/paddle/fluid/tests/notest_concurrency.py @@ -13,9 +13,9 @@ # limitations under the License. import unittest -import paddle.v2.fluid as fluid -import paddle.v2.fluid.core as core -from paddle.v2.fluid.executor import Executor +import paddle.fluid as fluid +import paddle.fluid.core as core +from paddle.fluid.executor import Executor class TestRoutineOp(unittest.TestCase): diff --git a/python/paddle/fluid/tests/notest_csp.py b/python/paddle/fluid/tests/notest_csp.py index 7fe234a20b..f4be833dee 100644 --- a/python/paddle/fluid/tests/notest_csp.py +++ b/python/paddle/fluid/tests/notest_csp.py @@ -13,7 +13,7 @@ # limitations under the License. import unittest -import paddle.v2.fluid as fluid +import paddle.fluid as fluid class TestCSPFramework(unittest.TestCase): diff --git a/python/paddle/fluid/tests/test_cpp_reader.py b/python/paddle/fluid/tests/test_cpp_reader.py index 6d2312dbcb..b655920578 100644 --- a/python/paddle/fluid/tests/test_cpp_reader.py +++ b/python/paddle/fluid/tests/test_cpp_reader.py @@ -13,7 +13,7 @@ # limitations under the License. 
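Patch 169 touches 119 files, and every hunk rewrites the same package prefix (-paddle.v2.fluid / +paddle.fluid), so the change is purely mechanical. A Python 3 sketch of a script that would generate hunks of this shape, assuming it is run from the repository root; the script is an illustration of the pattern, not a tool shipped with the series:

import pathlib

# Rewrite the old package path in every Python source under python/.
# This reproduces the one-line import hunks seen throughout patch 169.
for path in pathlib.Path('python').rglob('*.py'):
    src = path.read_text()
    dst = src.replace('paddle.v2.fluid', 'paddle.fluid')
    if dst != src:
        path.write_text(dst)
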
import paddle.v2 as paddle -import paddle.v2.fluid as fluid +import paddle.fluid as fluid import numpy as np prog = fluid.framework.Program() diff --git a/python/paddle/fluid/tests/test_data_feeder.py b/python/paddle/fluid/tests/test_data_feeder.py index 3154293ee6..861dd3174a 100644 --- a/python/paddle/fluid/tests/test_data_feeder.py +++ b/python/paddle/fluid/tests/test_data_feeder.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import paddle.v2.fluid as fluid +import paddle.fluid as fluid def test_converter(): diff --git a/python/paddle/fluid/tests/test_detection.py b/python/paddle/fluid/tests/test_detection.py index 908f4e82a6..1dc6d107d2 100644 --- a/python/paddle/fluid/tests/test_detection.py +++ b/python/paddle/fluid/tests/test_detection.py @@ -13,9 +13,9 @@ # limitations under the License. from __future__ import print_function -import paddle.v2.fluid as fluid -import paddle.v2.fluid.layers as layers -from paddle.v2.fluid.framework import Program, program_guard +import paddle.fluid as fluid +import paddle.fluid.layers as layers +from paddle.fluid.framework import Program, program_guard import unittest diff --git a/python/paddle/fluid/tests/test_error_clip.py b/python/paddle/fluid/tests/test_error_clip.py index d577d0014d..f4c0dfa7a7 100644 --- a/python/paddle/fluid/tests/test_error_clip.py +++ b/python/paddle/fluid/tests/test_error_clip.py @@ -15,7 +15,7 @@ from __future__ import print_function import numpy as np import paddle.v2 as paddle -import paddle.v2.fluid as fluid +import paddle.fluid as fluid BATCH_SIZE = 128 CLIP_MAX = 2e-6 diff --git a/python/paddle/fluid/tests/test_gradient_clip.py b/python/paddle/fluid/tests/test_gradient_clip.py index 792262df84..ca0b129008 100644 --- a/python/paddle/fluid/tests/test_gradient_clip.py +++ b/python/paddle/fluid/tests/test_gradient_clip.py @@ -14,7 +14,7 @@ import numpy as np import paddle.v2 as paddle -import paddle.v2.fluid as fluid +import paddle.fluid as fluid BATCH_SIZE = 128 CLIP = 1 diff --git a/python/paddle/fluid/tests/test_mnist_if_else_op.py b/python/paddle/fluid/tests/test_mnist_if_else_op.py index 75a651cf27..0b557942b0 100644 --- a/python/paddle/fluid/tests/test_mnist_if_else_op.py +++ b/python/paddle/fluid/tests/test_mnist_if_else_op.py @@ -12,11 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import paddle.v2.fluid.layers as layers -from paddle.v2.fluid.framework import Program, program_guard, default_main_program, default_startup_program -from paddle.v2.fluid.executor import Executor -from paddle.v2.fluid.optimizer import MomentumOptimizer -import paddle.v2.fluid.core as core +import paddle.fluid.layers as layers +from paddle.fluid.framework import Program, program_guard, default_main_program, default_startup_program +from paddle.fluid.executor import Executor +from paddle.fluid.optimizer import MomentumOptimizer +import paddle.fluid.core as core import paddle.v2 as paddle import unittest import numpy as np diff --git a/python/paddle/fluid/tests/test_python_operator_overriding.py b/python/paddle/fluid/tests/test_python_operator_overriding.py index e5198ec17d..b5ac97eac5 100644 --- a/python/paddle/fluid/tests/test_python_operator_overriding.py +++ b/python/paddle/fluid/tests/test_python_operator_overriding.py @@ -16,9 +16,9 @@ import unittest import numpy as np -import paddle.v2.fluid.layers as layers -import paddle.v2.fluid.framework as framework -import paddle.v2.fluid as fluid +import paddle.fluid.layers as layers +import paddle.fluid.framework as framework +import paddle.fluid as fluid class TestPythonOperatorOverride(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/decorators.py b/python/paddle/fluid/tests/unittests/decorators.py index 7081e4b934..d1165e2a91 100644 --- a/python/paddle/fluid/tests/unittests/decorators.py +++ b/python/paddle/fluid/tests/unittests/decorators.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import paddle.v2.fluid as fluid +import paddle.fluid as fluid __all__ = ['many_times', 'prog_scope'] diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py index d8867550ca..f7e02595ec 100644 --- a/python/paddle/fluid/tests/unittests/op_test.py +++ b/python/paddle/fluid/tests/unittests/op_test.py @@ -16,12 +16,12 @@ import unittest import numpy as np import random import itertools -import paddle.v2.fluid.core as core +import paddle.fluid.core as core import collections -from paddle.v2.fluid.backward import append_backward -from paddle.v2.fluid.op import Operator -from paddle.v2.fluid.executor import Executor -from paddle.v2.fluid.framework import Program, OpProtoHolder +from paddle.fluid.backward import append_backward +from paddle.fluid.op import Operator +from paddle.fluid.executor import Executor +from paddle.fluid.framework import Program, OpProtoHolder def randomize_probability(batch_size, class_num, dtype='float32'): diff --git a/python/paddle/fluid/tests/unittests/test_adagrad_op.py b/python/paddle/fluid/tests/unittests/test_adagrad_op.py index 320f43023c..2f0ea79f4d 100644 --- a/python/paddle/fluid/tests/unittests/test_adagrad_op.py +++ b/python/paddle/fluid/tests/unittests/test_adagrad_op.py @@ -14,8 +14,8 @@ import unittest import numpy as np -import paddle.v2.fluid.core as core -from paddle.v2.fluid.op import Operator +import paddle.fluid.core as core +from paddle.fluid.op import Operator from op_test import OpTest import math diff --git a/python/paddle/fluid/tests/unittests/test_adam_op.py b/python/paddle/fluid/tests/unittests/test_adam_op.py index d6c5a16ff2..3c65f3d44a 100644 --- a/python/paddle/fluid/tests/unittests/test_adam_op.py +++ b/python/paddle/fluid/tests/unittests/test_adam_op.py @@ -15,8 +15,8 @@ import unittest import numpy as np from op_test import OpTest -from paddle.v2.fluid 
import core -from paddle.v2.fluid.op import Operator +from paddle.fluid import core +from paddle.fluid.op import Operator class TestAdamOp1(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_array_read_write_op.py b/python/paddle/fluid/tests/unittests/test_array_read_write_op.py index 8917b9b906..380a8549b3 100644 --- a/python/paddle/fluid/tests/unittests/test_array_read_write_op.py +++ b/python/paddle/fluid/tests/unittests/test_array_read_write_op.py @@ -13,11 +13,11 @@ # limitations under the License. import unittest -import paddle.v2.fluid.core as core -import paddle.v2.fluid.layers as layers -from paddle.v2.fluid.executor import Executor -from paddle.v2.fluid.backward import append_backward -from paddle.v2.fluid.framework import default_main_program +import paddle.fluid.core as core +import paddle.fluid.layers as layers +from paddle.fluid.executor import Executor +from paddle.fluid.backward import append_backward +from paddle.fluid.framework import default_main_program import numpy diff --git a/python/paddle/fluid/tests/unittests/test_assign_value_op.py b/python/paddle/fluid/tests/unittests/test_assign_value_op.py index 99d7e958c3..02f2e6eddc 100644 --- a/python/paddle/fluid/tests/unittests/test_assign_value_op.py +++ b/python/paddle/fluid/tests/unittests/test_assign_value_op.py @@ -12,12 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -import paddle.v2.fluid as fluid -import paddle.v2.fluid.layers as layers +import paddle.fluid as fluid +import paddle.fluid.layers as layers import op_test import numpy import unittest -import paddle.v2.fluid.framework as framework +import paddle.fluid.framework as framework class TestAssignValueOp(op_test.OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py index b7c0cb521a..80e6fa6df3 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py @@ -15,9 +15,9 @@ import unittest import numpy as np from op_test import OpTest -import paddle.v2.fluid.core as core -from paddle.v2.fluid.op import Operator -from paddle.v2.fluid.framework import grad_var_name +import paddle.fluid.core as core +from paddle.fluid.op import Operator +from paddle.fluid.framework import grad_var_name def get_backward_op(scope, op, no_grad_set): diff --git a/python/paddle/fluid/tests/unittests/test_beam_search_decode_op.py b/python/paddle/fluid/tests/unittests/test_beam_search_decode_op.py index 91f8f7b18b..4ee00605e2 100644 --- a/python/paddle/fluid/tests/unittests/test_beam_search_decode_op.py +++ b/python/paddle/fluid/tests/unittests/test_beam_search_decode_op.py @@ -15,8 +15,8 @@ import unittest import numpy as np -import paddle.v2.fluid.core as core -from paddle.v2.fluid.op import Operator +import paddle.fluid.core as core +from paddle.fluid.op import Operator class TestBeamSearchDecodeOp(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_beam_search_op.py b/python/paddle/fluid/tests/unittests/test_beam_search_op.py index 1596bb3970..bc708f3aff 100644 --- a/python/paddle/fluid/tests/unittests/test_beam_search_op.py +++ b/python/paddle/fluid/tests/unittests/test_beam_search_op.py @@ -13,8 +13,8 @@ # limitations under the License. 
import logging -from paddle.v2.fluid.op import Operator, DynamicRecurrentOp -import paddle.v2.fluid.core as core +from paddle.fluid.op import Operator, DynamicRecurrentOp +import paddle.fluid.core as core import unittest import numpy as np diff --git a/python/paddle/fluid/tests/unittests/test_calc_gradient.py b/python/paddle/fluid/tests/unittests/test_calc_gradient.py index 1b38dcf343..15731fefc8 100644 --- a/python/paddle/fluid/tests/unittests/test_calc_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_calc_gradient.py @@ -14,11 +14,11 @@ import unittest -import paddle.v2.fluid as fluid -import paddle.v2.fluid.layers as layers -import paddle.v2.fluid.framework as framework -import paddle.v2.fluid.optimizer as optimizer -from paddle.v2.fluid.backward import calc_gradient +import paddle.fluid as fluid +import paddle.fluid.layers as layers +import paddle.fluid.framework as framework +import paddle.fluid.optimizer as optimizer +from paddle.fluid.backward import calc_gradient class TestCalcGradient(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_cast_op.py b/python/paddle/fluid/tests/unittests/test_cast_op.py index 3d05a319cd..8fb8d03828 100644 --- a/python/paddle/fluid/tests/unittests/test_cast_op.py +++ b/python/paddle/fluid/tests/unittests/test_cast_op.py @@ -15,7 +15,7 @@ import op_test import unittest import numpy as np -import paddle.v2.fluid.core as core +import paddle.fluid.core as core class TestCastOp(op_test.OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_cond_op.py b/python/paddle/fluid/tests/unittests/test_cond_op.py index 4a1e806c4b..66fbae961a 100644 --- a/python/paddle/fluid/tests/unittests/test_cond_op.py +++ b/python/paddle/fluid/tests/unittests/test_cond_op.py @@ -13,10 +13,10 @@ # limitations under the License. import logging -import paddle.v2.fluid.core as core +import paddle.fluid.core as core import unittest import numpy as np -from paddle.v2.fluid.op import Operator, CondOp +from paddle.fluid.op import Operator, CondOp class PySimpleCond(object): diff --git a/python/paddle/fluid/tests/unittests/test_conditional_block.py b/python/paddle/fluid/tests/unittests/test_conditional_block.py index 58ac267203..fad97a2816 100644 --- a/python/paddle/fluid/tests/unittests/test_conditional_block.py +++ b/python/paddle/fluid/tests/unittests/test_conditional_block.py @@ -13,11 +13,11 @@ # limitations under the License. import unittest -import paddle.v2.fluid.layers as layers -import paddle.v2.fluid.core as core -from paddle.v2.fluid.framework import default_startup_program, default_main_program -from paddle.v2.fluid.executor import Executor -from paddle.v2.fluid.backward import append_backward +import paddle.fluid.layers as layers +import paddle.fluid.core as core +from paddle.fluid.framework import default_startup_program, default_main_program +from paddle.fluid.executor import Executor +from paddle.fluid.backward import append_backward import numpy diff --git a/python/paddle/fluid/tests/unittests/test_const_value.py b/python/paddle/fluid/tests/unittests/test_const_value.py index 06c1c21fbc..d1075d514e 100644 --- a/python/paddle/fluid/tests/unittests/test_const_value.py +++ b/python/paddle/fluid/tests/unittests/test_const_value.py @@ -13,7 +13,7 @@ # limitations under the License. 
import unittest -import paddle.v2.fluid.framework as framework +import paddle.fluid.framework as framework class ConditionalBlock(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_op.py index ad242692ec..1fada38a03 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np -import paddle.v2.fluid.core as core +import paddle.fluid.core as core from op_test import OpTest diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py index c9e74f5860..9831b7eb12 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np -import paddle.v2.fluid.core as core +import paddle.fluid.core as core from op_test import OpTest diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_op.py b/python/paddle/fluid/tests/unittests/test_conv3d_op.py index 0f7e383d1a..4d3df5e33c 100644 --- a/python/paddle/fluid/tests/unittests/test_conv3d_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv3d_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np -import paddle.v2.fluid.core as core +import paddle.fluid.core as core from op_test import OpTest diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py b/python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py index a70f23d4ad..a79bfa13d6 100644 --- a/python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np -import paddle.v2.fluid.core as core +import paddle.fluid.core as core from op_test import OpTest diff --git a/python/paddle/fluid/tests/unittests/test_create_op_doc_string.py b/python/paddle/fluid/tests/unittests/test_create_op_doc_string.py index 4eadbd18ac..5e6f9a20a9 100644 --- a/python/paddle/fluid/tests/unittests/test_create_op_doc_string.py +++ b/python/paddle/fluid/tests/unittests/test_create_op_doc_string.py @@ -13,7 +13,7 @@ # limitations under the License. import unittest -import paddle.v2.fluid.layers as layers +import paddle.fluid.layers as layers class TestDocString(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_default_scope_funcs.py b/python/paddle/fluid/tests/unittests/test_default_scope_funcs.py index d7ca596070..a3bf7b544b 100644 --- a/python/paddle/fluid/tests/unittests/test_default_scope_funcs.py +++ b/python/paddle/fluid/tests/unittests/test_default_scope_funcs.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from paddle.v2.fluid.default_scope_funcs import * +from paddle.fluid.default_scope_funcs import * import unittest diff --git a/python/paddle/fluid/tests/unittests/test_dyn_rnn.py b/python/paddle/fluid/tests/unittests/test_dyn_rnn.py index 1571572fc6..0a3812a68d 100644 --- a/python/paddle/fluid/tests/unittests/test_dyn_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_dyn_rnn.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import paddle.v2.fluid as fluid +import paddle.fluid as fluid import paddle.v2 as paddle import unittest import numpy diff --git a/python/paddle/fluid/tests/unittests/test_dynrnn_gradient_check.py b/python/paddle/fluid/tests/unittests/test_dynrnn_gradient_check.py index 8b01ec730a..f3dcbe3386 100644 --- a/python/paddle/fluid/tests/unittests/test_dynrnn_gradient_check.py +++ b/python/paddle/fluid/tests/unittests/test_dynrnn_gradient_check.py @@ -15,7 +15,7 @@ import numpy import random import collections -import paddle.v2.fluid as fluid +import paddle.fluid as fluid import unittest from decorators import * diff --git a/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py b/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py index d2f05dcd14..c28177a1ae 100644 --- a/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py +++ b/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py @@ -14,11 +14,11 @@ import unittest import paddle.v2 as paddle -import paddle.v2.fluid.core as core -import paddle.v2.fluid as fluid -from paddle.v2.fluid.backward import append_backward -import paddle.v2.fluid.framework as framework -from paddle.v2.fluid.framework import Program, switch_main_program +import paddle.fluid.core as core +import paddle.fluid as fluid +from paddle.fluid.backward import append_backward +import paddle.fluid.framework as framework +from paddle.fluid.framework import Program, switch_main_program import bisect import numpy as np diff --git a/python/paddle/fluid/tests/unittests/test_exception.py b/python/paddle/fluid/tests/unittests/test_exception.py index 066b0b7409..bb7c0f88f6 100644 --- a/python/paddle/fluid/tests/unittests/test_exception.py +++ b/python/paddle/fluid/tests/unittests/test_exception.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import paddle.v2.fluid.core as core +import paddle.fluid.core as core import unittest diff --git a/python/paddle/fluid/tests/unittests/test_executor_and_mul.py b/python/paddle/fluid/tests/unittests/test_executor_and_mul.py index c043c07b3a..4958bef3ef 100644 --- a/python/paddle/fluid/tests/unittests/test_executor_and_mul.py +++ b/python/paddle/fluid/tests/unittests/test_executor_and_mul.py @@ -15,10 +15,10 @@ import unittest import numpy -import paddle.v2.fluid.core as core +import paddle.fluid.core as core -from paddle.v2.fluid.executor import Executor -from paddle.v2.fluid.layers import mul, data +from paddle.fluid.executor import Executor +from paddle.fluid.layers import mul, data class TestExecutor(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_feed_fetch_method.py b/python/paddle/fluid/tests/unittests/test_feed_fetch_method.py index f24e5e27f3..9d724a6479 100644 --- a/python/paddle/fluid/tests/unittests/test_feed_fetch_method.py +++ b/python/paddle/fluid/tests/unittests/test_feed_fetch_method.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import paddle.v2.fluid.core as core +import paddle.fluid.core as core import unittest import numpy as np diff --git a/python/paddle/fluid/tests/unittests/test_fetch_var.py b/python/paddle/fluid/tests/unittests/test_fetch_var.py index ed75a350b0..46c3bbb671 100644 --- a/python/paddle/fluid/tests/unittests/test_fetch_var.py +++ b/python/paddle/fluid/tests/unittests/test_fetch_var.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import paddle.v2.fluid as fluid -import paddle.v2.fluid.layers as layers +import paddle.fluid as fluid +import paddle.fluid.layers as layers import op_test import numpy import unittest diff --git a/python/paddle/fluid/tests/unittests/test_fill_op.py b/python/paddle/fluid/tests/unittests/test_fill_op.py index c2e3cfe6f3..762d29199e 100644 --- a/python/paddle/fluid/tests/unittests/test_fill_op.py +++ b/python/paddle/fluid/tests/unittests/test_fill_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np from op_test import OpTest -import paddle.v2.fluid.core as core +import paddle.fluid.core as core class TestFillOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_framework_debug_str.py b/python/paddle/fluid/tests/unittests/test_framework_debug_str.py index 88995c24df..c906c74afe 100644 --- a/python/paddle/fluid/tests/unittests/test_framework_debug_str.py +++ b/python/paddle/fluid/tests/unittests/test_framework_debug_str.py @@ -13,7 +13,7 @@ # limitations under the License. import unittest -from paddle.v2.fluid.framework import Program +from paddle.fluid.framework import Program class TestDebugStringFramework(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py index 3c0ee64098..272caceaf3 100644 --- a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py +++ b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py @@ -15,10 +15,10 @@ import unittest import numpy -import paddle.v2.fluid as fluid -import paddle.v2.fluid.core as core -from paddle.v2.fluid.op import Operator -from paddle.v2.fluid.executor import Executor +import paddle.fluid as fluid +import paddle.fluid.core as core +from paddle.fluid.op import Operator +from paddle.fluid.executor import Executor class TestGaussianRandomOp(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_get_places_op.py b/python/paddle/fluid/tests/unittests/test_get_places_op.py index 265433e606..6dab1e22f0 100644 --- a/python/paddle/fluid/tests/unittests/test_get_places_op.py +++ b/python/paddle/fluid/tests/unittests/test_get_places_op.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import paddle.v2.fluid as fluid +import paddle.fluid as fluid import decorators import unittest diff --git a/python/paddle/fluid/tests/unittests/test_image_classification_layer.py b/python/paddle/fluid/tests/unittests/test_image_classification_layer.py index 8af8f646a7..6ecfa9ea21 100644 --- a/python/paddle/fluid/tests/unittests/test_image_classification_layer.py +++ b/python/paddle/fluid/tests/unittests/test_image_classification_layer.py @@ -14,9 +14,9 @@ import unittest -import paddle.v2.fluid as fluid -import paddle.v2.fluid.nets as nets -from paddle.v2.fluid.framework import Program +import paddle.fluid as fluid +import paddle.fluid.nets as nets +from paddle.fluid.framework import Program def conv_block(input, num_filter, groups, dropouts): diff --git a/python/paddle/fluid/tests/unittests/test_infer_shape.py b/python/paddle/fluid/tests/unittests/test_infer_shape.py index 17957b9e04..699a2d4246 100644 --- a/python/paddle/fluid/tests/unittests/test_infer_shape.py +++ b/python/paddle/fluid/tests/unittests/test_infer_shape.py @@ -14,7 +14,7 @@ import unittest -import paddle.v2.fluid.core as core +import paddle.fluid.core as core class TestInferShape(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_inference_model_io.py b/python/paddle/fluid/tests/unittests/test_inference_model_io.py index e381312ccc..238ca7188b 100644 --- a/python/paddle/fluid/tests/unittests/test_inference_model_io.py +++ b/python/paddle/fluid/tests/unittests/test_inference_model_io.py @@ -15,13 +15,13 @@ import unittest import numpy as np -import paddle.v2.fluid.core as core +import paddle.fluid.core as core -import paddle.v2.fluid.executor as executor -import paddle.v2.fluid.layers as layers -import paddle.v2.fluid.optimizer as optimizer -from paddle.v2.fluid.framework import Program, program_guard -from paddle.v2.fluid.io import save_inference_model, load_inference_model +import paddle.fluid.executor as executor +import paddle.fluid.layers as layers +import paddle.fluid.optimizer as optimizer +from paddle.fluid.framework import Program, program_guard +from paddle.fluid.io import save_inference_model, load_inference_model class TestBook(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_initializer.py b/python/paddle/fluid/tests/unittests/test_initializer.py index 6d4eb62916..587e2025e1 100644 --- a/python/paddle/fluid/tests/unittests/test_initializer.py +++ b/python/paddle/fluid/tests/unittests/test_initializer.py @@ -15,8 +15,8 @@ import numpy as np import unittest -import paddle.v2.fluid.framework as framework -import paddle.v2.fluid.initializer as initializer +import paddle.fluid.framework as framework +import paddle.fluid.initializer as initializer DELTA = 0.00001 diff --git a/python/paddle/fluid/tests/unittests/test_is_empty_op.py b/python/paddle/fluid/tests/unittests/test_is_empty_op.py index 799da9dc15..4d11cf226b 100644 --- a/python/paddle/fluid/tests/unittests/test_is_empty_op.py +++ b/python/paddle/fluid/tests/unittests/test_is_empty_op.py @@ -14,8 +14,8 @@ import unittest import numpy as np -from paddle.v2.fluid.op import Operator -import paddle.v2.fluid.core as core +from paddle.fluid.op import Operator +import paddle.fluid.core as core def create_tensor(scope, name, np_data): diff --git a/python/paddle/fluid/tests/unittests/test_layer_norm_op.py b/python/paddle/fluid/tests/unittests/test_layer_norm_op.py index a1206b3b85..8c67e45b7f 100644 --- a/python/paddle/fluid/tests/unittests/test_layer_norm_op.py +++ 
b/python/paddle/fluid/tests/unittests/test_layer_norm_op.py @@ -16,9 +16,9 @@ import numpy as np from operator import mul from op_test import OpTest -import paddle.v2.fluid.core as core -from paddle.v2.fluid.op import Operator -from paddle.v2.fluid.framework import grad_var_name +import paddle.fluid.core as core +from paddle.fluid.op import Operator +from paddle.fluid.framework import grad_var_name np.random.random(123) diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index e757598bba..bb673d3b54 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -15,10 +15,10 @@ from __future__ import print_function import unittest -import paddle.v2.fluid.layers as layers -import paddle.v2.fluid.nets as nets -from paddle.v2.fluid.framework import Program, program_guard, default_main_program -from paddle.v2.fluid.param_attr import ParamAttr +import paddle.fluid.layers as layers +import paddle.fluid.nets as nets +from paddle.fluid.framework import Program, program_guard, default_main_program +from paddle.fluid.param_attr import ParamAttr import decorators diff --git a/python/paddle/fluid/tests/unittests/test_learning_rate_decay.py b/python/paddle/fluid/tests/unittests/test_learning_rate_decay.py index 1d6bab3d6c..595b051689 100644 --- a/python/paddle/fluid/tests/unittests/test_learning_rate_decay.py +++ b/python/paddle/fluid/tests/unittests/test_learning_rate_decay.py @@ -17,10 +17,10 @@ import unittest import math import copy -import paddle.v2.fluid.framework as framework -import paddle.v2.fluid as fluid -import paddle.v2.fluid.layers as layers -import paddle.v2.fluid.learning_rate_decay as lr_decay +import paddle.fluid.framework as framework +import paddle.fluid as fluid +import paddle.fluid.layers as layers +import paddle.fluid.learning_rate_decay as lr_decay def exponential_decay(learning_rate, diff --git a/python/paddle/fluid/tests/unittests/test_lod_array_length_op.py b/python/paddle/fluid/tests/unittests/test_lod_array_length_op.py index 643ee906d6..d8b4e40662 100644 --- a/python/paddle/fluid/tests/unittests/test_lod_array_length_op.py +++ b/python/paddle/fluid/tests/unittests/test_lod_array_length_op.py @@ -13,9 +13,9 @@ # limitations under the License. import unittest -import paddle.v2.fluid.layers as layers -from paddle.v2.fluid.executor import Executor -import paddle.v2.fluid.core as core +import paddle.fluid.layers as layers +from paddle.fluid.executor import Executor +import paddle.fluid.core as core import numpy diff --git a/python/paddle/fluid/tests/unittests/test_lod_rank_table.py b/python/paddle/fluid/tests/unittests/test_lod_rank_table.py index 70b8d69585..093eecb837 100644 --- a/python/paddle/fluid/tests/unittests/test_lod_rank_table.py +++ b/python/paddle/fluid/tests/unittests/test_lod_rank_table.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from paddle.v2.fluid.layers import lod_rank_table, data -from paddle.v2.fluid.executor import Executor -import paddle.v2.fluid.core as core +from paddle.fluid.layers import lod_rank_table, data +from paddle.fluid.executor import Executor +import paddle.fluid.core as core import numpy import unittest diff --git a/python/paddle/fluid/tests/unittests/test_lod_tensor_array.py b/python/paddle/fluid/tests/unittests/test_lod_tensor_array.py index 0e90e25538..63b17a5ccd 100644 --- a/python/paddle/fluid/tests/unittests/test_lod_tensor_array.py +++ b/python/paddle/fluid/tests/unittests/test_lod_tensor_array.py @@ -13,7 +13,7 @@ # limitations under the License. import unittest -import paddle.v2.fluid.core as core +import paddle.fluid.core as core import numpy diff --git a/python/paddle/fluid/tests/unittests/test_lod_tensor_array_ops.py b/python/paddle/fluid/tests/unittests/test_lod_tensor_array_ops.py index ebc0a2f714..de1a3d101d 100644 --- a/python/paddle/fluid/tests/unittests/test_lod_tensor_array_ops.py +++ b/python/paddle/fluid/tests/unittests/test_lod_tensor_array_ops.py @@ -13,12 +13,12 @@ # limitations under the License. import unittest -import paddle.v2.fluid.core as core +import paddle.fluid.core as core import numpy -import paddle.v2.fluid.layers as layers -from paddle.v2.fluid.framework import Program, program_guard -from paddle.v2.fluid.executor import Executor -from paddle.v2.fluid.backward import append_backward +import paddle.fluid.layers as layers +from paddle.fluid.framework import Program, program_guard +from paddle.fluid.executor import Executor +from paddle.fluid.backward import append_backward class TestCPULoDTensorArrayOps(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_math_op_patch.py b/python/paddle/fluid/tests/unittests/test_math_op_patch.py index cae5188fe8..6864d271e7 100644 --- a/python/paddle/fluid/tests/unittests/test_math_op_patch.py +++ b/python/paddle/fluid/tests/unittests/test_math_op_patch.py @@ -14,7 +14,7 @@ import unittest import decorators -import paddle.v2.fluid as fluid +import paddle.fluid as fluid import numpy diff --git a/python/paddle/fluid/tests/unittests/test_memory_optimization_transpiler.py b/python/paddle/fluid/tests/unittests/test_memory_optimization_transpiler.py index a276db581e..e57804d0df 100644 --- a/python/paddle/fluid/tests/unittests/test_memory_optimization_transpiler.py +++ b/python/paddle/fluid/tests/unittests/test_memory_optimization_transpiler.py @@ -15,10 +15,10 @@ from __future__ import print_function import unittest -import paddle.v2.fluid.layers as layers -import paddle.v2.fluid.optimizer as optimizer -from paddle.v2.fluid.framework import Program, program_guard -from paddle.v2.fluid.memory_optimization_transpiler import memory_optimize +import paddle.fluid.layers as layers +import paddle.fluid.optimizer as optimizer +from paddle.fluid.framework import Program, program_guard +from paddle.fluid.memory_optimization_transpiler import memory_optimize class TestControlFlowGraph(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_multihead_attention.py b/python/paddle/fluid/tests/unittests/test_multihead_attention.py index 6eeeefe021..80c3c67967 100644 --- a/python/paddle/fluid/tests/unittests/test_multihead_attention.py +++ b/python/paddle/fluid/tests/unittests/test_multihead_attention.py @@ -13,8 +13,8 @@ # limitations under the License. 
import unittest -import paddle.v2.fluid as fluid -import paddle.v2.fluid.core as core +import paddle.fluid as fluid +import paddle.fluid.core as core import numpy as np diff --git a/python/paddle/fluid/tests/unittests/test_net.py b/python/paddle/fluid/tests/unittests/test_net.py index 796a839117..ae1699d647 100644 --- a/python/paddle/fluid/tests/unittests/test_net.py +++ b/python/paddle/fluid/tests/unittests/test_net.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import paddle.v2.fluid.core as core -from paddle.v2.fluid.op import Operator +import paddle.fluid.core as core +from paddle.fluid.op import Operator import unittest diff --git a/python/paddle/fluid/tests/unittests/test_normalization_wrapper.py b/python/paddle/fluid/tests/unittests/test_normalization_wrapper.py index 094d8071e2..ef34893943 100644 --- a/python/paddle/fluid/tests/unittests/test_normalization_wrapper.py +++ b/python/paddle/fluid/tests/unittests/test_normalization_wrapper.py @@ -13,8 +13,8 @@ # limitations under the License. import unittest -import paddle.v2.fluid as fluid -import paddle.v2.fluid.core as core +import paddle.fluid as fluid +import paddle.fluid.core as core import numpy as np diff --git a/python/paddle/fluid/tests/unittests/test_one_hot_op.py b/python/paddle/fluid/tests/unittests/test_one_hot_op.py index b7db30104a..cd78cce872 100644 --- a/python/paddle/fluid/tests/unittests/test_one_hot_op.py +++ b/python/paddle/fluid/tests/unittests/test_one_hot_op.py @@ -16,10 +16,10 @@ import unittest import numpy as np import math from op_test import OpTest -import paddle.v2.fluid as fluid -import paddle.v2.fluid.core as core -import paddle.v2.fluid.framework as framework -from paddle.v2.fluid.framework import Program, program_guard +import paddle.fluid as fluid +import paddle.fluid.core as core +import paddle.fluid.framework as framework +from paddle.fluid.framework import Program, program_guard class TestOneHotOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_op_support_gpu.py b/python/paddle/fluid/tests/unittests/test_op_support_gpu.py index f8ac55590c..5fafb8280e 100644 --- a/python/paddle/fluid/tests/unittests/test_op_support_gpu.py +++ b/python/paddle/fluid/tests/unittests/test_op_support_gpu.py @@ -13,7 +13,7 @@ # limitations under the License. 
import unittest -import paddle.v2.fluid.core as core +import paddle.fluid.core as core class TestOpSupportGPU(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_operator.py b/python/paddle/fluid/tests/unittests/test_operator.py index 1f5de93387..5e418fe6ac 100644 --- a/python/paddle/fluid/tests/unittests/test_operator.py +++ b/python/paddle/fluid/tests/unittests/test_operator.py @@ -14,8 +14,8 @@ import unittest -import paddle.v2.fluid.op as op -import paddle.v2.fluid.proto.framework_pb2 as framework_pb2 +import paddle.fluid.op as op +import paddle.fluid.proto.framework_pb2 as framework_pb2 class TestGetAllProtos(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_operator_desc.py b/python/paddle/fluid/tests/unittests/test_operator_desc.py index c64c08ff7f..649fabe4a0 100644 --- a/python/paddle/fluid/tests/unittests/test_operator_desc.py +++ b/python/paddle/fluid/tests/unittests/test_operator_desc.py @@ -14,9 +14,9 @@ import unittest -import paddle.v2.fluid.core as core +import paddle.fluid.core as core -from paddle.v2.fluid.framework import Program, default_startup_program +from paddle.fluid.framework import Program, default_startup_program main_program = default_startup_program() diff --git a/python/paddle/fluid/tests/unittests/test_optimizer.py b/python/paddle/fluid/tests/unittests/test_optimizer.py index 875e9e7c76..6ee7fc819a 100644 --- a/python/paddle/fluid/tests/unittests/test_optimizer.py +++ b/python/paddle/fluid/tests/unittests/test_optimizer.py @@ -14,9 +14,9 @@ import unittest -import paddle.v2.fluid.framework as framework -import paddle.v2.fluid.optimizer as optimizer -from paddle.v2.fluid.backward import append_backward +import paddle.fluid.framework as framework +import paddle.fluid.optimizer as optimizer +from paddle.fluid.backward import append_backward class TestOptimizer(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_parallel_op.py b/python/paddle/fluid/tests/unittests/test_parallel_op.py index d65752608b..edaae977df 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_op.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_op.py @@ -14,7 +14,7 @@ import unittest -import paddle.v2.fluid as fluid +import paddle.fluid as fluid import numpy diff --git a/python/paddle/fluid/tests/unittests/test_parameter.py b/python/paddle/fluid/tests/unittests/test_parameter.py index 88356a7ea1..e09865074e 100644 --- a/python/paddle/fluid/tests/unittests/test_parameter.py +++ b/python/paddle/fluid/tests/unittests/test_parameter.py @@ -13,11 +13,11 @@ # limitations under the License. 
import unittest -from paddle.v2.fluid.framework import default_main_program -import paddle.v2.fluid.core as core -from paddle.v2.fluid.executor import Executor -import paddle.v2.fluid.io as io -from paddle.v2.fluid.initializer import ConstantInitializer +from paddle.fluid.framework import default_main_program +import paddle.fluid.core as core +from paddle.fluid.executor import Executor +import paddle.fluid.io as io +from paddle.fluid.initializer import ConstantInitializer import numpy as np main_program = default_main_program() diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_op.py b/python/paddle/fluid/tests/unittests/test_pool2d_op.py index 77961bc99f..12899ecca3 100644 --- a/python/paddle/fluid/tests/unittests/test_pool2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool2d_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np -import paddle.v2.fluid.core as core +import paddle.fluid.core as core from op_test import OpTest diff --git a/python/paddle/fluid/tests/unittests/test_pool3d_op.py b/python/paddle/fluid/tests/unittests/test_pool3d_op.py index a6afdaedc5..321b5f39ff 100644 --- a/python/paddle/fluid/tests/unittests/test_pool3d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool3d_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np -import paddle.v2.fluid.core as core +import paddle.fluid.core as core from op_test import OpTest diff --git a/python/paddle/fluid/tests/unittests/test_print_op.py b/python/paddle/fluid/tests/unittests/test_print_op.py index 1e49ce994b..5c08bf4fc3 100644 --- a/python/paddle/fluid/tests/unittests/test_print_op.py +++ b/python/paddle/fluid/tests/unittests/test_print_op.py @@ -13,12 +13,12 @@ # limitations under the License. import unittest -import paddle.v2.fluid.core as core -from paddle.v2.fluid.executor import Executor -import paddle.v2.fluid.layers as layers -from paddle.v2.fluid.backward import append_backward -from paddle.v2.fluid.framework import switch_main_program -from paddle.v2.fluid.framework import Program +import paddle.fluid.core as core +from paddle.fluid.executor import Executor +import paddle.fluid.layers as layers +from paddle.fluid.backward import append_backward +from paddle.fluid.framework import switch_main_program +from paddle.fluid.framework import Program import numpy as np diff --git a/python/paddle/fluid/tests/unittests/test_profiler.py b/python/paddle/fluid/tests/unittests/test_profiler.py index 62bfb2b8e2..09f23894c4 100644 --- a/python/paddle/fluid/tests/unittests/test_profiler.py +++ b/python/paddle/fluid/tests/unittests/test_profiler.py @@ -15,10 +15,10 @@ import unittest import os import numpy as np -import paddle.v2.fluid as fluid -import paddle.v2.fluid.profiler as profiler -import paddle.v2.fluid.layers as layers -import paddle.v2.fluid.core as core +import paddle.fluid as fluid +import paddle.fluid.profiler as profiler +import paddle.fluid.layers as layers +import paddle.fluid.core as core class TestProfiler(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_program.py b/python/paddle/fluid/tests/unittests/test_program.py index 266e189e50..87a2195f0d 100644 --- a/python/paddle/fluid/tests/unittests/test_program.py +++ b/python/paddle/fluid/tests/unittests/test_program.py @@ -15,8 +15,8 @@ from __future__ import print_function import unittest -from paddle.v2.fluid.framework import Program, default_main_program, program_guard, grad_var_name -import paddle.v2.fluid.layers as layers +from paddle.fluid.framework import Program, default_main_program, program_guard, 
grad_var_name +import paddle.fluid.layers as layers main_program = default_main_program() diff --git a/python/paddle/fluid/tests/unittests/test_protobuf.py b/python/paddle/fluid/tests/unittests/test_protobuf.py index 90de56514d..c3f1fa8018 100644 --- a/python/paddle/fluid/tests/unittests/test_protobuf.py +++ b/python/paddle/fluid/tests/unittests/test_protobuf.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import paddle.v2.fluid.proto.framework_pb2 as framework_pb2 +import paddle.fluid.proto.framework_pb2 as framework_pb2 import unittest diff --git a/python/paddle/fluid/tests/unittests/test_protobuf_descs.py b/python/paddle/fluid/tests/unittests/test_protobuf_descs.py index c3bef95874..309ea2b9b7 100644 --- a/python/paddle/fluid/tests/unittests/test_protobuf_descs.py +++ b/python/paddle/fluid/tests/unittests/test_protobuf_descs.py @@ -13,7 +13,7 @@ # limitations under the License. import unittest -import paddle.v2.fluid.core as core +import paddle.fluid.core as core class TestOpDesc(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_recurrent_op.py b/python/paddle/fluid/tests/unittests/test_recurrent_op.py index 177d8fc65f..d2d13386ac 100644 --- a/python/paddle/fluid/tests/unittests/test_recurrent_op.py +++ b/python/paddle/fluid/tests/unittests/test_recurrent_op.py @@ -14,12 +14,12 @@ import unittest -import paddle.v2.fluid.layers as layers -from paddle.v2.fluid.framework import Program, grad_var_name -from paddle.v2.fluid.executor import Executor -from paddle.v2.fluid.backward import append_backward +import paddle.fluid.layers as layers +from paddle.fluid.framework import Program, grad_var_name +from paddle.fluid.executor import Executor +from paddle.fluid.backward import append_backward import numpy as np -import paddle.v2.fluid.core as core +import paddle.fluid.core as core class PyRNNBase(object): diff --git a/python/paddle/fluid/tests/unittests/test_recv_op.py b/python/paddle/fluid/tests/unittests/test_recv_op.py index 7a0802afc5..985d892c56 100644 --- a/python/paddle/fluid/tests/unittests/test_recv_op.py +++ b/python/paddle/fluid/tests/unittests/test_recv_op.py @@ -14,8 +14,8 @@ import unittest -import paddle.v2.fluid as fluid -import paddle.v2.fluid.layers as layers +import paddle.fluid as fluid +import paddle.fluid.layers as layers import numpy from multiprocessing import Process import os, sys diff --git a/python/paddle/fluid/tests/unittests/test_registry.py b/python/paddle/fluid/tests/unittests/test_registry.py index 82527a6ec7..d04192cb3a 100644 --- a/python/paddle/fluid/tests/unittests/test_registry.py +++ b/python/paddle/fluid/tests/unittests/test_registry.py @@ -13,7 +13,7 @@ # limitations under the License. 
import unittest -import paddle.v2.fluid as fluid +import paddle.fluid as fluid import numpy as np import decorators diff --git a/python/paddle/fluid/tests/unittests/test_regularizer.py b/python/paddle/fluid/tests/unittests/test_regularizer.py index 8fc4db1c5a..9b1c4ceada 100644 --- a/python/paddle/fluid/tests/unittests/test_regularizer.py +++ b/python/paddle/fluid/tests/unittests/test_regularizer.py @@ -14,10 +14,10 @@ import unittest -import paddle.v2.fluid.framework as framework -import paddle.v2.fluid.optimizer as optimizer -import paddle.v2.fluid.regularizer as regularizer -from paddle.v2.fluid.backward import append_backward +import paddle.fluid.framework as framework +import paddle.fluid.optimizer as optimizer +import paddle.fluid.regularizer as regularizer +from paddle.fluid.backward import append_backward class TestL2DecayRegularizer(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_reorder_lod_tensor.py b/python/paddle/fluid/tests/unittests/test_reorder_lod_tensor.py index d4e17d1b1e..76d0d2f2fe 100644 --- a/python/paddle/fluid/tests/unittests/test_reorder_lod_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_reorder_lod_tensor.py @@ -13,8 +13,8 @@ # limitations under the License. import unittest -import paddle.v2.fluid as fluid -import paddle.v2.fluid.core as core +import paddle.fluid as fluid +import paddle.fluid.core as core import numpy diff --git a/python/paddle/fluid/tests/unittests/test_rnn_memory_helper_op.py b/python/paddle/fluid/tests/unittests/test_rnn_memory_helper_op.py index 773bd17456..178606f059 100644 --- a/python/paddle/fluid/tests/unittests/test_rnn_memory_helper_op.py +++ b/python/paddle/fluid/tests/unittests/test_rnn_memory_helper_op.py @@ -14,11 +14,11 @@ import unittest -from paddle.v2.fluid.framework import Program -from paddle.v2.fluid.executor import Executor -from paddle.v2.fluid.backward import append_backward +from paddle.fluid.framework import Program +from paddle.fluid.executor import Executor +from paddle.fluid.backward import append_backward import numpy as np -import paddle.v2.fluid.core as core +import paddle.fluid.core as core class RNNMemoryHelperOpTest(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_scope.py b/python/paddle/fluid/tests/unittests/test_scope.py index 2a2efbf098..d249a989a9 100644 --- a/python/paddle/fluid/tests/unittests/test_scope.py +++ b/python/paddle/fluid/tests/unittests/test_scope.py @@ -12,25 +12,25 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import paddle.v2.fluid.core +import paddle.fluid.core import unittest class TestScope(unittest.TestCase): def test_create_destroy(self): - paddle_c = paddle.v2.fluid.core + paddle_c = paddle.fluid.core scope = paddle_c.Scope() self.assertIsNotNone(scope) scope_with_parent = scope.new_scope() self.assertIsNotNone(scope_with_parent) def test_none_variable(self): - paddle_c = paddle.v2.fluid.core + paddle_c = paddle.fluid.core scope = paddle_c.Scope() self.assertIsNone(scope.find_var("test")) def test_create_var_get_var(self): - paddle_c = paddle.v2.fluid.core + paddle_c = paddle.fluid.core scope = paddle_c.Scope() var_a = scope.var("var_a") self.assertIsNotNone(var_a) @@ -39,7 +39,7 @@ class TestScope(unittest.TestCase): self.assertIsNotNone(scope2.find_var('var_a')) def test_var_get_int(self): - paddle_c = paddle.v2.fluid.core + paddle_c = paddle.fluid.core scope = paddle_c.Scope() var = scope.var("test_int") var.set_int(10) diff --git a/python/paddle/fluid/tests/unittests/test_selected_rows.py b/python/paddle/fluid/tests/unittests/test_selected_rows.py index 50c8bb4bca..3d7b86787f 100644 --- a/python/paddle/fluid/tests/unittests/test_selected_rows.py +++ b/python/paddle/fluid/tests/unittests/test_selected_rows.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import paddle.v2.fluid.core as core +import paddle.fluid.core as core import unittest import numpy as np diff --git a/python/paddle/fluid/tests/unittests/test_sgd_op.py b/python/paddle/fluid/tests/unittests/test_sgd_op.py index e5379b961f..c498b23db1 100644 --- a/python/paddle/fluid/tests/unittests/test_sgd_op.py +++ b/python/paddle/fluid/tests/unittests/test_sgd_op.py @@ -14,8 +14,8 @@ import unittest import numpy as np -import paddle.v2.fluid.core as core -from paddle.v2.fluid.op import Operator +import paddle.fluid.core as core +from paddle.fluid.op import Operator from op_test import OpTest diff --git a/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py b/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py index 48874ba8a5..ba34e6a486 100644 --- a/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py +++ b/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py @@ -13,12 +13,12 @@ # limitations under the License. import unittest -import paddle.v2.fluid.core as core -from paddle.v2.fluid.executor import Executor -import paddle.v2.fluid.layers as layers -from paddle.v2.fluid.backward import append_backward -from paddle.v2.fluid.framework import default_main_program, switch_main_program -from paddle.v2.fluid.framework import Program +import paddle.fluid.core as core +from paddle.fluid.executor import Executor +import paddle.fluid.layers as layers +from paddle.fluid.backward import append_backward +from paddle.fluid.framework import default_main_program, switch_main_program +from paddle.fluid.framework import Program import numpy as np diff --git a/python/paddle/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py b/python/paddle/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py index 48e6756a86..0b04ff302d 100644 --- a/python/paddle/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py +++ b/python/paddle/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py @@ -13,12 +13,12 @@ # limitations under the License. 
import unittest -import paddle.v2.fluid.core as core +import paddle.fluid.core as core import numpy as np -import paddle.v2.fluid.layers as layers -from paddle.v2.fluid.framework import Program, program_guard -from paddle.v2.fluid.executor import Executor -from paddle.v2.fluid.backward import append_backward +import paddle.fluid.layers as layers +from paddle.fluid.framework import Program, program_guard +from paddle.fluid.executor import Executor +from paddle.fluid.backward import append_backward class TestCPULoDTensorArrayOps(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_split_selected_rows_op.py b/python/paddle/fluid/tests/unittests/test_split_selected_rows_op.py index 2aaa05dcac..286d305a77 100644 --- a/python/paddle/fluid/tests/unittests/test_split_selected_rows_op.py +++ b/python/paddle/fluid/tests/unittests/test_split_selected_rows_op.py @@ -13,9 +13,9 @@ # limitations under the License. import unittest -import paddle.v2.fluid.core as core +import paddle.fluid.core as core import numpy as np -from paddle.v2.fluid.op import Operator +from paddle.fluid.op import Operator class TestSpliteSelectedRows(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_split_var.py b/python/paddle/fluid/tests/unittests/test_split_var.py index d7160b78b9..104ceb4fe7 100644 --- a/python/paddle/fluid/tests/unittests/test_split_var.py +++ b/python/paddle/fluid/tests/unittests/test_split_var.py @@ -14,9 +14,9 @@ import math import unittest -from paddle.v2.fluid.distribute_transpiler import split_dense_variable -import paddle.v2.fluid as fluid -import paddle.v2.fluid.core as core +from paddle.fluid.distribute_transpiler import split_dense_variable +import paddle.fluid as fluid +import paddle.fluid.core as core import random diff --git a/python/paddle/fluid/tests/unittests/test_switch.py b/python/paddle/fluid/tests/unittests/test_switch.py index 11296bc04e..528c5cce4b 100644 --- a/python/paddle/fluid/tests/unittests/test_switch.py +++ b/python/paddle/fluid/tests/unittests/test_switch.py @@ -14,11 +14,11 @@ import unittest -import paddle.v2.fluid.core as core -import paddle.v2.fluid.layers as layers -import paddle.v2.fluid.framework as framework -from paddle.v2.fluid.executor import Executor -from paddle.v2.fluid.framework import default_startup_program +import paddle.fluid.core as core +import paddle.fluid.layers as layers +import paddle.fluid.framework as framework +from paddle.fluid.executor import Executor +from paddle.fluid.framework import default_startup_program class TestSwitch(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_tensor.py b/python/paddle/fluid/tests/unittests/test_tensor.py index 8fe234a90f..a369783245 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_tensor.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import paddle.v2.fluid.core as core +import paddle.fluid.core as core import unittest import numpy diff --git a/python/paddle/fluid/tests/unittests/test_unique_name.py b/python/paddle/fluid/tests/unittests/test_unique_name.py index e28810c96b..49ef335618 100644 --- a/python/paddle/fluid/tests/unittests/test_unique_name.py +++ b/python/paddle/fluid/tests/unittests/test_unique_name.py @@ -13,7 +13,7 @@ # limitations under the License. 
import unittest -import paddle.v2.fluid as fluid +import paddle.fluid as fluid class TestUniqueName(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_variable.py b/python/paddle/fluid/tests/unittests/test_variable.py index 4ae3909d27..49784e21c4 100644 --- a/python/paddle/fluid/tests/unittests/test_variable.py +++ b/python/paddle/fluid/tests/unittests/test_variable.py @@ -13,8 +13,8 @@ # limitations under the License. import unittest -from paddle.v2.fluid.framework import default_main_program, Program, convert_np_dtype_to_dtype_ -import paddle.v2.fluid.core as core +from paddle.fluid.framework import default_main_program, Program, convert_np_dtype_to_dtype_ +import paddle.fluid.core as core import numpy as np diff --git a/python/paddle/fluid/tests/unittests/test_weight_normalization.py b/python/paddle/fluid/tests/unittests/test_weight_normalization.py index c2b81dddb0..2adf917bc5 100644 --- a/python/paddle/fluid/tests/unittests/test_weight_normalization.py +++ b/python/paddle/fluid/tests/unittests/test_weight_normalization.py @@ -15,10 +15,10 @@ import unittest import numpy import collections -import paddle.v2.fluid as fluid -import paddle.v2.fluid.core as core -from paddle.v2.fluid.initializer import ConstantInitializer -from paddle.v2.fluid.param_attr import WeightNormParamAttr +import paddle.fluid as fluid +import paddle.fluid.core as core +from paddle.fluid.initializer import ConstantInitializer +from paddle.fluid.param_attr import WeightNormParamAttr class TestWeightNormalization(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_while_op.py b/python/paddle/fluid/tests/unittests/test_while_op.py index 3fa1d5e0ed..5afeb5ae89 100644 --- a/python/paddle/fluid/tests/unittests/test_while_op.py +++ b/python/paddle/fluid/tests/unittests/test_while_op.py @@ -13,10 +13,10 @@ # limitations under the License. import unittest -import paddle.v2.fluid.layers as layers -from paddle.v2.fluid.executor import Executor -import paddle.v2.fluid.core as core -from paddle.v2.fluid.backward import append_backward +import paddle.fluid.layers as layers +from paddle.fluid.executor import Executor +import paddle.fluid.core as core +from paddle.fluid.backward import append_backward import numpy -- GitLab From fa32516281eb4e31379ae8a149e6b9d7eaa75654 Mon Sep 17 00:00:00 2001 From: superjom Date: Sat, 24 Feb 2018 18:26:02 +0800 Subject: [PATCH 170/217] boot --- doc/api/overview.rst | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/doc/api/overview.rst b/doc/api/overview.rst index 953d2db2b3..5b7da7a477 100644 --- a/doc/api/overview.rst +++ b/doc/api/overview.rst @@ -1,4 +1,13 @@ -API Overview -============ +# V2 API Overview -TBD +The PaddlePaddle V2 API is designed to provide a modern user interface for PaddlePaddle V1 (the original layer-based platform of PaddlePaddle). It introduces high-level concepts such as [Layers](http://www.paddlepaddle.org/docs/develop/api/en/v2/config/layer.html), [Optimizer](http://www.paddlepaddle.org/docs/develop/api/en/v2/config/optimizer.html), [Evaluator](http://www.paddlepaddle.org/docs/develop/api/en/v2/config/evaluators.html) and [Data Reader](http://www.paddlepaddle.org/docs/develop/api/en/v2/data/data_reader.html) to make model configuration more familiar to users. + +A model is composed of the computation described by a group of `Layers`, with an `Evaluator` to define the error, an `Optimizer` to update the parameters, and a `Data Reader` to feed in the data.
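+The following is a minimal sketch of how these pieces fit together, in the style of the classic fit-a-line example. It is illustrative only: the dataset, layer sizes, and pass count are placeholder choices, and the exact signatures should be checked against the linked references. The `trainer.train` call and the `event_handler` callback preview the Training and Inference interface described below.
+
+```python
+import paddle.v2 as paddle
+
+paddle.init(use_gpu=False, trainer_count=1)
+
+# A group of Layers describes the computation: here, a linear regression.
+x = paddle.layer.data(name='x', type=paddle.data_type.dense_vector(13))
+y = paddle.layer.data(name='y', type=paddle.data_type.dense_vector(1))
+y_predict = paddle.layer.fc(input=x, size=1, act=paddle.activation.Linear())
+
+# The cost defines the error to be minimized.
+cost = paddle.layer.square_error_cost(input=y_predict, label=y)
+
+# The Optimizer updates the parameters created from the cost topology.
+parameters = paddle.parameters.create(cost)
+optimizer = paddle.optimizer.Momentum(momentum=0)
+trainer = paddle.trainer.SGD(
+    cost=cost, parameters=parameters, update_equation=optimizer)
+
+# A callback that reacts to training Events.
+def event_handler(event):
+    if isinstance(event, paddle.event.EndIteration):
+        print("pass %d, batch %d, cost %f" %
+              (event.pass_id, event.batch_id, event.cost))
+
+# A Data Reader feeds mini-batches into training.
+trainer.train(
+    reader=paddle.batch(paddle.dataset.uci_housing.train(), batch_size=2),
+    feeding={'x': 0, 'y': 1},
+    event_handler=event_handler,
+    num_passes=10)
+```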
+We also provide an [interface for Training and Inference](http://www.paddlepaddle.org/docs/develop/api/en/v2/run_logic.html) to help control the training and inference phases. It offers several easy-to-use methods:

+- `paddle.train`
+- `paddle.test`
+- `paddle.infer`

+To better expose the internal running details, different [Events](http://www.paddlepaddle.org/docs/develop/api/en/v2/run_logic.html#event) are made available to users, who can respond to them by writing callbacks. -- GitLab From ef5d3d4bf132fb34015588a59f7c60a7ac2ce70a Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Sat, 24 Feb 2018 18:11:30 +0800 Subject: [PATCH 171/217] move Fluid API doc out of V2 API doc --- doc/api/v2/fluid/data_feeder.rst | 2 +- doc/api/v2/fluid/evaluator.rst | 4 +- doc/api/v2/fluid/executor.rst | 8 +- doc/api/v2/fluid/gen_doc.py | 6 +- doc/api/v2/fluid/initializer.rst | 8 +- doc/api/v2/fluid/io.rst | 18 +-- doc/api/v2/fluid/layers.rst | 256 +++++++++++++++---------------- doc/api/v2/fluid/nets.rst | 8 +- doc/api/v2/fluid/optimizer.rst | 12 +- doc/api/v2/fluid/param_attr.rst | 4 +- doc/api/v2/fluid/profiler.rst | 6 +- doc/api/v2/fluid/regularizer.rst | 6 +- doc/templates/conf.py.cn.in | 1 + doc/templates/conf.py.en.in | 1 + 14 files changed, 171 insertions(+), 169 deletions(-) diff --git a/doc/api/v2/fluid/data_feeder.rst b/doc/api/v2/fluid/data_feeder.rst index a591c7334f..3df5c0307f 100644 --- a/doc/api/v2/fluid/data_feeder.rst +++ b/doc/api/v2/fluid/data_feeder.rst @@ -8,7 +8,7 @@ data_feeder DataFeeder ---------- -.. autoclass:: paddle.v2.fluid.data_feeder.DataFeeder +.. autoclass:: paddle.fluid.data_feeder.DataFeeder :members: :noindex: diff --git a/doc/api/v2/fluid/evaluator.rst b/doc/api/v2/fluid/evaluator.rst index 00dcecfd62..ae9daeb791 100644 --- a/doc/api/v2/fluid/evaluator.rst +++ b/doc/api/v2/fluid/evaluator.rst @@ -8,14 +8,14 @@ evaluator Accuracy -------- -.. autoclass:: paddle.v2.fluid.evaluator.Accuracy +.. autoclass:: paddle.fluid.evaluator.Accuracy :members: :noindex: ChunkEvaluator -------------- -.. autoclass:: paddle.v2.fluid.evaluator.ChunkEvaluator +.. autoclass:: paddle.fluid.evaluator.ChunkEvaluator :members: :noindex: diff --git a/doc/api/v2/fluid/executor.rst b/doc/api/v2/fluid/executor.rst index a028f6283f..a9cdf264e4 100644 --- a/doc/api/v2/fluid/executor.rst +++ b/doc/api/v2/fluid/executor.rst @@ -8,25 +8,25 @@ executor Executor -------- -.. autoclass:: paddle.v2.fluid.executor.Executor +.. autoclass:: paddle.fluid.executor.Executor :members: :noindex: global_scope ------------ -.. autofunction:: paddle.v2.fluid.executor.global_scope +.. autofunction:: paddle.fluid.executor.global_scope :noindex: scope_guard ----------- -.. autofunction:: paddle.v2.fluid.executor.scope_guard +.. autofunction:: paddle.fluid.executor.scope_guard :noindex: switch_scope ------------ -.. autofunction:: paddle.v2.fluid.executor.switch_scope +.. autofunction:: paddle.fluid.executor.switch_scope :noindex: diff --git a/doc/api/v2/fluid/gen_doc.py b/doc/api/v2/fluid/gen_doc.py index a2147fd3f7..89ab880301 100644 --- a/doc/api/v2/fluid/gen_doc.py +++ b/doc/api/v2/fluid/gen_doc.py @@ -17,7 +17,7 @@ import argparse import sys import types -import paddle.v2.fluid as fluid +import paddle.fluid as fluid def parse_arg(): @@ -70,7 +70,7 @@ class DocGenerator(object): def print_class(self, name): self._print_header_(name, dot='-', is_title=False) - self.stream.write('''.. autoclass:: paddle.v2.fluid.{0}.{1} + self.stream.write('''.. 
autoclass:: paddle.fluid.{0}.{1} :members: :noindex: @@ -78,7 +78,7 @@ class DocGenerator(object): def print_method(self, name): self._print_header_(name, dot='-', is_title=False) - self.stream.write('''.. autofunction:: paddle.v2.fluid.{0}.{1} + self.stream.write('''.. autofunction:: paddle.fluid.{0}.{1} :noindex: '''.format(self.module_name, name)) diff --git a/doc/api/v2/fluid/initializer.rst b/doc/api/v2/fluid/initializer.rst index c38be033ff..ee69925fda 100644 --- a/doc/api/v2/fluid/initializer.rst +++ b/doc/api/v2/fluid/initializer.rst @@ -8,28 +8,28 @@ initializer Constant -------- -.. autoclass:: paddle.v2.fluid.initializer.Constant +.. autoclass:: paddle.fluid.initializer.Constant :members: :noindex: Uniform ------- -.. autoclass:: paddle.v2.fluid.initializer.Uniform +.. autoclass:: paddle.fluid.initializer.Uniform :members: :noindex: Normal ------ -.. autoclass:: paddle.v2.fluid.initializer.Normal +.. autoclass:: paddle.fluid.initializer.Normal :members: :noindex: Xavier ------ -.. autoclass:: paddle.v2.fluid.initializer.Xavier +.. autoclass:: paddle.fluid.initializer.Xavier :members: :noindex: diff --git a/doc/api/v2/fluid/io.rst b/doc/api/v2/fluid/io.rst index 37c9c273e3..dd9d88b669 100644 --- a/doc/api/v2/fluid/io.rst +++ b/doc/api/v2/fluid/io.rst @@ -8,54 +8,54 @@ io save_vars --------- -.. autofunction:: paddle.v2.fluid.io.save_vars +.. autofunction:: paddle.fluid.io.save_vars :noindex: save_params ----------- -.. autofunction:: paddle.v2.fluid.io.save_params +.. autofunction:: paddle.fluid.io.save_params :noindex: save_persistables ----------------- -.. autofunction:: paddle.v2.fluid.io.save_persistables +.. autofunction:: paddle.fluid.io.save_persistables :noindex: load_vars --------- -.. autofunction:: paddle.v2.fluid.io.load_vars +.. autofunction:: paddle.fluid.io.load_vars :noindex: load_params ----------- -.. autofunction:: paddle.v2.fluid.io.load_params +.. autofunction:: paddle.fluid.io.load_params :noindex: load_persistables ----------------- -.. autofunction:: paddle.v2.fluid.io.load_persistables +.. autofunction:: paddle.fluid.io.load_persistables :noindex: save_inference_model -------------------- -.. autofunction:: paddle.v2.fluid.io.save_inference_model +.. autofunction:: paddle.fluid.io.save_inference_model :noindex: load_inference_model -------------------- -.. autofunction:: paddle.v2.fluid.io.load_inference_model +.. autofunction:: paddle.fluid.io.load_inference_model :noindex: get_inference_program --------------------- -.. autofunction:: paddle.v2.fluid.io.get_inference_program +.. autofunction:: paddle.fluid.io.get_inference_program :noindex: diff --git a/doc/api/v2/fluid/layers.rst b/doc/api/v2/fluid/layers.rst index 58c493fd74..ae35d8c534 100644 --- a/doc/api/v2/fluid/layers.rst +++ b/doc/api/v2/fluid/layers.rst @@ -11,167 +11,167 @@ control_flow split_lod_tensor ---------------- -.. autofunction:: paddle.v2.fluid.layers.split_lod_tensor +.. autofunction:: paddle.fluid.layers.split_lod_tensor :noindex: merge_lod_tensor ---------------- -.. autofunction:: paddle.v2.fluid.layers.merge_lod_tensor +.. autofunction:: paddle.fluid.layers.merge_lod_tensor :noindex: BlockGuard ---------- -.. autoclass:: paddle.v2.fluid.layers.BlockGuard +.. autoclass:: paddle.fluid.layers.BlockGuard :members: :noindex: BlockGuardWithCompletion ------------------------ -.. autoclass:: paddle.v2.fluid.layers.BlockGuardWithCompletion +.. autoclass:: paddle.fluid.layers.BlockGuardWithCompletion :members: :noindex: StaticRNNMemoryLink ------------------- -.. 
autoclass:: paddle.v2.fluid.layers.StaticRNNMemoryLink +.. autoclass:: paddle.fluid.layers.StaticRNNMemoryLink :members: :noindex: WhileGuard ---------- -.. autoclass:: paddle.v2.fluid.layers.WhileGuard +.. autoclass:: paddle.fluid.layers.WhileGuard :members: :noindex: While ----- -.. autoclass:: paddle.v2.fluid.layers.While +.. autoclass:: paddle.fluid.layers.While :members: :noindex: lod_rank_table -------------- -.. autofunction:: paddle.v2.fluid.layers.lod_rank_table +.. autofunction:: paddle.fluid.layers.lod_rank_table :noindex: max_sequence_len ---------------- -.. autofunction:: paddle.v2.fluid.layers.max_sequence_len +.. autofunction:: paddle.fluid.layers.max_sequence_len :noindex: topk ---- -.. autofunction:: paddle.v2.fluid.layers.topk +.. autofunction:: paddle.fluid.layers.topk :noindex: lod_tensor_to_array ------------------- -.. autofunction:: paddle.v2.fluid.layers.lod_tensor_to_array +.. autofunction:: paddle.fluid.layers.lod_tensor_to_array :noindex: array_to_lod_tensor ------------------- -.. autofunction:: paddle.v2.fluid.layers.array_to_lod_tensor +.. autofunction:: paddle.fluid.layers.array_to_lod_tensor :noindex: increment --------- -.. autofunction:: paddle.v2.fluid.layers.increment +.. autofunction:: paddle.fluid.layers.increment :noindex: array_write ----------- -.. autofunction:: paddle.v2.fluid.layers.array_write +.. autofunction:: paddle.fluid.layers.array_write :noindex: create_array ------------ -.. autofunction:: paddle.v2.fluid.layers.create_array +.. autofunction:: paddle.fluid.layers.create_array :noindex: less_than --------- -.. autofunction:: paddle.v2.fluid.layers.less_than +.. autofunction:: paddle.fluid.layers.less_than :noindex: array_read ---------- -.. autofunction:: paddle.v2.fluid.layers.array_read +.. autofunction:: paddle.fluid.layers.array_read :noindex: shrink_memory ------------- -.. autofunction:: paddle.v2.fluid.layers.shrink_memory +.. autofunction:: paddle.fluid.layers.shrink_memory :noindex: array_length ------------ -.. autofunction:: paddle.v2.fluid.layers.array_length +.. autofunction:: paddle.fluid.layers.array_length :noindex: IfElse ------ -.. autoclass:: paddle.v2.fluid.layers.IfElse +.. autoclass:: paddle.fluid.layers.IfElse :members: :noindex: DynamicRNN ---------- -.. autoclass:: paddle.v2.fluid.layers.DynamicRNN +.. autoclass:: paddle.fluid.layers.DynamicRNN :members: :noindex: ConditionalBlock ---------------- -.. autoclass:: paddle.v2.fluid.layers.ConditionalBlock +.. autoclass:: paddle.fluid.layers.ConditionalBlock :members: :noindex: StaticRNN --------- -.. autoclass:: paddle.v2.fluid.layers.StaticRNN +.. autoclass:: paddle.fluid.layers.StaticRNN :members: :noindex: reorder_lod_tensor_by_rank -------------------------- -.. autofunction:: paddle.v2.fluid.layers.reorder_lod_tensor_by_rank +.. autofunction:: paddle.fluid.layers.reorder_lod_tensor_by_rank :noindex: ParallelDo ---------- -.. autoclass:: paddle.v2.fluid.layers.ParallelDo +.. autoclass:: paddle.fluid.layers.ParallelDo :members: :noindex: Print ----- -.. autofunction:: paddle.v2.fluid.layers.Print +.. autofunction:: paddle.fluid.layers.Print :noindex: device @@ -180,7 +180,7 @@ device get_places ---------- -.. autofunction:: paddle.v2.fluid.layers.get_places +.. autofunction:: paddle.fluid.layers.get_places :noindex: io @@ -189,27 +189,27 @@ io data ---- -.. autofunction:: paddle.v2.fluid.layers.data +.. autofunction:: paddle.fluid.layers.data :noindex: BlockGuardServ -------------- -.. autoclass:: paddle.v2.fluid.layers.BlockGuardServ +.. 
autoclass:: paddle.fluid.layers.BlockGuardServ :members: :noindex: ListenAndServ ------------- -.. autoclass:: paddle.v2.fluid.layers.ListenAndServ +.. autoclass:: paddle.fluid.layers.ListenAndServ :members: :noindex: Send ---- -.. autofunction:: paddle.v2.fluid.layers.Send +.. autofunction:: paddle.fluid.layers.Send :noindex: nn @@ -218,259 +218,259 @@ nn fc -- -.. autofunction:: paddle.v2.fluid.layers.fc +.. autofunction:: paddle.fluid.layers.fc :noindex: embedding --------- -.. autofunction:: paddle.v2.fluid.layers.embedding +.. autofunction:: paddle.fluid.layers.embedding :noindex: dynamic_lstm ------------ -.. autofunction:: paddle.v2.fluid.layers.dynamic_lstm +.. autofunction:: paddle.fluid.layers.dynamic_lstm :noindex: dynamic_lstmp ------------- -.. autofunction:: paddle.v2.fluid.layers.dynamic_lstmp +.. autofunction:: paddle.fluid.layers.dynamic_lstmp :noindex: dynamic_gru ----------- -.. autofunction:: paddle.v2.fluid.layers.dynamic_gru +.. autofunction:: paddle.fluid.layers.dynamic_gru :noindex: gru_unit -------- -.. autofunction:: paddle.v2.fluid.layers.gru_unit +.. autofunction:: paddle.fluid.layers.gru_unit :noindex: linear_chain_crf ---------------- -.. autofunction:: paddle.v2.fluid.layers.linear_chain_crf +.. autofunction:: paddle.fluid.layers.linear_chain_crf :noindex: crf_decoding ------------ -.. autofunction:: paddle.v2.fluid.layers.crf_decoding +.. autofunction:: paddle.fluid.layers.crf_decoding :noindex: cos_sim ------- -.. autofunction:: paddle.v2.fluid.layers.cos_sim +.. autofunction:: paddle.fluid.layers.cos_sim :noindex: cross_entropy ------------- -.. autofunction:: paddle.v2.fluid.layers.cross_entropy +.. autofunction:: paddle.fluid.layers.cross_entropy :noindex: square_error_cost ----------------- -.. autofunction:: paddle.v2.fluid.layers.square_error_cost +.. autofunction:: paddle.fluid.layers.square_error_cost :noindex: accuracy -------- -.. autofunction:: paddle.v2.fluid.layers.accuracy +.. autofunction:: paddle.fluid.layers.accuracy :noindex: chunk_eval ---------- -.. autofunction:: paddle.v2.fluid.layers.chunk_eval +.. autofunction:: paddle.fluid.layers.chunk_eval :noindex: sequence_conv ------------- -.. autofunction:: paddle.v2.fluid.layers.sequence_conv +.. autofunction:: paddle.fluid.layers.sequence_conv :noindex: conv2d ------ -.. autofunction:: paddle.v2.fluid.layers.conv2d +.. autofunction:: paddle.fluid.layers.conv2d :noindex: sequence_pool ------------- -.. autofunction:: paddle.v2.fluid.layers.sequence_pool +.. autofunction:: paddle.fluid.layers.sequence_pool :noindex: pool2d ------ -.. autofunction:: paddle.v2.fluid.layers.pool2d +.. autofunction:: paddle.fluid.layers.pool2d :noindex: batch_norm ---------- -.. autofunction:: paddle.v2.fluid.layers.batch_norm +.. autofunction:: paddle.fluid.layers.batch_norm :noindex: layer_norm ---------- -.. autofunction:: paddle.v2.fluid.layers.layer_norm +.. autofunction:: paddle.fluid.layers.layer_norm :noindex: beam_search_decode ------------------ -.. autofunction:: paddle.v2.fluid.layers.beam_search_decode +.. autofunction:: paddle.fluid.layers.beam_search_decode :noindex: conv2d_transpose ---------------- -.. autofunction:: paddle.v2.fluid.layers.conv2d_transpose +.. autofunction:: paddle.fluid.layers.conv2d_transpose :noindex: sequence_expand --------------- -.. autofunction:: paddle.v2.fluid.layers.sequence_expand +.. autofunction:: paddle.fluid.layers.sequence_expand :noindex: lstm_unit --------- -.. autofunction:: paddle.v2.fluid.layers.lstm_unit +.. 
autofunction:: paddle.fluid.layers.lstm_unit :noindex: reduce_sum ---------- -.. autofunction:: paddle.v2.fluid.layers.reduce_sum +.. autofunction:: paddle.fluid.layers.reduce_sum :noindex: reduce_mean ----------- -.. autofunction:: paddle.v2.fluid.layers.reduce_mean +.. autofunction:: paddle.fluid.layers.reduce_mean :noindex: reduce_max ---------- -.. autofunction:: paddle.v2.fluid.layers.reduce_max +.. autofunction:: paddle.fluid.layers.reduce_max :noindex: reduce_min ---------- -.. autofunction:: paddle.v2.fluid.layers.reduce_min +.. autofunction:: paddle.fluid.layers.reduce_min :noindex: sequence_first_step ------------------- -.. autofunction:: paddle.v2.fluid.layers.sequence_first_step +.. autofunction:: paddle.fluid.layers.sequence_first_step :noindex: sequence_last_step ------------------ -.. autofunction:: paddle.v2.fluid.layers.sequence_last_step +.. autofunction:: paddle.fluid.layers.sequence_last_step :noindex: dropout ------- -.. autofunction:: paddle.v2.fluid.layers.dropout +.. autofunction:: paddle.fluid.layers.dropout :noindex: split ----- -.. autofunction:: paddle.v2.fluid.layers.split +.. autofunction:: paddle.fluid.layers.split :noindex: ctc_greedy_decoder ------------------ -.. autofunction:: paddle.v2.fluid.layers.ctc_greedy_decoder +.. autofunction:: paddle.fluid.layers.ctc_greedy_decoder :noindex: edit_distance ------------- -.. autofunction:: paddle.v2.fluid.layers.edit_distance +.. autofunction:: paddle.fluid.layers.edit_distance :noindex: l2_normalize ------------ -.. autofunction:: paddle.v2.fluid.layers.l2_normalize +.. autofunction:: paddle.fluid.layers.l2_normalize :noindex: matmul ------ -.. autofunction:: paddle.v2.fluid.layers.matmul +.. autofunction:: paddle.fluid.layers.matmul :noindex: warpctc ------- -.. autofunction:: paddle.v2.fluid.layers.warpctc +.. autofunction:: paddle.fluid.layers.warpctc :noindex: sequence_reshape ---------------- -.. autofunction:: paddle.v2.fluid.layers.sequence_reshape +.. autofunction:: paddle.fluid.layers.sequence_reshape :noindex: transpose --------- -.. autofunction:: paddle.v2.fluid.layers.transpose +.. autofunction:: paddle.fluid.layers.transpose :noindex: im2sequence ----------- -.. autofunction:: paddle.v2.fluid.layers.im2sequence +.. autofunction:: paddle.fluid.layers.im2sequence :noindex: nce --- -.. autofunction:: paddle.v2.fluid.layers.nce +.. autofunction:: paddle.fluid.layers.nce :noindex: beam_search ----------- -.. autofunction:: paddle.v2.fluid.layers.beam_search +.. autofunction:: paddle.fluid.layers.beam_search :noindex: row_conv -------- -.. autofunction:: paddle.v2.fluid.layers.row_conv +.. autofunction:: paddle.fluid.layers.row_conv :noindex: multiplex --------- -.. autofunction:: paddle.v2.fluid.layers.multiplex +.. autofunction:: paddle.fluid.layers.multiplex :noindex: ops @@ -479,259 +479,259 @@ ops mean ---- -.. autofunction:: paddle.v2.fluid.layers.mean +.. autofunction:: paddle.fluid.layers.mean :noindex: mul --- -.. autofunction:: paddle.v2.fluid.layers.mul +.. autofunction:: paddle.fluid.layers.mul :noindex: reshape ------- -.. autofunction:: paddle.v2.fluid.layers.reshape +.. autofunction:: paddle.fluid.layers.reshape :noindex: scale ----- -.. autofunction:: paddle.v2.fluid.layers.scale +.. autofunction:: paddle.fluid.layers.scale :noindex: sigmoid_cross_entropy_with_logits --------------------------------- -.. autofunction:: paddle.v2.fluid.layers.sigmoid_cross_entropy_with_logits +.. 
autofunction:: paddle.fluid.layers.sigmoid_cross_entropy_with_logits :noindex: elementwise_add --------------- -.. autofunction:: paddle.v2.fluid.layers.elementwise_add +.. autofunction:: paddle.fluid.layers.elementwise_add :noindex: elementwise_div --------------- -.. autofunction:: paddle.v2.fluid.layers.elementwise_div +.. autofunction:: paddle.fluid.layers.elementwise_div :noindex: elementwise_sub --------------- -.. autofunction:: paddle.v2.fluid.layers.elementwise_sub +.. autofunction:: paddle.fluid.layers.elementwise_sub :noindex: elementwise_mul --------------- -.. autofunction:: paddle.v2.fluid.layers.elementwise_mul +.. autofunction:: paddle.fluid.layers.elementwise_mul :noindex: elementwise_max --------------- -.. autofunction:: paddle.v2.fluid.layers.elementwise_max +.. autofunction:: paddle.fluid.layers.elementwise_max :noindex: elementwise_min --------------- -.. autofunction:: paddle.v2.fluid.layers.elementwise_min +.. autofunction:: paddle.fluid.layers.elementwise_min :noindex: elementwise_pow --------------- -.. autofunction:: paddle.v2.fluid.layers.elementwise_pow +.. autofunction:: paddle.fluid.layers.elementwise_pow :noindex: clip ---- -.. autofunction:: paddle.v2.fluid.layers.clip +.. autofunction:: paddle.fluid.layers.clip :noindex: clip_by_norm ------------ -.. autofunction:: paddle.v2.fluid.layers.clip_by_norm +.. autofunction:: paddle.fluid.layers.clip_by_norm :noindex: sequence_softmax ---------------- -.. autofunction:: paddle.v2.fluid.layers.sequence_softmax +.. autofunction:: paddle.fluid.layers.sequence_softmax :noindex: sigmoid ------- -.. autofunction:: paddle.v2.fluid.layers.sigmoid +.. autofunction:: paddle.fluid.layers.sigmoid :noindex: logsigmoid ---------- -.. autofunction:: paddle.v2.fluid.layers.logsigmoid +.. autofunction:: paddle.fluid.layers.logsigmoid :noindex: exp --- -.. autofunction:: paddle.v2.fluid.layers.exp +.. autofunction:: paddle.fluid.layers.exp :noindex: relu ---- -.. autofunction:: paddle.v2.fluid.layers.relu +.. autofunction:: paddle.fluid.layers.relu :noindex: tanh ---- -.. autofunction:: paddle.v2.fluid.layers.tanh +.. autofunction:: paddle.fluid.layers.tanh :noindex: tanh_shrink ----------- -.. autofunction:: paddle.v2.fluid.layers.tanh_shrink +.. autofunction:: paddle.fluid.layers.tanh_shrink :noindex: softshrink ---------- -.. autofunction:: paddle.v2.fluid.layers.softshrink +.. autofunction:: paddle.fluid.layers.softshrink :noindex: sqrt ---- -.. autofunction:: paddle.v2.fluid.layers.sqrt +.. autofunction:: paddle.fluid.layers.sqrt :noindex: abs --- -.. autofunction:: paddle.v2.fluid.layers.abs +.. autofunction:: paddle.fluid.layers.abs :noindex: ceil ---- -.. autofunction:: paddle.v2.fluid.layers.ceil +.. autofunction:: paddle.fluid.layers.ceil :noindex: floor ----- -.. autofunction:: paddle.v2.fluid.layers.floor +.. autofunction:: paddle.fluid.layers.floor :noindex: round ----- -.. autofunction:: paddle.v2.fluid.layers.round +.. autofunction:: paddle.fluid.layers.round :noindex: reciprocal ---------- -.. autofunction:: paddle.v2.fluid.layers.reciprocal +.. autofunction:: paddle.fluid.layers.reciprocal :noindex: log --- -.. autofunction:: paddle.v2.fluid.layers.log +.. autofunction:: paddle.fluid.layers.log :noindex: square ------ -.. autofunction:: paddle.v2.fluid.layers.square +.. autofunction:: paddle.fluid.layers.square :noindex: softplus -------- -.. autofunction:: paddle.v2.fluid.layers.softplus +.. autofunction:: paddle.fluid.layers.softplus :noindex: softsign -------- -.. 
autofunction:: paddle.v2.fluid.layers.softsign +.. autofunction:: paddle.fluid.layers.softsign :noindex: brelu ----- -.. autofunction:: paddle.v2.fluid.layers.brelu +.. autofunction:: paddle.fluid.layers.brelu :noindex: leaky_relu ---------- -.. autofunction:: paddle.v2.fluid.layers.leaky_relu +.. autofunction:: paddle.fluid.layers.leaky_relu :noindex: soft_relu --------- -.. autofunction:: paddle.v2.fluid.layers.soft_relu +.. autofunction:: paddle.fluid.layers.soft_relu :noindex: elu --- -.. autofunction:: paddle.v2.fluid.layers.elu +.. autofunction:: paddle.fluid.layers.elu :noindex: relu6 ----- -.. autofunction:: paddle.v2.fluid.layers.relu6 +.. autofunction:: paddle.fluid.layers.relu6 :noindex: pow --- -.. autofunction:: paddle.v2.fluid.layers.pow +.. autofunction:: paddle.fluid.layers.pow :noindex: stanh ----- -.. autofunction:: paddle.v2.fluid.layers.stanh +.. autofunction:: paddle.fluid.layers.stanh :noindex: hard_shrink ----------- -.. autofunction:: paddle.v2.fluid.layers.hard_shrink +.. autofunction:: paddle.fluid.layers.hard_shrink :noindex: thresholded_relu ---------------- -.. autofunction:: paddle.v2.fluid.layers.thresholded_relu +.. autofunction:: paddle.fluid.layers.thresholded_relu :noindex: hard_sigmoid ------------ -.. autofunction:: paddle.v2.fluid.layers.hard_sigmoid +.. autofunction:: paddle.fluid.layers.hard_sigmoid :noindex: swish ----- -.. autofunction:: paddle.v2.fluid.layers.swish +.. autofunction:: paddle.fluid.layers.swish :noindex: tensor @@ -740,66 +740,66 @@ tensor create_tensor ------------- -.. autofunction:: paddle.v2.fluid.layers.create_tensor +.. autofunction:: paddle.fluid.layers.create_tensor :noindex: create_parameter ---------------- -.. autofunction:: paddle.v2.fluid.layers.create_parameter +.. autofunction:: paddle.fluid.layers.create_parameter :noindex: create_global_var ----------------- -.. autofunction:: paddle.v2.fluid.layers.create_global_var +.. autofunction:: paddle.fluid.layers.create_global_var :noindex: cast ---- -.. autofunction:: paddle.v2.fluid.layers.cast +.. autofunction:: paddle.fluid.layers.cast :noindex: concat ------ -.. autofunction:: paddle.v2.fluid.layers.concat +.. autofunction:: paddle.fluid.layers.concat :noindex: sums ---- -.. autofunction:: paddle.v2.fluid.layers.sums +.. autofunction:: paddle.fluid.layers.sums :noindex: assign ------ -.. autofunction:: paddle.v2.fluid.layers.assign +.. autofunction:: paddle.fluid.layers.assign :noindex: fill_constant_batch_size_like ----------------------------- -.. autofunction:: paddle.v2.fluid.layers.fill_constant_batch_size_like +.. autofunction:: paddle.fluid.layers.fill_constant_batch_size_like :noindex: fill_constant ------------- -.. autofunction:: paddle.v2.fluid.layers.fill_constant +.. autofunction:: paddle.fluid.layers.fill_constant :noindex: ones ---- -.. autofunction:: paddle.v2.fluid.layers.ones +.. autofunction:: paddle.fluid.layers.ones :noindex: zeros ----- -.. autofunction:: paddle.v2.fluid.layers.zeros +.. autofunction:: paddle.fluid.layers.zeros :noindex: diff --git a/doc/api/v2/fluid/nets.rst b/doc/api/v2/fluid/nets.rst index 015581b766..7ae3187304 100644 --- a/doc/api/v2/fluid/nets.rst +++ b/doc/api/v2/fluid/nets.rst @@ -8,24 +8,24 @@ nets simple_img_conv_pool -------------------- -.. autofunction:: paddle.v2.fluid.nets.simple_img_conv_pool +.. autofunction:: paddle.fluid.nets.simple_img_conv_pool :noindex: sequence_conv_pool ------------------ -.. autofunction:: paddle.v2.fluid.nets.sequence_conv_pool +.. 
autofunction:: paddle.fluid.nets.sequence_conv_pool :noindex: glu --- -.. autofunction:: paddle.v2.fluid.nets.glu +.. autofunction:: paddle.fluid.nets.glu :noindex: scaled_dot_product_attention ---------------------------- -.. autofunction:: paddle.v2.fluid.nets.scaled_dot_product_attention +.. autofunction:: paddle.fluid.nets.scaled_dot_product_attention :noindex: diff --git a/doc/api/v2/fluid/optimizer.rst b/doc/api/v2/fluid/optimizer.rst index 1691ebb9a7..9b165f8704 100644 --- a/doc/api/v2/fluid/optimizer.rst +++ b/doc/api/v2/fluid/optimizer.rst @@ -8,42 +8,42 @@ optimizer SGD --- -.. autoclass:: paddle.v2.fluid.optimizer.SGD +.. autoclass:: paddle.fluid.optimizer.SGD :members: :noindex: Momentum -------- -.. autoclass:: paddle.v2.fluid.optimizer.Momentum +.. autoclass:: paddle.fluid.optimizer.Momentum :members: :noindex: Adagrad ------- -.. autoclass:: paddle.v2.fluid.optimizer.Adagrad +.. autoclass:: paddle.fluid.optimizer.Adagrad :members: :noindex: Adam ---- -.. autoclass:: paddle.v2.fluid.optimizer.Adam +.. autoclass:: paddle.fluid.optimizer.Adam :members: :noindex: Adamax ------ -.. autoclass:: paddle.v2.fluid.optimizer.Adamax +.. autoclass:: paddle.fluid.optimizer.Adamax :members: :noindex: DecayedAdagrad -------------- -.. autoclass:: paddle.v2.fluid.optimizer.DecayedAdagrad +.. autoclass:: paddle.fluid.optimizer.DecayedAdagrad :members: :noindex: diff --git a/doc/api/v2/fluid/param_attr.rst b/doc/api/v2/fluid/param_attr.rst index 8083d0d858..8e4ddb2b04 100644 --- a/doc/api/v2/fluid/param_attr.rst +++ b/doc/api/v2/fluid/param_attr.rst @@ -8,14 +8,14 @@ param_attr ParamAttr --------- -.. autoclass:: paddle.v2.fluid.param_attr.ParamAttr +.. autoclass:: paddle.fluid.param_attr.ParamAttr :members: :noindex: WeightNormParamAttr ------------------- -.. autoclass:: paddle.v2.fluid.param_attr.WeightNormParamAttr +.. autoclass:: paddle.fluid.param_attr.WeightNormParamAttr :members: :noindex: diff --git a/doc/api/v2/fluid/profiler.rst b/doc/api/v2/fluid/profiler.rst index 4a1ff7cb69..74d102dcb0 100644 --- a/doc/api/v2/fluid/profiler.rst +++ b/doc/api/v2/fluid/profiler.rst @@ -8,18 +8,18 @@ profiler cuda_profiler ------------- -.. autofunction:: paddle.v2.fluid.profiler.cuda_profiler +.. autofunction:: paddle.fluid.profiler.cuda_profiler :noindex: reset_profiler -------------- -.. autofunction:: paddle.v2.fluid.profiler.reset_profiler +.. autofunction:: paddle.fluid.profiler.reset_profiler :noindex: profiler -------- -.. autofunction:: paddle.v2.fluid.profiler.profiler +.. autofunction:: paddle.fluid.profiler.profiler :noindex: diff --git a/doc/api/v2/fluid/regularizer.rst b/doc/api/v2/fluid/regularizer.rst index 2c17d15599..dc9740c463 100644 --- a/doc/api/v2/fluid/regularizer.rst +++ b/doc/api/v2/fluid/regularizer.rst @@ -8,20 +8,20 @@ regularizer append_regularization_ops ------------------------- -.. autofunction:: paddle.v2.fluid.regularizer.append_regularization_ops +.. autofunction:: paddle.fluid.regularizer.append_regularization_ops :noindex: L1Decay ------- -.. autoclass:: paddle.v2.fluid.regularizer.L1Decay +.. autoclass:: paddle.fluid.regularizer.L1Decay :members: :noindex: L2Decay ------- -.. autoclass:: paddle.v2.fluid.regularizer.L2Decay +.. 
autoclass:: paddle.fluid.regularizer.L2Decay :members: :noindex: diff --git a/doc/templates/conf.py.cn.in b/doc/templates/conf.py.cn.in index d134aad794..c8da4a790b 100644 --- a/doc/templates/conf.py.cn.in +++ b/doc/templates/conf.py.cn.in @@ -18,6 +18,7 @@ import shlex from recommonmark import parser, transform import paddle import paddle.v2 +import paddle.fluid MarkdownParser = parser.CommonMarkParser AutoStructify = transform.AutoStructify diff --git a/doc/templates/conf.py.en.in b/doc/templates/conf.py.en.in index 1f057d2e83..a4cb2b7170 100644 --- a/doc/templates/conf.py.en.in +++ b/doc/templates/conf.py.en.in @@ -18,6 +18,7 @@ import shlex from recommonmark import parser, transform import paddle import paddle.v2 +import paddle.fluid MarkdownParser = parser.CommonMarkParser -- GitLab From 93e0609f1dc7b35b73d03bdd8adc1a2a313128a0 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Sat, 24 Feb 2018 18:43:09 +0800 Subject: [PATCH 172/217] fix bug --- python/paddle/v2/fluid/layers/nn.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index e8b4cec6ee..8623e1f038 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -1262,7 +1262,7 @@ def conv2d(input, raise ValueError("use_cudnn should be True or False") input_shape = input.shape - filter_shape = [num_filters, num_filter_channels] + filter_size + filter_shape = [num_filters, num_filter_channels] + list(filter_size) def _get_default_param_initializer(): std = (2.0 / (filter_size[0]**2 * num_channels))**0.5 @@ -1826,7 +1826,7 @@ def conv2d_transpose(input, elif isinstance(filter_size, int): filter_size = [filter_size, filter_size] - filter_shape = [input_channel, num_filters] + filter_size + filter_shape = [input_channel, num_filters] + list(filter_size) img_filter = helper.create_parameter( dtype=input.dtype, shape=filter_shape, attr=helper.param_attr) -- GitLab From f2694124291190da55e84f99dafd6ac0d8f067c2 Mon Sep 17 00:00:00 2001 From: superjom Date: Sat, 24 Feb 2018 18:46:18 +0800 Subject: [PATCH 173/217] update format for sphinx --- doc/api/overview.rst | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/doc/api/overview.rst b/doc/api/overview.rst index 5b7da7a477..c1a4582b5f 100644 --- a/doc/api/overview.rst +++ b/doc/api/overview.rst @@ -1,13 +1,16 @@ -# V2 API Overview +V2 API Overview +================ -The PaddlePaddle V2 API is designed to provide a modern user interface for PaddlePaddle V1(the original layer-based platform of PaddlePaddle), it proposes some high-level concepts such as [Layers](http://www.paddlepaddle.org/docs/develop/api/en/v2/config/layer.html),[Optimizer](http://www.paddlepaddle.org/docs/develop/api/en/v2/config/optimizer.html),[Evaluator](http://www.paddlepaddle.org/docs/develop/api/en/v2/config/evaluators.html) and [Data Reader](http://www.paddlepaddle.org/docs/develop/api/en/v2/data/data_reader.html) to make the model configuration more familiar to users. +The PaddlePaddle V2 API is designed to provide a modern user interface for PaddlePaddle V1(the original layer-based platform of PaddlePaddle), +it proposes some high-level concepts such as `Layers`_ ,`Optimizer`_ ,`Evaluator``_ and `Data Reader`_ to make the model configuration more familiar to users. A model is composed of the computation described by a group of `Layers`, with `Evaluator` to define the error, `Optimizer` to update the parameters and `Data Reader` to feed in the data. 
-We also provide the [interface for Training and Inference](http://www.paddlepaddle.org/docs/develop/api/en/v2/run_logic.html) to help control the training and inference phase, it has several easy to use methods +We also provide the `interface for Training and Inference``_ to help control the training and inference phase, it has several easy to use methods - `paddle.train` - `paddle.test` - `paddle.infer` to better expose the internal running details, different `Events`_ are available to users by writing some callbacks. -- GitLab From 2b9de0af5880e4e77120ebeef4e97d516d13b13d Mon Sep 17 00:00:00 2001 From: superjom Date: Sat, 24 Feb 2018 18:58:36 +0800 Subject: [PATCH 174/217] format --- doc/api/overview.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/api/overview.rst b/doc/api/overview.rst index c1a4582b5f..5213585f54 100644 --- a/doc/api/overview.rst +++ b/doc/api/overview.rst @@ -2,15 +2,15 @@ V2 API Overview ================ The PaddlePaddle V2 API is designed to provide a modern user interface for PaddlePaddle V1(the original layer-based platform of PaddlePaddle), -it proposes some high-level concepts such as `Layers`_ ,`Optimizer`_ ,`Evaluator``_ and `Data Reader`_ to make the model configuration more familiar to users. +it proposes some high-level concepts such as `Layers `_ , `Optimizer `_ , `Evaluator `_ and `Data Reader `_ to make the model configuration more familiar to users. A model is composed of the computation described by a group of `Layers`, with `Evaluator` to define the error, `Optimizer` to update the parameters and `Data Reader` to feed in the data. -We also provide the `interface for Training and Inference``_ to help control the training and inference phase, it has several easy to use methods - `paddle.train` - `paddle.test` - `paddle.infer` -to better expose the internal running details, different `Events`_ are available to users by writing some callbacks. +We also provide the `interface for Training and Inference `_ to help control the training and inference phase, it has several easy to use methods - `paddle.train` - `paddle.test` - `paddle.infer` to better expose the internal running details, different `Events `_ are available to users by writing some callbacks. -- GitLab From 6989d081996fd2d7d85b3d4addbd60048c5f36ec Mon Sep 17 00:00:00 2001 From: superjom Date: Sat, 24 Feb 2018 18:59:23 +0800 Subject: [PATCH 175/217] lowercase Events --- doc/api/overview.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/api/overview.rst b/doc/api/overview.rst index 5213585f54..16b6cf4266 100644 --- a/doc/api/overview.rst +++ b/doc/api/overview.rst @@ -13,4 +13,4 @@ it has several easy to use methods - `paddle.test` - `paddle.infer` -to better expose the internal running details, different `Events `_ are available to users by writing some callbacks. +to better expose the internal running details, different `events `_ are available to users by writing some callbacks.
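The overview patched above only names the `paddle.train`-style methods and the event callbacks abstractly. As an illustration, here is a minimal sketch of that callback pattern using the v2 trainer API (`paddle.trainer.SGD` and `paddle.event`); the `cost`, `parameters` and `train_reader` objects are placeholders assumed to have been built from v2 layers and readers as the overview describes:

```python
import paddle.v2 as paddle

# assumed to exist: cost (a v2 layer), parameters, train_reader (a data reader)
trainer = paddle.trainer.SGD(cost=cost,
                             parameters=parameters,
                             update_equation=paddle.optimizer.Momentum(momentum=0.9))

def event_handler(event):
    # different events expose the internal running details to user callbacks
    if isinstance(event, paddle.event.EndIteration):
        if event.batch_id % 100 == 0:
            print("pass %d, batch %d, cost %f" %
                  (event.pass_id, event.batch_id, event.cost))
    elif isinstance(event, paddle.event.EndPass):
        print("pass %d finished" % event.pass_id)

trainer.train(reader=paddle.batch(train_reader, batch_size=32),
              event_handler=event_handler,
              num_passes=10)
```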
-- GitLab From bb3608494913d43c68d890bce5bbc913dd25571a Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Sat, 24 Feb 2018 20:54:02 +0800 Subject: [PATCH 176/217] fix wrong directory of fluid inference unittest --- paddle/fluid/inference/tests/book/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/inference/tests/book/CMakeLists.txt b/paddle/fluid/inference/tests/book/CMakeLists.txt index c0aba39b97..4ead540e5d 100644 --- a/paddle/fluid/inference/tests/book/CMakeLists.txt +++ b/paddle/fluid/inference/tests/book/CMakeLists.txt @@ -4,7 +4,7 @@ function(inference_test TARGET_NAME) set(multiValueArgs ARGS) cmake_parse_arguments(inference_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - set(PYTHON_TESTS_DIR ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/tests) + set(PYTHON_TESTS_DIR ${PADDLE_SOURCE_DIR}/python/paddle/fluid/tests) set(arg_list "") if(inference_test_ARGS) foreach(arg ${inference_test_ARGS}) -- GitLab From 7453312dee86984f548a4ddebae62179c7c77139 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Sat, 24 Feb 2018 21:22:53 +0800 Subject: [PATCH 177/217] fix wrong Python path when building framework_py_proto --- paddle/fluid/framework/CMakeLists.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt index ef1bc07c2d..0b8cfab573 100644 --- a/paddle/fluid/framework/CMakeLists.txt +++ b/paddle/fluid/framework/CMakeLists.txt @@ -68,9 +68,9 @@ py_proto_compile(framework_py_proto SRCS framework.proto) add_custom_target(framework_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py) add_dependencies(framework_py_proto framework_py_proto_init) add_custom_command(TARGET framework_py_proto POST_BUILD - COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/proto - COMMAND cp *.py ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/proto/ - COMMENT "Copy generated python proto into directory paddle/v2/fluid/proto." + COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_SOURCE_DIR}/python/paddle/fluid/proto + COMMAND cp *.py ${PADDLE_SOURCE_DIR}/python/paddle/fluid/proto/ + COMMENT "Copy generated python proto into directory paddle/fluid/proto."
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) cc_library(backward SRCS backward.cc DEPS net_op) -- GitLab From 2398db5e5ce4b33a093c5b26930c4862226dea99 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Sat, 24 Feb 2018 22:36:45 +0800 Subject: [PATCH 178/217] follow comments --- python/paddle/v2/fluid/layers/nn.py | 62 ++++++++++---------------- python/paddle/v2/fluid/layers/utils.py | 54 ++++++++++++++++++++++ 2 files changed, 78 insertions(+), 38 deletions(-) create mode 100644 python/paddle/v2/fluid/layers/utils.py diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index 8623e1f038..57813af939 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -21,6 +21,7 @@ from ..framework import Variable from ..param_attr import ParamAttr from layer_function_generator import autodoc from tensor import concat +import utils __all__ = [ 'fc', @@ -1138,8 +1139,8 @@ def sequence_conv(input, def conv2d(input, num_filters, filter_size, - stride=None, - padding=None, + stride=1, + padding=0, groups=None, param_attr=None, bias_attr=None, @@ -1252,17 +1253,15 @@ def conv2d(input, raise ValueError("num_channels must be divisible by groups.") num_filter_channels = num_channels / groups - if isinstance(filter_size, int): - filter_size = [filter_size, filter_size] - if isinstance(stride, int): - stride = [stride, stride] - if isinstance(padding, int): - padding = [padding, padding] + filter_size = utils.convert_to_list(filter_size, 2, 'conv2d.filter_size') + stride = utils.convert_to_list(stride, 2, 'conv2d.stride') + padding = utils.convert_to_list(padding, 2, 'conv2d.padding') + if not isinstance(use_cudnn, bool): raise ValueError("use_cudnn should be True or False") input_shape = input.shape - filter_shape = [num_filters, num_filter_channels] + list(filter_size) + filter_shape = [num_filters, num_filter_channels] + filter_size def _get_default_param_initializer(): std = (2.0 / (filter_size[0]**2 * num_channels))**0.5 @@ -1685,9 +1684,9 @@ def conv2d_transpose(input, num_filters, output_size=None, filter_size=None, - padding=None, - stride=None, - dilation=None, + padding=0, + stride=1, + dilation=1, param_attr=None, use_cudnn=True, name=None): @@ -1783,26 +1782,12 @@ def conv2d_transpose(input, raise TypeError("Input of conv2d_transpose must be Variable") input_channel = input.shape[1] - op_attr = dict() - - if isinstance(padding, int): - op_attr['paddings'] = [padding, padding] - elif padding is not None: - op_attr['paddings'] = padding - - if isinstance(stride, int): - op_attr['strides'] = [stride, stride] - elif stride is not None: - op_attr['strides'] = stride - - if isinstance(dilation, int): - op_attr['dilations'] = [dilation, dilation] - elif dilation is not None: - op_attr['dilations'] = dilation + padding = utils.convert_to_list(padding, 2, 'conv2d_transpose.padding') + stride = utils.convert_to_list(stride, 2, 'conv2d_transpose.stride') + dilation = utils.convert_to_list(dilation, 2, 'conv2d_transpose.dilation') if not isinstance(use_cudnn, bool): raise ValueError("use_cudnn should be True or False") - op_attr['use_cudnn'] = use_cudnn if filter_size is None: if output_size is None: @@ -1810,10 +1795,6 @@ def conv2d_transpose(input, if isinstance(output_size, int): output_size = [output_size, output_size] - padding = op_attr.get('paddings', [0, 0]) - stride = op_attr.get('strides', [1, 1]) - dilation = op_attr.get('dilations', [1, 1]) - h_in = input.shape[2] w_in = input.shape[3] @@ -1822,11 +1803,11 @@ def 
conv2d_transpose(input, filter_size_w = (output_size[1] - (w_in - 1) * stride[1] + 2 * padding[1] - 1) / dilation[1] + 1 filter_size = [filter_size_h, filter_size_w] + else: + filter_size = utils.convert_to_list(filter_size, 2, + 'conv2d_transpose.filter_size') - elif isinstance(filter_size, int): - filter_size = [filter_size, filter_size] - - filter_shape = [input_channel, num_filters] + list(filter_size) + filter_shape = [input_channel, num_filters] + filter_size img_filter = helper.create_parameter( dtype=input.dtype, shape=filter_shape, attr=helper.param_attr) @@ -1836,7 +1817,12 @@ def conv2d_transpose(input, inputs={'Input': [input], 'Filter': [img_filter]}, outputs={'Output': out}, - attrs=op_attr) + attrs={ + 'strides': stride, + 'paddings': padding, + 'dilations': dilation, + 'use_cudnn': use_cudnn + }) return out diff --git a/python/paddle/v2/fluid/layers/utils.py b/python/paddle/v2/fluid/layers/utils.py new file mode 100644 index 0000000000..d04f2f86ac --- /dev/null +++ b/python/paddle/v2/fluid/layers/utils.py @@ -0,0 +1,54 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +def convert_to_list(value, n, name): + """Converts a single integer or iterable of integers into an integer list. + + Arguments: + value: The value to validate and convert. Could an int, or any iterable + of ints. + n: The size of the list to be returned. + name: The name of the argument being validated, e.g. "stride" or + "filter_size". This is only used to format error messages. + + Returns: + A list of n integers. + + Raises: + ValueError: If something else than an int/long or iterable thereof was + passed. + """ + if isinstance(value, int): + return [value, ] * n + else: + try: + value_list = list(value) + except TypeError: + raise ValueError("The " + name + + "'s type must be list or tuple. Received: " + str( + value)) + if len(value_list) != n: + raise ValueError("The " + name + "'s length must be " + str(n) + + ". Received: " + str(value)) + for single_value in value_list: + try: + int(single_value) + except (ValueError, TypeError): + raise ValueError( + "The " + name + "'s must be a list or tuple of " + str( + n) + " integers. 
Received: " + str(value) + " " + "including element " + str(single_value) + " of type" + " " + + str(type(single_value))) + return value_list -- GitLab From 97094e46ff9a1e2d5ac9a201182da33c6415c268 Mon Sep 17 00:00:00 2001 From: jiaozhenyu <35716744+jshower@users.noreply.github.com> Date: Sun, 25 Feb 2018 09:23:31 +0800 Subject: [PATCH 179/217] Update conll05.py Fix #8079 and remove the redundant code --- python/paddle/v2/dataset/conll05.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/python/paddle/v2/dataset/conll05.py b/python/paddle/v2/dataset/conll05.py index 7113202a12..0d544efac9 100644 --- a/python/paddle/v2/dataset/conll05.py +++ b/python/paddle/v2/dataset/conll05.py @@ -51,8 +51,6 @@ def load_label_dict(filename): tag_dict.add(line[2:]) elif line.startswith("I-"): tag_dict.add(line[2:]) - else: - continue index = 0 for tag in tag_dict: d["B-" + tag] = index -- GitLab From c8ed768ccc9adf6156a8d93964805f0e3679000b Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Sat, 24 Feb 2018 23:37:27 +0800 Subject: [PATCH 180/217] refine pool2d --- python/paddle/v2/fluid/layers/nn.py | 31 ++++++++++++----------------- 1 file changed, 13 insertions(+), 18 deletions(-) diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index 57813af939..b8224f5604 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -1253,9 +1253,9 @@ def conv2d(input, raise ValueError("num_channels must be divisible by groups.") num_filter_channels = num_channels / groups - filter_size = utils.convert_to_list(filter_size, 2, 'conv2d.filter_size') - stride = utils.convert_to_list(stride, 2, 'conv2d.stride') - padding = utils.convert_to_list(padding, 2, 'conv2d.padding') + filter_size = utils.convert_to_list(filter_size, 2, 'filter_size') + stride = utils.convert_to_list(stride, 2, 'stride') + padding = utils.convert_to_list(padding, 2, 'padding') if not isinstance(use_cudnn, bool): raise ValueError("use_cudnn should be True or False") @@ -1433,8 +1433,8 @@ def sequence_last_step(input): def pool2d(input, pool_size, pool_type, - pool_stride=None, - pool_padding=None, + pool_stride=1, + pool_padding=0, global_pooling=False, use_cudnn=True, name=None): @@ -1442,20 +1442,15 @@ def pool2d(input, This function adds the operator for pooling in 2 dimensions, using the pooling configurations mentioned in input parameters. """ - if pool_padding is None: - pool_padding = [0, 0] - if pool_stride is None: - pool_stride = [1, 1] if pool_type not in ["max", "avg"]: raise ValueError( "Unknown pool_type: '%s'. 
It can only be 'max' or 'avg'.", str(pool_type)) - if isinstance(pool_size, int): - pool_size = [pool_size, pool_size] - if isinstance(pool_stride, int): - pool_stride = [pool_stride, pool_stride] - if isinstance(pool_padding, int): - pool_padding = [pool_padding, pool_padding] + + pool_size = utils.convert_to_list(pool_size, 2, 'pool_size') + pool_padding = utils.convert_to_list(pool_padding, 2, 'pool_padding') + pool_stride = utils.convert_to_list(pool_stride, 2, 'pool_stride') + if not isinstance(use_cudnn, bool): raise ValueError("use_cudnn should be True or False") @@ -1782,9 +1777,9 @@ def conv2d_transpose(input, raise TypeError("Input of conv2d_transpose must be Variable") input_channel = input.shape[1] - padding = utils.convert_to_list(padding, 2, 'conv2d_transpose.padding') - stride = utils.convert_to_list(stride, 2, 'conv2d_transpose.stride') - dilation = utils.convert_to_list(dilation, 2, 'conv2d_transpose.dilation') + padding = utils.convert_to_list(padding, 2, 'padding') + stride = utils.convert_to_list(stride, 2, 'stride') + dilation = utils.convert_to_list(dilation, 2, 'dilation') if not isinstance(use_cudnn, bool): raise ValueError("use_cudnn should be True or False") -- GitLab From 470d6717928b0dfd9901ca9801c1d2c897eefa43 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Mon, 26 Feb 2018 10:11:30 +0800 Subject: [PATCH 181/217] follow comment --- python/paddle/fluid/layers/nn.py | 9 +++++++-- python/paddle/{v2 => }/fluid/layers/utils.py | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) rename python/paddle/{v2 => }/fluid/layers/utils.py (96%) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index ef309ac1b0..ead7041b7b 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -1431,8 +1431,8 @@ def sequence_last_step(input): def pool2d(input, - pool_size, - pool_type, + pool_size=-1, + pool_type="max", pool_stride=1, pool_padding=0, global_pooling=False, @@ -1447,6 +1447,11 @@ def pool2d(input, "Unknown pool_type: '%s'. It can only be 'max' or 'avg'.", str(pool_type)) + if global_pooling is False and pool_size == -1: + raise ValueError( + "When the global_pooling is False, pool_size must be passed " + "and be a valid value. Received pool_size: " + str(pool_size)) + pool_size = utils.convert_to_list(pool_size, 2, 'pool_size') pool_padding = utils.convert_to_list(pool_padding, 2, 'pool_padding') pool_stride = utils.convert_to_list(pool_stride, 2, 'pool_stride') diff --git a/python/paddle/v2/fluid/layers/utils.py b/python/paddle/fluid/layers/utils.py similarity index 96% rename from python/paddle/v2/fluid/layers/utils.py rename to python/paddle/fluid/layers/utils.py index d04f2f86ac..d79e8078ff 100644 --- a/python/paddle/v2/fluid/layers/utils.py +++ b/python/paddle/fluid/layers/utils.py @@ -47,7 +47,7 @@ def convert_to_list(value, n, name): int(single_value) except (ValueError, TypeError): raise ValueError( - "The " + name + "'s must be a list or tuple of " + str( + "The " + name + "'s type must be a list or tuple of " + str( n) + " integers. 
Received: " + str(value) + " " "including element " + str(single_value) + " of type" + " " + str(type(single_value))) return value_list -- GitLab From 71f84c907693ab5469b7eb2252deebd4e18f1d0d Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Mon, 26 Feb 2018 10:17:26 +0800 Subject: [PATCH 182/217] move paddle/v2/fluid to paddle/fluid in documentation --- doc/design/concurrent_programming.md | 2 +- doc/design/fluid.md | 2 +- doc/design/memory_optimization.md | 2 +- doc/howto/cluster/fluid_cluster_train_en.md | 6 +++--- doc/howto/optimization/cpu_profiling_cn.md | 14 +++++++------- doc/howto/optimization/cpu_profiling_en.md | 14 +++++++------- doc/howto/read_source.md | 12 ++++++------ 7 files changed, 26 insertions(+), 26 deletions(-) diff --git a/doc/design/concurrent_programming.md b/doc/design/concurrent_programming.md index afc65e831d..f022e67fd3 100644 --- a/doc/design/concurrent_programming.md +++ b/doc/design/concurrent_programming.md @@ -12,7 +12,7 @@ The following table compares concepts in Fluid and Go | Go | Fluid | |----|-------| -|user-defined functions | [layers](https://github.com/PaddlePaddle/Paddle/tree/develop/python/paddle/v2/fluid) | +|user-defined functions | [layers](https://github.com/PaddlePaddle/Paddle/tree/develop/python/paddle/fluid) | | control-flow and built-in functions | [intrinsics/operators](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators) | | goroutines, channels | [class ThreadPool](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/framework/thread_pool.h) | | runtime | [class Executor](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/executor.h) | diff --git a/doc/design/fluid.md b/doc/design/fluid.md index 2acc168007..f78fa8c191 100644 --- a/doc/design/fluid.md +++ b/doc/design/fluid.md @@ -89,7 +89,7 @@ with train_loop.block(): h[t] = the_step(input[t]) ``` -An actual Fluid example is described [here](https://github.com/PaddlePaddle/Paddle/blob/a91efdde6910ce92a78e3aa7157412c4c88d9ee8/python/paddle/v2/fluid/tests/test_while_op.py#L36-L44). +An actual Fluid example is described [here](https://github.com/PaddlePaddle/Paddle/blob/bde090a97564b9c61a6aaa38b72ccc4889d102d9/python/paddle/fluid/tests/unittests/test_while_op.py#L50-L58). From the example, the Fluid programs look very similar to their PyTorch equivalent programs, except that Fluid's loop structure, wrapped with Python's `with` statement, could run much faster than just a Python loop. diff --git a/doc/design/memory_optimization.md b/doc/design/memory_optimization.md index 1f68cef4cc..285464ada7 100644 --- a/doc/design/memory_optimization.md +++ b/doc/design/memory_optimization.md @@ -101,7 +101,7 @@ In-place is a built-in attribute of an operator. Since we treat in-place and oth #### construct control flow graph -Following is the ProgramDesc protobuf of [machine translation](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/fluid/tests/book/test_machine_translation.py) example. +Following is the ProgramDesc protobuf of [machine translation](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/book/test_machine_translation.py) example.
- Block0: diff --git a/doc/howto/cluster/fluid_cluster_train_en.md b/doc/howto/cluster/fluid_cluster_train_en.md index ae825d9a51..b4465e8269 100644 --- a/doc/howto/cluster/fluid_cluster_train_en.md +++ b/doc/howto/cluster/fluid_cluster_train_en.md @@ -32,7 +32,7 @@ The non-cluster version of this demo with fluid API is as follows: ``` python import paddle.v2 as paddle -import paddle.v2.fluid as fluid +import paddle.fluid as fluid x = fluid.layers.data(name='x', shape=[13], dtype='float32') y_predict = fluid.layers.fc(input=x, size=1, act=None) @@ -125,11 +125,11 @@ for pass_id in range(100): ### E2E demo -Please find the complete demo from [here](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/fluid/tests/book_distribute/notest_dist_fit_a_line.py). +Please find the complete demo from [here](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/book_distribute/notest_dist_fit_a_line.py). First `cd` into the folder that contains the `python` files. In this case: ```bash -cd /paddle/python/paddle/v2/fluid/tests/book_distribute +cd /paddle/python/paddle/fluid/tests/book_distribute ``` In parameter server node run the following in the command line: diff --git a/doc/howto/optimization/cpu_profiling_cn.md b/doc/howto/optimization/cpu_profiling_cn.md index 14eba0e2f3..d59be670c2 100644 --- a/doc/howto/optimization/cpu_profiling_cn.md +++ b/doc/howto/optimization/cpu_profiling_cn.md @@ -35,7 +35,7 @@ cprofilev -a 0.0.0.0 -p 3214 -f profile.out main.py ``` ncalls tottime percall cumtime percall filename:lineno(function) 1 0.284 0.284 29.514 29.514 main.py:1() - 4696 0.128 0.000 15.748 0.003 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/executor.py:20(run) + 4696 0.128 0.000 15.748 0.003 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/executor.py:20(run) 4696 12.040 0.003 12.040 0.003 {built-in method run} 1 0.144 0.144 6.534 6.534 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/__init__.py:14() ``` @@ -61,9 +61,9 @@ cprofilev -a 0.0.0.0 -p 3214 -f profile.out main.py ```text 4696 12.040 0.003 12.040 0.003 {built-in method run} 300005 0.874 0.000 1.681 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/dataset/mnist.py:38(reader) - 107991 0.676 0.000 1.519 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:219(__init__) - 4697 0.626 0.000 2.291 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:428(sync_with_cpp) - 1 0.618 0.618 0.618 0.618 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/__init__.py:1() + 107991 0.676 0.000 1.519 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:219(__init__) + 4697 0.626 0.000 2.291 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:428(sync_with_cpp) + 1 0.618 0.618 0.618 0.618 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/__init__.py:1() ``` We can see that the most time-consuming function is the C++-side `run` function. Tuning it requires the mixed `Python`/`C++` profiling covered in our second section. The total time spent in `sync_with_cpp` is also long, and each call to it is expensive, so we can click into the details of `sync_with_cpp` to understand its call relationships. @@ -76,9 +76,9 @@ Called By: Function was called by...
ncalls tottime cumtime -/home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:428(sync_with_cpp) <- 4697 0.626 2.291 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:562(sync_with_cpp) -/home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:562(sync_with_cpp) <- 4696 0.019 2.316 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:487(clone) - 1 0.000 0.001 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:534(append_backward) +/home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:428(sync_with_cpp) <- 4697 0.626 2.291 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:562(sync_with_cpp) +/home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:562(sync_with_cpp) <- 4696 0.019 2.316 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:487(clone) + 1 0.000 0.001 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:534(append_backward) Called: diff --git a/doc/howto/optimization/cpu_profiling_en.md b/doc/howto/optimization/cpu_profiling_en.md index 368af40cc7..01e5fddf61 100644 --- a/doc/howto/optimization/cpu_profiling_en.md +++ b/doc/howto/optimization/cpu_profiling_en.md @@ -49,7 +49,7 @@ port, we will see the output like the following: ``` ncalls tottime percall cumtime percall filename:lineno(function) 1 0.284 0.284 29.514 29.514 main.py:1() - 4696 0.128 0.000 15.748 0.003 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/executor.py:20(run) + 4696 0.128 0.000 15.748 0.003 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/executor.py:20(run) 4696 12.040 0.003 12.040 0.003 {built-in method run} 1 0.144 0.144 6.534 6.534 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/__init__.py:14() ``` @@ -74,9 +74,9 @@ focus on. We can sort above profiling file by tottime: ```text 4696 12.040 0.003 12.040 0.003 {built-in method run} 300005 0.874 0.000 1.681 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/dataset/mnist.py:38(reader) - 107991 0.676 0.000 1.519 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:219(__init__) - 4697 0.626 0.000 2.291 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:428(sync_with_cpp) - 1 0.618 0.618 0.618 0.618 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/__init__.py:1() + 107991 0.676 0.000 1.519 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:219(__init__) + 4697 0.626 0.000 2.291 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:428(sync_with_cpp) + 1 0.618 0.618 0.618 0.618 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/__init__.py:1() ``` We can see that the most time-consuming function is the `built-in @@ -93,9 +93,9 @@ Called By: Function was called by... 
ncalls tottime cumtime -/home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:428(sync_with_cpp) <- 4697 0.626 2.291 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:562(sync_with_cpp) -/home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:562(sync_with_cpp) <- 4696 0.019 2.316 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:487(clone) - 1 0.000 0.001 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:534(append_backward) +/home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:428(sync_with_cpp) <- 4697 0.626 2.291 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:562(sync_with_cpp) +/home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:562(sync_with_cpp) <- 4696 0.019 2.316 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:487(clone) + 1 0.000 0.001 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/fluid/framework.py:534(append_backward) Called: diff --git a/doc/howto/read_source.md b/doc/howto/read_source.md index 31987920f3..edf46aff8c 100644 --- a/doc/howto/read_source.md +++ b/doc/howto/read_source.md @@ -1,6 +1,6 @@ # PaddlePaddle Fluid Source Code Overview -Examples: https://github.com/PaddlePaddle/Paddle/tree/develop/python/paddle/v2/fluid/tests/book +Examples: https://github.com/PaddlePaddle/Paddle/tree/develop/python/paddle/fluid/tests/book Core: https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/framework @@ -26,16 +26,16 @@ sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) sgd_optimizer.minimize(avg_cost) ``` -- Variables: `x`, `y`, `y_predict`, `cost` and `avg_cost`. [Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/fluid/framework.py#) -- Layers: `fluid.layers.data`, `fluid.layers.fc` and `fluid.layers.mean` are layers. [Python](https://github.com/PaddlePaddle/Paddle/tree/develop/python/paddle/v2/fluid/layers) +- Variables: `x`, `y`, `y_predict`, `cost` and `avg_cost`. [Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/framework.py#) +- Layers: `fluid.layers.data`, `fluid.layers.fc` and `fluid.layers.mean` are layers. [Python](https://github.com/PaddlePaddle/Paddle/tree/develop/python/paddle/fluid/layers) - Every Layer has one or more operators and variables/parameters - All the operators are defined at [`paddle/operators/`](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators). Other worth-looking files: - Base class: [`paddle/framework/operator.h`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h) - Operator Registration: [`paddle/framework/op_registry.h`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/op_registry.h) - Operator Lookup: [`paddle/framework/op_info.h`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/op_info.h) - Optimizer: `fluid.optimizer.SGD`. It does the following - - Add backward operators. [[Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/fluid/backward.py)] - - Add optimizer operators. [[Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/fluid/optimizer.py)] + - Add backward operators. [[Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/backward.py)] + - Add optimizer operators. 
[[Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/optimizer.py)] # Run Time @@ -57,7 +57,7 @@ exe.run(fluid.default_main_program(), - Place: `place`. one of CPU, GPU or FPGA. [C++](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/platform/place.h) - The device handle are at [paddle/platform/device_context.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/platform/device_context.h) -- Executor: `fluid.Executor(place)`. [[Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/fluid/executor.py), [C++](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/executor.cc)] +- Executor: `fluid.Executor(place)`. [[Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/executor.py), [C++](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/executor.cc)] - Feeds the data: `feed=feeder.feed(data)` - Evaluates all the operators - Fetches the result: `fetch_list=[avg_cost]` -- GitLab From 962326b06e9cc12ae9dc3b1ef353202be14f2a35 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Mon, 26 Feb 2018 10:39:53 +0800 Subject: [PATCH 183/217] move doc/api/v2/fluid to doc/api/fluid --- doc/api/{v2 => }/fluid/data_feeder.rst | 0 doc/api/{v2 => }/fluid/evaluator.rst | 0 doc/api/{v2 => }/fluid/executor.rst | 0 doc/api/{v2 => }/fluid/gen_doc.py | 0 doc/api/{v2 => }/fluid/gen_doc.sh | 0 doc/api/fluid/index.rst | 18 ++++++++++++++++++ doc/api/{v2 => }/fluid/initializer.rst | 0 doc/api/{v2 => }/fluid/io.rst | 0 doc/api/{v2 => }/fluid/layers.rst | 0 doc/api/{v2 => }/fluid/nets.rst | 0 doc/api/{v2 => }/fluid/optimizer.rst | 0 doc/api/{v2 => }/fluid/param_attr.rst | 0 doc/api/{v2 => }/fluid/profiler.rst | 0 doc/api/{v2 => }/fluid/regularizer.rst | 0 doc/api/index_en.rst | 2 +- doc/api/v2/fluid.rst | 18 ------------------ 16 files changed, 19 insertions(+), 19 deletions(-) rename doc/api/{v2 => }/fluid/data_feeder.rst (100%) rename doc/api/{v2 => }/fluid/evaluator.rst (100%) rename doc/api/{v2 => }/fluid/executor.rst (100%) rename doc/api/{v2 => }/fluid/gen_doc.py (100%) rename doc/api/{v2 => }/fluid/gen_doc.sh (100%) create mode 100644 doc/api/fluid/index.rst rename doc/api/{v2 => }/fluid/initializer.rst (100%) rename doc/api/{v2 => }/fluid/io.rst (100%) rename doc/api/{v2 => }/fluid/layers.rst (100%) rename doc/api/{v2 => }/fluid/nets.rst (100%) rename doc/api/{v2 => }/fluid/optimizer.rst (100%) rename doc/api/{v2 => }/fluid/param_attr.rst (100%) rename doc/api/{v2 => }/fluid/profiler.rst (100%) rename doc/api/{v2 => }/fluid/regularizer.rst (100%) delete mode 100644 doc/api/v2/fluid.rst diff --git a/doc/api/v2/fluid/data_feeder.rst b/doc/api/fluid/data_feeder.rst similarity index 100% rename from doc/api/v2/fluid/data_feeder.rst rename to doc/api/fluid/data_feeder.rst diff --git a/doc/api/v2/fluid/evaluator.rst b/doc/api/fluid/evaluator.rst similarity index 100% rename from doc/api/v2/fluid/evaluator.rst rename to doc/api/fluid/evaluator.rst diff --git a/doc/api/v2/fluid/executor.rst b/doc/api/fluid/executor.rst similarity index 100% rename from doc/api/v2/fluid/executor.rst rename to doc/api/fluid/executor.rst diff --git a/doc/api/v2/fluid/gen_doc.py b/doc/api/fluid/gen_doc.py similarity index 100% rename from doc/api/v2/fluid/gen_doc.py rename to doc/api/fluid/gen_doc.py diff --git a/doc/api/v2/fluid/gen_doc.sh b/doc/api/fluid/gen_doc.sh similarity index 100% rename from doc/api/v2/fluid/gen_doc.sh rename to doc/api/fluid/gen_doc.sh diff --git a/doc/api/fluid/index.rst b/doc/api/fluid/index.rst 
new file mode 100644 index 0000000000..b0710d8b19 --- /dev/null +++ b/doc/api/fluid/index.rst @@ -0,0 +1,18 @@ +====================== +Fluid +====================== + +.. toctree:: + :maxdepth: 1 + + layers.rst + data_feeder.rst + executor.rst + initializer.rst + evaluator.rst + nets.rst + optimizer.rst + param_attr.rst + profiler.rst + regularizer.rst + io.rst diff --git a/doc/api/v2/fluid/initializer.rst b/doc/api/fluid/initializer.rst similarity index 100% rename from doc/api/v2/fluid/initializer.rst rename to doc/api/fluid/initializer.rst diff --git a/doc/api/v2/fluid/io.rst b/doc/api/fluid/io.rst similarity index 100% rename from doc/api/v2/fluid/io.rst rename to doc/api/fluid/io.rst diff --git a/doc/api/v2/fluid/layers.rst b/doc/api/fluid/layers.rst similarity index 100% rename from doc/api/v2/fluid/layers.rst rename to doc/api/fluid/layers.rst diff --git a/doc/api/v2/fluid/nets.rst b/doc/api/fluid/nets.rst similarity index 100% rename from doc/api/v2/fluid/nets.rst rename to doc/api/fluid/nets.rst diff --git a/doc/api/v2/fluid/optimizer.rst b/doc/api/fluid/optimizer.rst similarity index 100% rename from doc/api/v2/fluid/optimizer.rst rename to doc/api/fluid/optimizer.rst diff --git a/doc/api/v2/fluid/param_attr.rst b/doc/api/fluid/param_attr.rst similarity index 100% rename from doc/api/v2/fluid/param_attr.rst rename to doc/api/fluid/param_attr.rst diff --git a/doc/api/v2/fluid/profiler.rst b/doc/api/fluid/profiler.rst similarity index 100% rename from doc/api/v2/fluid/profiler.rst rename to doc/api/fluid/profiler.rst diff --git a/doc/api/v2/fluid/regularizer.rst b/doc/api/fluid/regularizer.rst similarity index 100% rename from doc/api/v2/fluid/regularizer.rst rename to doc/api/fluid/regularizer.rst diff --git a/doc/api/index_en.rst b/doc/api/index_en.rst index 77337982be..fc8dbd07eb 100644 --- a/doc/api/index_en.rst +++ b/doc/api/index_en.rst @@ -8,4 +8,4 @@ API v2/model_configs.rst v2/data.rst v2/run_logic.rst - v2/fluid.rst + fluid/index.rst diff --git a/doc/api/v2/fluid.rst b/doc/api/v2/fluid.rst deleted file mode 100644 index 5f15cad2b5..0000000000 --- a/doc/api/v2/fluid.rst +++ /dev/null @@ -1,18 +0,0 @@ -====================== -Fluid -====================== - -.. 
toctree:: - :maxdepth: 1 - - fluid/layers.rst - fluid/data_feeder.rst - fluid/executor.rst - fluid/initializer.rst - fluid/evaluator.rst - fluid/nets.rst - fluid/optimizer.rst - fluid/param_attr.rst - fluid/profiler.rst - fluid/regularizer.rst - fluid/io.rst -- GitLab From ea9e62b8fcf7326a17a8cd72f1f9171807f92d8a Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 26 Feb 2018 10:45:03 +0800 Subject: [PATCH 184/217] optimize code --- python/paddle/fluid/optimizer.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index 9309ec3916..93a19de92e 100644 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -38,13 +38,13 @@ class Optimizer(object): def __init__(self, learning_rate, global_step=None, regularization=None): if not isinstance(learning_rate, float) and \ not isinstance(learning_rate, framework.Variable): - raise ValueError("learning rate should be float or Variable") + raise TypeError("learning rate should be float or Variable") self._global_step = global_step self.regularization = regularization self._learning_rate = learning_rate # each program should have an independent learning rate # program -> Variable(learning_rate) - self._learning_rate_map = defaultdict(lambda: None) + self._learning_rate_map = dict() if isinstance(self._learning_rate, framework.Variable): self._learning_rate_map[framework.default_main_program( )] = self._learning_rate @@ -62,7 +62,7 @@ class Optimizer(object): return else: if not isinstance(self._learning_rate, float): - raise ValueError( + raise TypeError( "learning rate variable is created outside optimizer," "can not create new learning rate variable for new program") @@ -82,7 +82,7 @@ class Optimizer(object): """ if program is None: program = framework.default_main_program() - return self._learning_rate_map[program] + return self._learning_rate_map.get(program, None) def _append_optimize_op(self, block, param_and_grad): """ append optimize operator to block and return all the added optimize_op -- GitLab From 777a281a4fa00b8975ae85aa0fbec39a0cac4ac0 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 26 Feb 2018 10:48:53 +0800 Subject: [PATCH 185/217] Fix CI and enhance gitignore --- .gitignore | 1 - python/paddle/fluid/.gitignore | 1 + python/paddle/fluid/layers/layer_function_generator.py | 8 ++++++-- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index fe0d13f4d9..2badc3bdaa 100644 --- a/.gitignore +++ b/.gitignore @@ -27,7 +27,6 @@ third_party/ cmake-build-* # generated while compiling -python/paddle/v2/fluid/core.so paddle/pybind/pybind.h CMakeFiles cmake_install.cmake diff --git a/python/paddle/fluid/.gitignore b/python/paddle/fluid/.gitignore index 2ff540d576..80c1cf3fcb 100644 --- a/python/paddle/fluid/.gitignore +++ b/python/paddle/fluid/.gitignore @@ -1 +1,2 @@ proto +core.so diff --git a/python/paddle/fluid/layers/layer_function_generator.py b/python/paddle/fluid/layers/layer_function_generator.py index 16a401dc7b..bd79022a0c 100644 --- a/python/paddle/fluid/layers/layer_function_generator.py +++ b/python/paddle/fluid/layers/layer_function_generator.py @@ -130,7 +130,7 @@ def generate_layer_fn(op_type): o_name = not_intermediate_outputs[0].name intermediate_output_names = [output.name for output in intermediate_outputs] - def infer_and_check_dtype(op_proto, **kwargs): + def infer_and_check_dtype(op_proto, *args, **kwargs): """ This function performs the sanity check for dtype and instance
type. @@ -141,6 +141,10 @@ def generate_layer_fn(op_type): val = kwargs.pop(name, []) if not isinstance(val, list) and not isinstance(val, tuple): val = [val] + if len(val) == 0: + val = [args[0]] + args = args[1:] + for each in val: if not isinstance(each, Variable): raise ValueError("input of {0} must be variable".format( @@ -158,7 +162,7 @@ def generate_layer_fn(op_type): def func(*args, **kwargs): helper = LayerHelper(op_type, **kwargs) - dtype = infer_and_check_dtype(op_proto, **kwargs) + dtype = infer_and_check_dtype(op_proto, *args, **kwargs) inputs = dict() for ipt in op_proto.inputs: -- GitLab From 6d25a43a9431c7df83fda04f3eee399e20988548 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Mon, 26 Feb 2018 11:27:25 +0800 Subject: [PATCH 186/217] set the default option of WITH_FAST_BUNDLE_TEST to OFF --- CMakeLists.txt | 2 +- paddle/scripts/docker/build.sh | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 5db5c228be..7500e8ed3c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -60,7 +60,7 @@ option(USE_NNPACK "Compile PaddlePaddle with NNPACK library" OFF) option(WITH_DISTRIBUTE "Compile with grpc distributed support" OFF) option(USE_EIGEN_FOR_BLAS "Use matrix multiplication in Eigen" OFF) option(WITH_ARM_FP16 "Use half precision support on armv8.2-a cpu" OFF) -option(WITH_FAST_BUNDLE_TEST "Bundle tests that can be run in a single process together to reduce launch overhead" ON) +option(WITH_FAST_BUNDLE_TEST "Bundle tests that can be run in a single process together to reduce launch overhead" OFF) # CMAKE_BUILD_TYPE if(NOT CMAKE_BUILD_TYPE) diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index 8ec3d0c657..2220a593b3 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -49,6 +49,7 @@ function cmake_gen() { -DCUDNN_ROOT=/usr/ -DWITH_STYLE_CHECK=${WITH_STYLE_CHECK:-ON} -DWITH_TESTING=${WITH_TESTING:-ON} + -DWITH_FAST_BUNDLE_TEST=ON -DCMAKE_EXPORT_COMPILE_COMMANDS=ON ======================================== EOF @@ -72,6 +73,7 @@ EOF -DCUDNN_ROOT=/usr/ \ -DWITH_STYLE_CHECK=${WITH_STYLE_CHECK:-ON} \ -DWITH_TESTING=${WITH_TESTING:-ON} \ + -DWITH_FAST_BUNDLE_TEST=ON \ -DCMAKE_EXPORT_COMPILE_COMMANDS=ON } -- GitLab From 5d30142889406f7496a97a1a56a9e0daa1cad522 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Mon, 26 Feb 2018 12:48:54 +0800 Subject: [PATCH 187/217] follow comment from panxin --- python/paddle/fluid/layers/utils.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/python/paddle/fluid/layers/utils.py b/python/paddle/fluid/layers/utils.py index d79e8078ff..49ec308883 100644 --- a/python/paddle/fluid/layers/utils.py +++ b/python/paddle/fluid/layers/utils.py @@ -11,10 +11,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import numpy as np -def convert_to_list(value, n, name): - """Converts a single integer or iterable of integers into an integer list. +def convert_to_list(value, n, name, dtype=np.int): + """ + Converts a single numerical type or iterable of numerical + types into a numerical type list. Arguments: value: The value to validate and convert. Could be an int, or any iterable of ints. n: The size of the list to be returned. name: The name of the argument being validated, e.g. "stride" or "filter_size". This is only used to format error messages.
+ dtype: the numerical type of the element of the list to be returned. Returns: - A list of n integers. + A list of n dtypes. Raises: ValueError: If something else than an int/long or iterable thereof was passed. """ - if isinstance(value, int): + if isinstance(value, dtype): return [value, ] * n else: try: value_list = list(value) except TypeError: raise ValueError("The " + name + "'s type must be list or tuple. Received: " + str( value)) if len(value_list) != n: raise ValueError("The " + name + "'s length must be " + str(n) + ". Received: " + str(value)) for single_value in value_list: try: - int(single_value) + dtype(single_value) except (ValueError, TypeError): raise ValueError( "The " + name + "'s type must be a list or tuple of " + str( - n) + " integers. Received: " + str(value) + " " + n) + " " + str(dtype) + ". Received: " + str( + value) + " " "including element " + str(single_value) + " of type" + " " + + str(type(single_value))) return value_list -- GitLab From 84b7b4b96d9b2ebb83fc6e96604bb85380a5abff Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 26 Feb 2018 14:25:38 +0800 Subject: [PATCH 188/217] add c-api quick start --- doc/howto/capi/index_cn.rst | 1 + doc/howto/capi/quick_start.md | 23 +++++++++++++++++++++ 2 files changed, 24 insertions(+) create mode 100644 doc/howto/capi/quick_start.md diff --git a/doc/howto/capi/index_cn.rst b/doc/howto/capi/index_cn.rst index e589a6d346..6981c7ce38 100644 --- a/doc/howto/capi/index_cn.rst +++ b/doc/howto/capi/index_cn.rst @@ -4,6 +4,7 @@ C-API Inference Library .. toctree:: :maxdepth: 1 + quick_start.md compile_paddle_lib_cn.md organization_of_the_inputs_cn.md workflow_of_capi_cn.md diff --git a/doc/howto/capi/quick_start.md b/doc/howto/capi/quick_start.md new file mode 100644 index 0000000000..cb696b6959 --- /dev/null +++ b/doc/howto/capi/quick_start.md @@ -0,0 +1,23 @@ +## Getting Started + +### Overview +After we finish training a neural network model, the next step is to use the model for inference. Inference is the process of preparing the input data, running it through the model, and obtaining the prediction results. + +Compared with model training, inference has the following characteristics: + +1. Inference does not need the back-propagation and parameter-update parts of the training process. +1. Inference does not need labels. +1. Inference often needs to be integrated with the user's own system. + +Because of the above characteristics, the model inference SDK needs to be designed separately and should have the following properties: + +1. The inference SDK does not contain the back-propagation and parameter-update parts, to reduce the size of the SDK. +1. The inference SDK needs to provide a concise user interface for ease of use. +1. Since the input data may have various structures, the input data format is wrapped in a clear and concise way. +1. To be compatible with the user's system, the SDK interface needs to conform to the C standard. + +PaddlePaddle provides the C-API to solve the above problems. For the usage of the C-API, we provide the following guides: + +1. [C-API workflow](./workflow_of_capi_cn.md) +1. [Installing and compiling the C-API inference library](./compile_paddle_lib_cn.md) +1. [Organization of input/output data](./organization_of_the_inputs_cn.md) -- GitLab From 28668ad06e5b895d0b777b300570900af99a3897 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 26 Feb 2018 14:47:06 +0800 Subject: [PATCH 189/217] move quick start to index_cn.rst --- doc/howto/capi/index_cn.rst | 17 +++++++++++++++++ doc/howto/capi/quick_start.md | 23 ----------------------- 2 files changed, 17 insertions(+), 23 deletions(-) delete mode 100644 doc/howto/capi/quick_start.md diff --git a/doc/howto/capi/index_cn.rst b/doc/howto/capi/index_cn.rst index 6981c7ce38..e240a3e3dc 100644 --- a/doc/howto/capi/index_cn.rst +++ b/doc/howto/capi/index_cn.rst @@ -1,6 +1,23 @@ C-API Inference Library ================== +After we finish training a neural network model, the next step is to use the model for inference. Inference is the process of preparing the input data, running it through the model, and obtaining the prediction results. + +Compared with model training, inference has the following characteristics: + +1. Inference does not need the back-propagation and parameter-update parts of the training process. +1. Inference does not need labels. +1. Inference often needs to be integrated with the user's own system. + +Because of the above characteristics, the model inference SDK needs to be designed separately and should have the following properties: + +1. The inference SDK does not contain the back-propagation and parameter-update parts, to reduce the size of the SDK. +1. The inference SDK needs to provide a concise user interface for ease of use. +1. Since the input data may have various structures, the input data format is wrapped in a clear and concise way. +1. To be compatible with the user's system, the SDK interface needs to conform to the C standard. + +PaddlePaddle provides the C-API to solve the above problems. For the usage of the C-API, we provide the following guides: + ..
toctree:: :maxdepth: 1 quick_start.md compile_paddle_lib_cn.md organization_of_the_inputs_cn.md workflow_of_capi_cn.md diff --git a/doc/howto/capi/quick_start.md b/doc/howto/capi/quick_start.md deleted file mode 100644 index cb696b6959..0000000000 --- a/doc/howto/capi/quick_start.md +++ /dev/null @@ -1,23 +0,0 @@ -## Getting Started - -### Overview -After we finish training a neural network model, the next step is to use the model for inference. Inference is the process of preparing the input data, running it through the model, and obtaining the prediction results. - -Compared with model training, inference has the following characteristics: - -1. Inference does not need the back-propagation and parameter-update parts of the training process. -1. Inference does not need labels. -1. Inference often needs to be integrated with the user's own system. - -Because of the above characteristics, the model inference SDK needs to be designed separately and should have the following properties: - -1. The inference SDK does not contain the back-propagation and parameter-update parts, to reduce the size of the SDK. -1. The inference SDK needs to provide a concise user interface for ease of use. -1. Since the input data may have various structures, the input data format is wrapped in a clear and concise way. -1. To be compatible with the user's system, the SDK interface needs to conform to the C standard. - -PaddlePaddle provides the C-API to solve the above problems. For the usage of the C-API, we provide the following guides: - -1. [C-API workflow](./workflow_of_capi_cn.md) -1. [Installing and compiling the C-API inference library](./compile_paddle_lib_cn.md) -1. [Organization of input/output data](./organization_of_the_inputs_cn.md) -- GitLab From 901dabc7f50e66d2efe18498acc8a9953f973150 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 26 Feb 2018 14:48:01 +0800 Subject: [PATCH 190/217] clean --- doc/howto/capi/index_cn.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/howto/capi/index_cn.rst b/doc/howto/capi/index_cn.rst index e240a3e3dc..8197c4bc1c 100644 --- a/doc/howto/capi/index_cn.rst +++ b/doc/howto/capi/index_cn.rst @@ -21,7 +21,6 @@ PaddlePaddle provides the C-API to solve the above problems. For the usage of the C-API, .. toctree:: :maxdepth: 1 - quick_start.md compile_paddle_lib_cn.md organization_of_the_inputs_cn.md workflow_of_capi_cn.md -- GitLab From d2e727637731a1a93b24898ee04ae697847760fa Mon Sep 17 00:00:00 2001 From: qijun Date: Mon, 26 Feb 2018 15:10:41 +0800 Subject: [PATCH 191/217] fix warp ctc dir error --- paddle/testing/paddle_gtest_main.cc | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/paddle/testing/paddle_gtest_main.cc b/paddle/testing/paddle_gtest_main.cc index 270f2f4c18..0fea6a8079 100644 --- a/paddle/testing/paddle_gtest_main.cc +++ b/paddle/testing/paddle_gtest_main.cc @@ -28,10 +28,9 @@ int main(int argc, char** argv) { } #ifdef PADDLE_WITH_CUDA new_argv.push_back( - strdup("--tryfromenv=fraction_of_gpu_memory_to_use,use_pinned_memory," - "warpctc_dir")); + strdup("--tryfromenv=fraction_of_gpu_memory_to_use,use_pinned_memory")); #else - new_argv.push_back(strdup("--tryfromenv=use_pinned_memory,warpctc_dir")); + new_argv.push_back(strdup("--tryfromenv=use_pinned_memory")); #endif int new_argc = static_cast(new_argv.size()); char** new_argv_address = new_argv.data(); -- GitLab From 13922fb489dec953262a0b9e2dbe4c99c1c7296a Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 26 Feb 2018 15:13:07 +0800 Subject: [PATCH 192/217] fix rst format --- doc/howto/capi/index_cn.rst | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/doc/howto/capi/index_cn.rst b/doc/howto/capi/index_cn.rst index 8197c4bc1c..7f10071798 100644 --- a/doc/howto/capi/index_cn.rst +++ b/doc/howto/capi/index_cn.rst @@ -5,16 +5,16 @@ C-API Inference Library Compared with model training, inference has the following characteristics: -1. Inference does not need the back-propagation and parameter-update parts of the training process. -1. Inference does not need labels. -1. Inference often needs to be integrated with the user's own system. +#. Inference does not need the back-propagation and parameter-update parts of the training process. +#. Inference does not need labels. +#. Inference often needs to be integrated with the user's own system. Because of the above characteristics, the model inference SDK needs to be designed separately and should have the following properties: -1. The inference SDK does not contain the back-propagation and parameter-update parts, to reduce the size of the SDK. -1. The inference SDK needs to provide a concise user interface for ease of use. -1. Since the input data may have various structures, the input data format is wrapped in a clear and concise way. -1. To be compatible with the user's system, the SDK interface needs to conform to the C standard. +#. The inference SDK does not contain the back-propagation and parameter-update parts, to reduce the size of the SDK. +#. The inference SDK needs to provide a concise user interface for ease of use. +#. Since the input data may have various structures, the input data format is wrapped in a clear and concise way. +#.
为了和用户系统兼容,SDK的接口需要是满足C标准的接口。 PaddlePaddle提供了C-API,用于解决上述问题。关于C-API的使用,我们提供了如下指南: -- GitLab From 6a686794ac13fc300346f8ef343da5b898b681e3 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Mon, 26 Feb 2018 15:47:41 +0800 Subject: [PATCH 193/217] fix comment typo --- paddle/fluid/framework/framework.proto | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/framework/framework.proto b/paddle/fluid/framework/framework.proto index 23064541a0..53725d3d80 100644 --- a/paddle/fluid/framework/framework.proto +++ b/paddle/fluid/framework/framework.proto @@ -115,7 +115,7 @@ message VarType { CHANNEL = 16; // Any runtime decided variable type is raw // raw variables should manage their own allocations - // in operators likc nccl_op + // in operators like nccl_op RAW = 17; } -- GitLab From 4161328eb528a396449a8be124aa203969ebc066 Mon Sep 17 00:00:00 2001 From: wanghaox Date: Mon, 26 Feb 2018 16:06:22 +0800 Subject: [PATCH 194/217] add warrper for detection map operator --- python/paddle/fluid/layers/detection.py | 40 +++++++++++++++++++++ python/paddle/fluid/tests/test_detection.py | 38 ++++++++++++++++++++ 2 files changed, 78 insertions(+) diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py index 5ae4da1ea3..2c2f80b4bf 100644 --- a/python/paddle/fluid/layers/detection.py +++ b/python/paddle/fluid/layers/detection.py @@ -16,6 +16,7 @@ All layers just related to the detection neural network. """ from layer_function_generator import generate_layer_fn +from layer_function_generator import autodoc from ..layer_helper import LayerHelper import tensor import ops @@ -28,6 +29,7 @@ __all__ = [ 'target_assign', 'detection_output', 'ssd_loss', + 'detection_map', ] __auto__ = [ @@ -132,6 +134,44 @@ def detection_output(scores, return nmsed_outs +@autodoc() +def detection_map(detect_res, + label, + pos_count=None, + true_pos=None, + false_pos=None, + overlap_threshold=0.3, + evaluate_difficult=True, + ap_type='integral'): + helper = LayerHelper("detection_map", **locals()) + + map_out = helper.create_tmp_variable(dtype='float32') + accum_pos_count_out = helper.create_tmp_variable(dtype='int32') + accum_true_pos_out = helper.create_tmp_variable(dtype='float32') + accum_false_pos_out = helper.create_tmp_variable(dtype='float32') + helper.append_op( + type="detection_map", + inputs={ + 'Label': label, + 'DetectRes': detect_res, + 'PosCount': pos_count, + 'TruePos': true_pos, + 'FalsePos': false_pos + }, + outputs={ + 'MAP': map_out, + 'AccumPosCount': accum_pos_count_out, + 'AccumTruePos': accum_true_pos_out, + 'AccumFalsePos': accum_false_pos_out + }, + attrs={ + 'overlap_threshold': overlap_threshold, + 'evaluate_difficult': evaluate_difficult, + 'ap_type': ap_type + }) + return map_out, accum_pos_count_out, accum_true_pos_out, accum_false_pos_out + + def bipartite_match(dist_matrix, name=None): """ **Bipartite matchint operator** diff --git a/python/paddle/fluid/tests/test_detection.py b/python/paddle/fluid/tests/test_detection.py index 1dc6d107d2..fc25786499 100644 --- a/python/paddle/fluid/tests/test_detection.py +++ b/python/paddle/fluid/tests/test_detection.py @@ -145,5 +145,43 @@ class TestMultiBoxHead(unittest.TestCase): return mbox_locs, mbox_confs, box, var +class TestDetectionMAP(unittest.TestCase): + def test_detection_map(self): + program = Program() + with program_guard(program): + detect_res = layers.data( + name='detect_res', + shape=[10, 6], + append_batch_size=False, + dtype='float32') + label = layers.data( + name='label', + 
+                shape=[10, 6],
+                append_batch_size=False,
+                dtype='float32')
+
+            map_out, accum_pos_count_out, accum_true_pos_out, accum_false_pos_out = layers.detection_map(
+                detect_res=detect_res, label=label)
+            self.assertIsNotNone(map_out)
+            self.assertIsNotNone(accum_pos_count_out)
+            self.assertIsNotNone(accum_true_pos_out)
+            self.assertIsNotNone(accum_false_pos_out)
+            self.assertEqual(map_out.shape, (1, ))
+            map_out, accum_pos_count_out2, accum_true_pos_out2, accum_false_pos_out2 = layers.detection_map(
+                detect_res=detect_res, label=label)
+            self.assertIsNotNone(map_out)
+            self.assertIsNotNone(accum_pos_count_out2)
+            self.assertIsNotNone(accum_true_pos_out2)
+            self.assertIsNotNone(accum_false_pos_out2)
+            self.assertEqual(map_out.shape, (1, ))
+            self.assertEqual(accum_pos_count_out.shape,
+                             accum_pos_count_out2.shape)
+            self.assertEqual(accum_true_pos_out.shape,
+                             accum_true_pos_out2.shape)
+            self.assertEqual(accum_false_pos_out.shape,
+                             accum_false_pos_out2.shape)
+        print(str(program))
+
+
 if __name__ == '__main__':
     unittest.main()
-- 
GitLab


From 7a1d6ae5f61e466a403b26d6876463883f0946ae Mon Sep 17 00:00:00 2001
From: Yancey1989
Date: Mon, 26 Feb 2018 19:15:30 +0800
Subject: [PATCH 195/217] Fix send_recv unit test

---
 paddle/fluid/operators/listen_and_serv_op.cc |  2 ++
 paddle/fluid/operators/send_recv_op_test.cc  | 26 ++++++++++++--------
 2 files changed, 18 insertions(+), 10 deletions(-)

diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc
index ee0e3533ce..8e9923c87c 100644
--- a/paddle/fluid/operators/listen_and_serv_op.cc
+++ b/paddle/fluid/operators/listen_and_serv_op.cc
@@ -129,6 +129,8 @@ class ListenAndServOp : public framework::OperatorBase {
       }
       if (exit_flag) {
         rpc_service_->ShutDown();
+        rpc_service_->SetCond(1);
+        break;
       }
       try {
         executor.Run(*program, &recv_scope, block->ID(), /*global_block*/
diff --git a/paddle/fluid/operators/send_recv_op_test.cc b/paddle/fluid/operators/send_recv_op_test.cc
index 008c012a32..e9fb845b47 100644
--- a/paddle/fluid/operators/send_recv_op_test.cc
+++ b/paddle/fluid/operators/send_recv_op_test.cc
@@ -95,7 +95,7 @@ void AddOp(const std::string &type, const f::VariableNameMap &inputs,
   for (auto kv : outputs) {
     for (auto v : kv.second) {
       auto var = block->Var(v);
-      var->SetDataType(f::proto::DataType::FP32);
+      var->SetDataType(f::proto::VarType::FP32);
     }
   }

@@ -122,33 +122,37 @@ void StartServerNet(bool is_sparse) {
   // sub program run in listen_and_serv_op, for simple test we use sum
   f::ProgramDesc program;
-  f::BlockDesc *block = program.MutableBlock(0);
+  f::BlockDesc *optimize_block = program.MutableBlock(0);
   // X for server side tensors, RX for received tensors, must be of same shape.
- AddOp("sum", {{"X", {"x0", "x1"}}}, {{"Out", {"Out"}}}, {}, block); + AddOp("sum", {{"X", {"x0", "x1"}}}, {{"Out", {"Out"}}}, {}, optimize_block); f::AttributeMap attrs; attrs.insert({"endpoint", std::string("127.0.0.1:6174")}); + attrs.insert({"Fanin", 1}); attrs.insert({"ParamList", std::vector({"Out"})}); attrs.insert({"GradList", std::vector({"x1"})}); - attrs.insert({"OptimizeBlock", block}); + attrs.insert({"OptimizeBlock", optimize_block}); listen_and_serv_op = - f::OpRegistry::CreateOp("listen_and_serv", {}, {}, attrs); + f::OpRegistry::CreateOp("listen_and_serv", {{"X", {"x1"}}}, {}, attrs); listen_and_serv_op->Run(scope, place); } TEST(SendRecvOp, CPUDense) { std::thread server_thread(StartServerNet, false); - sleep(10); // wait server to start + sleep(5); // wait server to start // local net f::Scope scope; p::CPUPlace place; InitTensorsInScope(scope, place); + // create rpc client var + scope.Var("RPC_CLIENT_VAR"); f::AttributeMap attrs; attrs.insert({"endpoints", std::vector({"127.0.0.1:6174"})}); attrs.insert({"epmap", std::vector({"127.0.0.1:6174"})}); - auto send_op = f::OpRegistry::CreateOp("send", {{"X", {"x1"}}}, - {{"Out", {"Out"}}}, attrs); + auto send_op = f::OpRegistry::CreateOp( + "send", {{"X", {"x1"}}}, + {{"Out", {"Out"}}, {"RPCClient", {"RPC_CLIENT_VAR"}}}, attrs); send_op->Run(scope, place); auto in_var = scope.Var("x1"); @@ -175,11 +179,13 @@ TEST(SendRecvOp, CPUSparse) { p::CPUPlace place; p::CPUDeviceContext ctx(place); InitSelectedRowsInScope(scope, place); + scope.Var("RPC_CLIENT_VAR"); f::AttributeMap attrs; attrs.insert({"endpoints", std::vector({"127.0.0.1:6174"})}); attrs.insert({"epmap", std::vector({"127.0.0.1:6174"})}); - auto send_op = f::OpRegistry::CreateOp("send", {{"X", {"x1"}}}, - {{"Out", {"Out"}}}, attrs); + auto send_op = f::OpRegistry::CreateOp( + "send", {{"X", {"x1"}}}, + {{"Out", {"Out"}}, {"RPCClient", {"RPC_CLIENT_VAR"}}}, attrs); send_op->Run(scope, place); auto x0 = scope.Var("x0")->GetMutable(); -- GitLab From 1cf5a19476cb46071a8bf8962c4bb530a48bb17f Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Mon, 26 Feb 2018 19:58:28 +0800 Subject: [PATCH 196/217] Fix c-api doc style --- doc/howto/capi/workflow_of_capi_cn.md | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/howto/capi/workflow_of_capi_cn.md b/doc/howto/capi/workflow_of_capi_cn.md index a61d2267bf..1ccc72eefb 100644 --- a/doc/howto/capi/workflow_of_capi_cn.md +++ b/doc/howto/capi/workflow_of_capi_cn.md @@ -65,6 +65,7 @@ output_file = "output.paddle.model" merge_v2_model(net, param_file, output_file) ``` + 对[手写数字识别](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/capi/examples/model_inference/dense)这个示例,可直接运行 `python` [merge_v2_model.py](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/capi/examples/model_inference/dense/merge_v2_model.py)。序列化结果会写入当前运行目录下的`output.paddle.model`文件中。使用这种方式,运行时C-API可以通过指定`output.paddle.model`文件的路径来加载预测模型。 #### 注意事项 -- GitLab From 34605d26410a89e0e3b90a9236e8f9e4149834ec Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 26 Feb 2018 20:07:59 +0800 Subject: [PATCH 197/217] accelerate the cuda concat op, avoid many times copy (#8585) * "try enhance concat op" * "enhance the concat operator" --- paddle/fluid/operators/concat_op.h | 47 ++++++++++++++++++++++++++---- 1 file changed, 41 insertions(+), 6 deletions(-) diff --git a/paddle/fluid/operators/concat_op.h b/paddle/fluid/operators/concat_op.h index eb0e43ad2d..208a4481c6 100644 --- a/paddle/fluid/operators/concat_op.h +++ b/paddle/fluid/operators/concat_op.h @@ -14,6 
+14,7 @@ limitations under the License. */ #pragma once +#include #include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/strided_memcpy.h" @@ -34,12 +35,46 @@ class ConcatKernel : public framework::OpKernel { auto out_stride = framework::stride_numel(out->dims()); size_t output_offset = 0; - for (auto* in : ins) { - auto in_stride = framework::stride_numel(in->dims()); - StridedNumelCopyWithAxis(ctx.device_context(), axis, - out->data() + output_offset, out_stride, - in->data(), in_stride, in_stride[axis]); - output_offset += in_stride[axis]; + + // If axis >=1, copy to out immediately need to call many times + // of cuda memcpy. Copy the input to cpu and do the stride copy, + // then copy to gpu output. + + if (platform::is_gpu_place(place) && axis >= 1) { + platform::CPUPlace copy_place; + auto& cpu_ctx = *platform::DeviceContextPool::Instance().Get(copy_place); + framework::Tensor cpu_out; + cpu_out.Resize(out->dims()); + cpu_out.mutable_data(copy_place); + auto& dev_ctx = ctx.device_context(); + std::vector> cpu_ins; + for (auto* in : ins) { + std::unique_ptr cpu_in(new framework::Tensor); + framework::TensorCopy(*in, copy_place, dev_ctx, cpu_in.get()); + cpu_ins.emplace_back(std::move(cpu_in)); + } + // TODO(dzhwinter): overlap copy and compute stream + // https://devblogs.nvidia.com/how-overlap-data-transfers-cuda-cc/ + dev_ctx.Wait(); + + for (auto& in : cpu_ins) { + auto& cpu_in = *in.get(); + auto in_stride = framework::stride_numel(cpu_in.dims()); + + StridedNumelCopyWithAxis( + cpu_ctx, axis, cpu_out.data() + output_offset, out_stride, + cpu_in.data(), in_stride, in_stride[axis]); + output_offset += in_stride[axis]; + } + framework::TensorCopy(cpu_out, place, dev_ctx, out); + } else { + for (auto* in : ins) { + auto in_stride = framework::stride_numel(in->dims()); + StridedNumelCopyWithAxis(ctx.device_context(), axis, + out->data() + output_offset, out_stride, + in->data(), in_stride, in_stride[axis]); + output_offset += in_stride[axis]; + } } } }; -- GitLab From b9ec24c6e93c6e6494622da9a809f60b576cf7a3 Mon Sep 17 00:00:00 2001 From: Xin Pan Date: Sat, 24 Feb 2018 00:11:48 -0800 Subject: [PATCH 198/217] Extend current profiler for timeline and more features. 
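This patch hooks CUPTI into the profiler so GPU kernel activity can be
collected alongside CPU events (the new 'All' profiler state). As an
editor's sketch -- not part of the original commit message -- the snippet
below shows how the Python entry point touched by this patch is meant to
be driven. It is adapted from the tests in this series; the tiny network
and the iteration count are placeholders. 'All' assumes a CUPTI-enabled
GPU build, so the sketch uses 'CPU', which works everywhere.

    import numpy as np
    import paddle.fluid as fluid
    import paddle.fluid.layers as layers
    import paddle.fluid.profiler as profiler

    # A tiny network: one conv layer over random images.
    data = layers.data(name='data', shape=[3, 28, 28], dtype='float32')
    conv = layers.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1])

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    # profiler.profiler() is a context manager: it enables profiling on
    # entry and disables it (printing the sorted event table) on exit.
    # Switch 'CPU' to 'All' on a CUPTI build to also record the GPU
    # kernel timeline added by this patch.
    with profiler.profiler('CPU', sorted_key='ave'):
        for i in range(8):
            input = np.random.random([4, 3, 28, 28]).astype('float32')
            exe.run(fluid.default_main_program(), feed={'data': input})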
---
 CMakeLists.txt                               |   1 +
 cmake/configure.cmake                        |  10 +-
 cmake/cuda.cmake                             |   3 +-
 cmake/cupti.cmake                            |  41 +++
 paddle/fluid/framework/executor.cc           |   4 +-
 paddle/fluid/framework/framework.proto       |   2 +
 paddle/fluid/framework/op_desc.h             |   2 +
 paddle/fluid/platform/CMakeLists.txt         |   5 +-
 paddle/fluid/platform/device_tracer.cc       | 285 ++++++++++++++++++
 paddle/fluid/platform/device_tracer.h        |  72 +++++
 paddle/fluid/platform/dynload/CMakeLists.txt |   8 +-
 paddle/fluid/platform/dynload/cupti.cc       |  35 +++
 paddle/fluid/platform/dynload/cupti.h        |  86 ++++++
 .../fluid/platform/dynload/dynamic_loader.cc |  16 +
 .../fluid/platform/dynload/dynamic_loader.h  |   2 +
 paddle/fluid/platform/profiler.cc            |  38 ++-
 paddle/fluid/platform/profiler.h             |  10 +-
 paddle/fluid/platform/profiler.proto         |  30 ++
 paddle/fluid/platform/profiler_test.cc       |   2 +-
 paddle/fluid/pybind/pybind.cc                |   1 +
 python/paddle/fluid/profiler.py              |  11 +-
 .../fluid/tests/unittests/CMakeLists.txt     |   2 +
 .../fluid/tests/unittests/test_profiler.py   |  25 +-
 .../v2/fluid/tests/unittests/test_nvprof.py  |  46 +++
 24 files changed, 699 insertions(+), 38 deletions(-)
 create mode 100644 cmake/cupti.cmake
 create mode 100644 paddle/fluid/platform/device_tracer.cc
 create mode 100644 paddle/fluid/platform/device_tracer.h
 create mode 100644 paddle/fluid/platform/dynload/cupti.cc
 create mode 100644 paddle/fluid/platform/dynload/cupti.h
 create mode 100644 paddle/fluid/platform/profiler.proto
 create mode 100644 python/paddle/v2/fluid/tests/unittests/test_nvprof.py

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 5db5c228be..21616c7cd4 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -146,6 +146,7 @@ include(external/cares)
 include(external/grpc)

 include(cudnn)              # set cudnn libraries, must before configure
+include(cupti)
 include(configure)          # add paddle env configuration
 include(generic)            # simplify cmake module
 include(package)            # set paddle packages
diff --git a/cmake/configure.cmake b/cmake/configure.cmake
index ae3295fe41..7730453fc9 100644
--- a/cmake/configure.cmake
+++ b/cmake/configure.cmake
@@ -59,6 +59,7 @@ endif(NOT WITH_GOLANG)

 if(NOT WITH_GPU)
     add_definitions(-DHPPL_STUB_FUNC)
+    add_definitions("-DCUPTI_LIB_PATH=\"\"")

     list(APPEND CMAKE_CXX_SOURCE_FILE_EXTENSIONS cu)
 else()
@@ -73,7 +74,14 @@ else()
     if(NOT CUDNN_FOUND)
         message(FATAL_ERROR "Paddle needs cudnn to compile")
     endif()
-
+    if(CUPTI_FOUND)
+        include_directories(${CUPTI_INCLUDE_DIR})
+        add_definitions(-DPADDLE_WITH_CUPTI)
+        add_definitions("-DCUPTI_LIB_PATH=\"${CUPTI_LIBRARY_PATH}\"")
+    else()
+        add_definitions("-DCUPTI_LIB_PATH=\"\"")
+        message(STATUS "Cannot find CUPTI, GPU Profiling is incorrect.")
+    endif()
     set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler ${SIMD_FLAG}")

     # Include cuda and cudnn
diff --git a/cmake/cuda.cmake b/cmake/cuda.cmake
index de94bd5008..7edc863772 100644
--- a/cmake/cuda.cmake
+++ b/cmake/cuda.cmake
@@ -155,7 +155,8 @@ endif()
 include_directories(${CUDA_INCLUDE_DIRS})
 list(APPEND EXTERNAL_LIBS ${CUDA_LIBRARIES} ${CUDA_rt_LIBRARY})
 if(NOT WITH_DSO)
-    list(APPEND EXTERNAL_LIBS ${CUDNN_LIBRARY} ${CUDA_CUBLAS_LIBRARIES} ${CUDA_curand_LIBRARY} ${NCCL_LIBRARY})
+    # TODO(panyx0718): CUPTI only allows DSO?
+    list(APPEND EXTERNAL_LIBS ${CUDNN_LIBRARY} ${CUPTI_LIBRARY} ${CUDA_CUBLAS_LIBRARIES} ${CUDA_curand_LIBRARY} ${NCCL_LIBRARY})
 endif(NOT WITH_DSO)

 # setting nvcc arch flags
diff --git a/cmake/cupti.cmake b/cmake/cupti.cmake
new file mode 100644
index 0000000000..72ed0f1e58
--- /dev/null
+++ b/cmake/cupti.cmake
@@ -0,0 +1,41 @@
+if(NOT WITH_GPU)
+    return()
+endif()
+
+
+set(CUPTI_ROOT "/usr" CACHE PATH "CUPTI ROOT")
+find_path(CUPTI_INCLUDE_DIR cupti.h
+    PATHS ${CUPTI_ROOT} ${CUPTI_ROOT}/include
+          $ENV{CUPTI_ROOT} $ENV{CUPTI_ROOT}/include
+          ${CUDA_TOOLKIT_ROOT_DIR}/extras/CUPTI/include
+    NO_DEFAULT_PATH
+)
+
+get_filename_component(__libpath_hist ${CUDA_CUDART_LIBRARY} PATH)
+
+set(TARGET_ARCH "x86_64")
+if(NOT ${CMAKE_SYSTEM_PROCESSOR})
+    set(TARGET_ARCH ${CMAKE_SYSTEM_PROCESSOR})
+endif()
+
+list(APPEND CUPTI_CHECK_LIBRARY_DIRS
+    ${CUPTI_ROOT}
+    ${CUPTI_ROOT}/lib64
+    ${CUPTI_ROOT}/lib
+    ${CUPTI_ROOT}/lib/${TARGET_ARCH}-linux-gnu
+    $ENV{CUPTI_ROOT}
+    $ENV{CUPTI_ROOT}/lib64
+    $ENV{CUPTI_ROOT}/lib
+    /usr/lib
+    ${CUDA_TOOLKIT_ROOT_DIR}/extras/CUPTI/lib64)
+find_library(CUPTI_LIBRARY NAMES libcupti.so libcupti.dylib # libcupti_static.a
+    PATHS ${CUPTI_CHECK_LIBRARY_DIRS} ${CUPTI_INCLUDE_DIR} ${__libpath_hist}
+          NO_DEFAULT_PATH
+    DOC "Path to cuPTI library.")
+
+get_filename_component(CUPTI_LIBRARY_PATH ${CUPTI_LIBRARY} DIRECTORY)
+if(CUPTI_INCLUDE_DIR AND CUPTI_LIBRARY)
+    set(CUPTI_FOUND ON)
+else()
+    set(CUPTI_FOUND OFF)
+endif()
diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc
index 88863ab99e..d3155d33d0 100644
--- a/paddle/fluid/framework/executor.cc
+++ b/paddle/fluid/framework/executor.cc
@@ -127,7 +127,9 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id,

     auto op = paddle::framework::OpRegistry::CreateOp(*op_desc);
     platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
-    platform::RecordEvent record_event(op->Type(), pool.Get(place_));
+    // TODO(panyx0718): Need a program id to distinguish programs.
+    platform::RecordEvent record_event(op->Type(), pool.Get(place_),
+                                       op_desc->Block()->ID());

     VLOG(3) << place_ << " " << op->DebugStringEx(local_scope);
     op->Run(*local_scope, place_);
diff --git a/paddle/fluid/framework/framework.proto b/paddle/fluid/framework/framework.proto
index 53725d3d80..38f22b8914 100644
--- a/paddle/fluid/framework/framework.proto
+++ b/paddle/fluid/framework/framework.proto
@@ -167,4 +167,6 @@ message BlockDesc {
 // Please refer to
 // https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/program.md
 // for more details.
+// TODO(panyx0718): A model can have multiple programs. Need a
+// way to distinguish them. Maybe ID or name?
 message ProgramDesc { repeated BlockDesc blocks = 1; }
diff --git a/paddle/fluid/framework/op_desc.h b/paddle/fluid/framework/op_desc.h
index b72aad6fb5..614dd8cd00 100644
--- a/paddle/fluid/framework/op_desc.h
+++ b/paddle/fluid/framework/op_desc.h
@@ -125,6 +125,8 @@ class OpDesc {

   BlockDesc *Block() { return this->block_; }

+  const BlockDesc &BlockRef() const { return *this->block_; }
+
   void SetBlock(BlockDesc *block) { this->block_ = block; }

  private:
diff --git a/paddle/fluid/platform/CMakeLists.txt b/paddle/fluid/platform/CMakeLists.txt
index 0d0cee21d1..28a668c86a 100644
--- a/paddle/fluid/platform/CMakeLists.txt
+++ b/paddle/fluid/platform/CMakeLists.txt
@@ -1,3 +1,5 @@
+proto_library(profiler_proto SRCS profiler.proto)
+
 if(WITH_GPU)
   cc_library(enforce SRCS enforce.cc DEPS)
 else()
@@ -37,7 +39,8 @@ nv_test(cudnn_helper_test SRCS cudnn_helper_test.cc DEPS dynload_cuda)
 nv_test(transform_test SRCS transform_test.cu DEPS paddle_memory place device_context)
 nv_test(nccl_test SRCS nccl_test.cu DEPS dynload_cuda gpu_info device_context)

-cc_library(profiler SRCS profiler.cc DEPS device_context)
+cc_library(device_tracer SRCS device_tracer.cc DEPS profiler_proto ${GPU_CTX_DEPS})
+cc_library(profiler SRCS profiler.cc DEPS device_context device_tracer)
 cc_test(profiler_test SRCS profiler_test.cc DEPS profiler)

 nv_test(float16_gpu_test SRCS float16_test.cu)
diff --git a/paddle/fluid/platform/device_tracer.cc b/paddle/fluid/platform/device_tracer.cc
new file mode 100644
index 0000000000..c940ddbae3
--- /dev/null
+++ b/paddle/fluid/platform/device_tracer.cc
@@ -0,0 +1,285 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/platform/device_tracer.h"
+#include <map>
+#include <mutex>
+#include "glog/logging.h"
+#include "paddle/fluid/framework/block_desc.h"
+#include "paddle/fluid/string/printf.h"
+
+namespace paddle {
+namespace platform {
+namespace {
+
+thread_local const char *cur_annotation = nullptr;
+std::once_flag tracer_once_flag;
+DeviceTracer *tracer = nullptr;
+}  // namespace
+#ifdef PADDLE_WITH_CUPTI
+
+namespace {
+// TODO(panyx0718): Revisit the buffer size here.
+uint64_t kBufSize = 32 * 1024;
+uint64_t kAlignSize = 8;
+
+#define ALIGN_BUFFER(buffer, align)                                 \
+  (((uintptr_t)(buffer) & ((align)-1))                              \
+       ? ((buffer) + (align) - ((uintptr_t)(buffer) & ((align)-1))) \
+       : (buffer))
+
+#define CUPTI_CALL(call)                                                   \
+  do {                                                                     \
+    CUptiResult _status = call;                                            \
+    if (_status != CUPTI_SUCCESS) {                                        \
+      const char *errstr;                                                  \
+      dynload::cuptiGetResultString(_status, &errstr);                     \
+      fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \
+              __FILE__, __LINE__, #call, errstr);                          \
+      exit(-1);                                                            \
+    }                                                                      \
+  } while (0)
+
+void EnableActivity() {
+  // Device activity record is created when CUDA initializes, so we
+  // want to enable it before cuInit() or any CUDA runtime call.
+  CUPTI_CALL(dynload::cuptiActivityEnable(CUPTI_ACTIVITY_KIND_MEMCPY));
+  CUPTI_CALL(dynload::cuptiActivityEnable(CUPTI_ACTIVITY_KIND_KERNEL));
+  CUPTI_CALL(dynload::cuptiActivityEnable(CUPTI_ACTIVITY_KIND_DEVICE));
+  CUPTI_CALL(dynload::cuptiActivityEnable(CUPTI_ACTIVITY_KIND_MEMSET));
+  CUPTI_CALL(dynload::cuptiActivityEnable(CUPTI_ACTIVITY_KIND_OVERHEAD));
+  // We don't track these activities for now.
+  // CUPTI_CALL(dynload::cuptiActivityEnable(CUPTI_ACTIVITY_KIND_CONTEXT));
+  // CUPTI_CALL(dynload::cuptiActivityEnable(CUPTI_ACTIVITY_KIND_DRIVER));
+  // CUPTI_CALL(dynload::cuptiActivityEnable(CUPTI_ACTIVITY_KIND_RUNTIME));
+  // CUPTI_CALL(dynload::cuptiActivityEnable(CUPTI_ACTIVITY_KIND_NAME));
+  // CUPTI_CALL(dynload::cuptiActivityEnable(CUPTI_ACTIVITY_KIND_MARKER));
+}
+
+void DisableActivity() {
+  CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_MEMCPY));
+  CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_KERNEL));
+  CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_DEVICE));
+  // Disable all other activity record kinds.
+  CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_CONTEXT));
+  CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_DRIVER));
+  CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_RUNTIME));
+  CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_MEMSET));
+  CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_NAME));
+  CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_MARKER));
+  CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_OVERHEAD));
+}
+
+void CUPTIAPI bufferRequested(uint8_t **buffer, size_t *size,
+                              size_t *maxNumRecords) {
+  uint8_t *buf = (uint8_t *)malloc(kBufSize + kAlignSize);
+  *size = kBufSize;
+  *buffer = ALIGN_BUFFER(buf, kAlignSize);
+  *maxNumRecords = 0;
+}
+
+void CUPTIAPI bufferCompleted(CUcontext ctx, uint32_t streamId, uint8_t *buffer,
+                              size_t size, size_t validSize) {
+  CUptiResult status;
+  CUpti_Activity *record = NULL;
+  if (validSize > 0) {
+    do {
+      status = dynload::cuptiActivityGetNextRecord(buffer, validSize, &record);
+      if (status == CUPTI_SUCCESS) {
+        switch (record->kind) {
+          case CUPTI_ACTIVITY_KIND_KERNEL:
+          case CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL: {
+            auto *kernel =
+                reinterpret_cast<const CUpti_ActivityKernel2 *>(record);
+            tracer->AddKernelRecords(kernel->start, kernel->end,
+                                     kernel->deviceId, kernel->streamId,
+                                     kernel->correlationId);
+            break;
+          }
+          default: { break; }
+        }
+      } else if (status == CUPTI_ERROR_MAX_LIMIT_REACHED) {
+        // Seems not an error in this case.
+        break;
+      } else {
+        CUPTI_CALL(status);
+      }
+    } while (1);
+
+    size_t dropped;
+    CUPTI_CALL(
+        dynload::cuptiActivityGetNumDroppedRecords(ctx, streamId, &dropped));
+    if (dropped != 0) {
+      fprintf(stderr, "Dropped %u activity records\n", (unsigned int)dropped);
+    }
+  }
+  free(buffer);
+}
+}  // namespace
+
+class DeviceTracerImpl : public DeviceTracer {
+ public:
+  DeviceTracerImpl() : enabled_(false) {}
+
+  void AddAnnotation(uint64_t id, const std::string &anno) {
+    std::lock_guard<std::mutex> l(trace_mu_);
+    correlations_[id] = anno;
+  }
+
+  void AddKernelRecords(uint64_t start, uint64_t end, uint32_t device_id,
+                        uint32_t stream_id, uint32_t correlation_id) {
+    std::lock_guard<std::mutex> l(trace_mu_);
+    kernel_records_.push_back(
+        KernelRecord{start, end, device_id, stream_id, correlation_id});
+  }
+
+  bool IsEnabled() {
+    std::lock_guard<std::mutex> l(trace_mu_);
+    return enabled_;
+  }
+
+  void Enable() {
+    std::lock_guard<std::mutex> l(trace_mu_);
+    if (enabled_) {
+      fprintf(stderr, "DeviceTracer already enabled\n");
+      return;
+    }
+    EnableActivity();
+
+    // Register callbacks for buffer requests and completed by CUPTI.
+    CUPTI_CALL(dynload::cuptiActivityRegisterCallbacks(bufferRequested,
+                                                       bufferCompleted));
+
+    CUptiResult ret;
+    ret = dynload::cuptiSubscribe(
+        &subscriber_, static_cast<CUpti_CallbackFunc>(ApiCallback), this);
+    if (ret == CUPTI_ERROR_MAX_LIMIT_REACHED) {
+      fprintf(stderr, "CUPTI subscriber limit reached.\n");
+    } else if (ret != CUPTI_SUCCESS) {
+      fprintf(stderr, "Failed to create CUPTI subscriber.\n");
+    }
+    CUPTI_CALL(
+        dynload::cuptiEnableCallback(1, subscriber_, CUPTI_CB_DOMAIN_DRIVER_API,
+                                     CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel));
+
+    CUPTI_CALL(dynload::cuptiGetTimestamp(&start_ns_));
+    enabled_ = true;
+  }
+
+  proto::Profile GenProfile() {
+    std::lock_guard<std::mutex> l(trace_mu_);
+    proto::Profile profile_pb;
+    profile_pb.set_start_ns(start_ns_);
+    profile_pb.set_end_ns(end_ns_);
+    std::map<std::string, std::vector<uint64_t>> event_times;
+    for (const KernelRecord &r : kernel_records_) {
+      if (correlations_.find(r.correlation_id) == correlations_.end()) {
+        fprintf(stderr, "cannot relate a kernel activity\n");
+        continue;
+      }
+      auto *event = profile_pb.add_events();
+      event->set_name(correlations_.at(r.correlation_id));
+      event->set_start_ns(r.start_ns);
+      event->set_end_ns(r.end_ns);
+      event->set_stream_id(r.stream_id);
+      event->set_device_id(r.device_id);
+      event_times[event->name()].push_back(r.end_ns - r.start_ns);
+    }
+    for (const auto &et : event_times) {
+      fprintf(
+          stderr, "%s: total: %fms invoked cuda kernels: %lu\n",
+          et.first.c_str(),
+          std::accumulate(et.second.begin(), et.second.end(), 0) / 1000000.0,
+          et.second.size());
+    }
+    return profile_pb;
+  }
+
+  void Disable() {
+    // flush might cause additional calls to DeviceTracer.
+    dynload::cuptiActivityFlushAll(CUPTI_ACTIVITY_FLAG_FLUSH_FORCED);
+    std::lock_guard<std::mutex> l(trace_mu_);
+    DisableActivity();
+    dynload::cuptiUnsubscribe(subscriber_);
+    CUPTI_CALL(dynload::cuptiGetTimestamp(&end_ns_));
+    PADDLE_ENFORCE(dynload::cuptiFinalize());
+    enabled_ = false;
+  }
+
+ private:
+  static void CUPTIAPI ApiCallback(void *userdata, CUpti_CallbackDomain domain,
+                                   CUpti_CallbackId cbid, const void *cbdata) {
+    auto *cbInfo = reinterpret_cast<const CUpti_CallbackData *>(cbdata);
+    DeviceTracer *tracer = reinterpret_cast<DeviceTracer *>(userdata);
+
+    if ((domain == CUPTI_CB_DOMAIN_DRIVER_API) &&
+        (cbid == CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel)) {
+      if (cbInfo->callbackSite == CUPTI_API_ENTER) {
+        const std::string anno =
+            cur_annotation ? cur_annotation : cbInfo->symbolName;
+        tracer->AddAnnotation(cbInfo->correlationId, anno);
+      }
+    } else {
+      VLOG(1) << "Unhandled API Callback for " << domain << " " << cbid;
+    }
+  }
+
+  std::mutex trace_mu_;
+  bool enabled_;
+  uint64_t start_ns_;
+  uint64_t end_ns_;
+  std::vector<KernelRecord> kernel_records_;
+  std::unordered_map<uint64_t, std::string> correlations_;
+  CUpti_SubscriberHandle subscriber_;
+};
+
+#endif  // PADDLE_WITH_CUPTI
+
+class DeviceTracerDummy : public DeviceTracer {
+ public:
+  DeviceTracerDummy() {}
+
+  void AddAnnotation(uint64_t id, const std::string &anno) {}
+
+  void AddKernelRecords(uint64_t start, uint64_t end, uint32_t device_id,
+                        uint32_t stream_id, uint32_t correlation_id) {}
+
+  bool IsEnabled() { return false; }
+
+  void Enable() {}
+
+  proto::Profile GenProfile() { return proto::Profile(); }
+
+  void Disable() {}
+};
+
+void CreateTracer(DeviceTracer **t) {
+#ifdef PADDLE_WITH_CUPTI
+  *t = new DeviceTracerImpl();
+#else
+  *t = new DeviceTracerDummy();
+#endif  // PADDLE_WITH_CUPTI
+}
+
+DeviceTracer *GetDeviceTracer() {
+  std::call_once(tracer_once_flag, CreateTracer, &tracer);
+  return tracer;
+}
+
+void SetCurAnnotation(const char *anno) { cur_annotation = anno; }
+
+void ClearCurAnnotation() { cur_annotation = nullptr; }
+
+}  // namespace platform
+}  // namespace paddle
diff --git a/paddle/fluid/platform/device_tracer.h b/paddle/fluid/platform/device_tracer.h
new file mode 100644
index 0000000000..36734561a2
--- /dev/null
+++ b/paddle/fluid/platform/device_tracer.h
@@ -0,0 +1,72 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+#include "paddle/fluid/platform/dynload/cupti.h"
+#include "paddle/fluid/platform/profiler.pb.h"
+
+namespace paddle {
+namespace platform {
+
+///////////////////////
+// WARN: Under Development. Don't depend on it yet.
+//////////////////////
+
+// DeviceTracer performs the following tasks:
+// 1. Register cuda callbacks for various events: kernel, memcpy, etc.
+// 2. Collect cuda statistics: start/end ts, memory, etc.
+// 3. Generate a protobuf for further analysis.
+class DeviceTracer {
+ public:
+  struct KernelRecord {
+    uint64_t start_ns;
+    uint64_t end_ns;
+    uint32_t device_id;
+    uint32_t stream_id;
+    uint32_t correlation_id;
+  };
+
+  virtual ~DeviceTracer() {}
+  // Needs to be called once before use.
+  virtual void Enable() = 0;
+  // Needs to be called once after use.
+  virtual void Disable() = 0;
+
+  // Add a pair to correlate internal cuda id with high level
+  // annotation (string). So cuda statistics can be represented by
+  // human-readable annotations.
+  virtual void AddAnnotation(uint64_t id, const std::string& anno) = 0;
+
+  // Add cuda kernel stats. `correlation_id` will be mapped to annotation
+  // added before for human readability.
+  virtual void AddKernelRecords(uint64_t start, uint64_t end,
+                                uint32_t device_id, uint32_t stream_id,
+                                uint32_t correlation_id) = 0;
+
+  // Generate a proto after done (Disabled).
+  virtual proto::Profile GenProfile() = 0;
+
+  virtual bool IsEnabled() = 0;
+};
+
+// Get a DeviceTracer.
+DeviceTracer* GetDeviceTracer();
+
+// Set a name for the cuda kernel operation being launched by the thread.
+void SetCurAnnotation(const char* anno);
+// Clear the name after the operation is done.
+void ClearCurAnnotation();
+
+}  // namespace platform
+}  // namespace paddle
diff --git a/paddle/fluid/platform/dynload/CMakeLists.txt b/paddle/fluid/platform/dynload/CMakeLists.txt
index 264b4ebf2c..567c137a55 100644
--- a/paddle/fluid/platform/dynload/CMakeLists.txt
+++ b/paddle/fluid/platform/dynload/CMakeLists.txt
@@ -1,4 +1,8 @@
 cc_library(dynamic_loader SRCS dynamic_loader.cc DEPS glog gflags enforce)
-nv_library(dynload_cuda SRCS cublas.cc cudnn.cc curand.cc nccl.cc
-           DEPS dynamic_loader)
+
+list(APPEND CUDA_SRCS cublas.cc cudnn.cc curand.cc nccl.cc)
+if (CUPTI_FOUND)
+  list(APPEND CUDA_SRCS cupti.cc)
+endif(CUPTI_FOUND)
+nv_library(dynload_cuda SRCS ${CUDA_SRCS} DEPS dynamic_loader)
 cc_library(dynload_warpctc SRCS warpctc.cc DEPS dynamic_loader warpctc)
diff --git a/paddle/fluid/platform/dynload/cupti.cc b/paddle/fluid/platform/dynload/cupti.cc
new file mode 100644
index 0000000000..035ae574ac
--- /dev/null
+++ b/paddle/fluid/platform/dynload/cupti.cc
@@ -0,0 +1,35 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#ifdef PADDLE_WITH_CUPTI
+
+#include "paddle/fluid/platform/dynload/cupti.h"
+#include "paddle/fluid/platform/enforce.h"
+
+namespace paddle {
+namespace platform {
+namespace dynload {
+
+std::once_flag cupti_dso_flag;
+void *cupti_dso_handle = nullptr;
+
+#define DEFINE_WRAP(__name) DynLoad__##__name __name
+
+CUPTI_ROUTINE_EACH(DEFINE_WRAP);
+
+}  // namespace dynload
+}  // namespace platform
+}  // namespace paddle
+
+#endif  // PADDLE_WITH_CUPTI
diff --git a/paddle/fluid/platform/dynload/cupti.h b/paddle/fluid/platform/dynload/cupti.h
new file mode 100644
index 0000000000..d8b73980f6
--- /dev/null
+++ b/paddle/fluid/platform/dynload/cupti.h
@@ -0,0 +1,86 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#ifdef PADDLE_WITH_CUPTI
+#include <cuda.h>
+#include <cupti.h>
+#include <dlfcn.h>
+#include <mutex>
+#include "paddle/fluid/platform/dynload/dynamic_loader.h"
+
+namespace paddle {
+namespace platform {
+namespace dynload {
+
+extern std::once_flag cupti_dso_flag;
+extern void *cupti_dso_handle;
+
+/**
+ * The following macro definition can generate structs
+ * (for each function) to dynamically load cupti routines
+ * via operator overloading.
+ *
+ * note: default dynamic linked libs
+ */
+#ifdef PADDLE_USE_DSO
+#define DECLARE_DYNAMIC_LOAD_CUPTI_WRAP(__name)                  \
+  struct DynLoad__##__name {                                     \
+    template <typename... Args>                                  \
+    inline CUptiResult CUPTIAPI operator()(Args... args) {       \
+      typedef CUptiResult CUPTIAPI (*cuptiFunc)(Args...);        \
+      std::call_once(cupti_dso_flag,                             \
+                     paddle::platform::dynload::GetCUPTIDsoHandle, \
+                     &cupti_dso_handle);                         \
+      void *p_##__name = dlsym(cupti_dso_handle, #__name);       \
+      return reinterpret_cast<cuptiFunc>(p_##__name)(args...);   \
+    }                                                            \
+  };                                                             \
+  extern DynLoad__##__name __name
+#else
+#define DECLARE_DYNAMIC_LOAD_CUPTI_WRAP(__name)            \
+  struct DynLoad__##__name {                               \
+    template <typename... Args>                            \
+    inline CUptiResult CUPTIAPI operator()(Args... args) { \
+      return __name(args...);                              \
+    }                                                      \
+  };                                                       \
+  extern DynLoad__##__name __name
+#endif
+
+#define CUPTI_ROUTINE_EACH(__macro)           \
+  __macro(cuptiActivityEnable);               \
+  __macro(cuptiActivityDisable);              \
+  __macro(cuptiActivityRegisterCallbacks);    \
+  __macro(cuptiActivityGetAttribute);         \
+  __macro(cuptiActivitySetAttribute);         \
+  __macro(cuptiGetTimestamp);                 \
+  __macro(cuptiActivityGetNextRecord);        \
+  __macro(cuptiGetResultString);              \
+  __macro(cuptiActivityGetNumDroppedRecords); \
+  __macro(cuptiActivityFlushAll);             \
+  __macro(cuptiFinalize);                     \
+  __macro(cuptiSubscribe);                    \
+  __macro(cuptiUnsubscribe);                  \
+  __macro(cuptiEnableCallback);
+
+CUPTI_ROUTINE_EACH(DECLARE_DYNAMIC_LOAD_CUPTI_WRAP);
+
+#undef DECLARE_DYNAMIC_LOAD_CUPTI_WRAP
+}  // namespace dynload
+}  // namespace platform
+}  // namespace paddle
+
+#endif  // PADDLE_WITH_CUPTI
diff --git a/paddle/fluid/platform/dynload/dynamic_loader.cc b/paddle/fluid/platform/dynload/dynamic_loader.cc
index db1eb41f28..8eb5966e57 100644
--- a/paddle/fluid/platform/dynload/dynamic_loader.cc
+++ b/paddle/fluid/platform/dynload/dynamic_loader.cc
@@ -40,10 +40,14 @@ DEFINE_string(nccl_dir, "",
               "libcurand. For instance, /usr/local/cuda/lib64. If default, "
               "dlopen will search cuda from LD_LIBRARY_PATH");

+DEFINE_string(cupti_dir, "", "Specify path for loading cupti.so.");
+
 namespace paddle {
 namespace platform {
 namespace dynload {

+static const char* cupti_lib_path = CUPTI_LIB_PATH;
+
 static inline std::string join(const std::string& part1,
                                const std::string& part2) {
   // directory separator
@@ -143,6 +147,18 @@ void GetCUDNNDsoHandle(void** dso_handle) {
 #endif
 }

+void GetCUPTIDsoHandle(void** dso_handle) {
+  std::string cupti_path = cupti_lib_path;
+  if (!FLAGS_cupti_dir.empty()) {
+    cupti_path = FLAGS_cupti_dir;
+  }
+#if defined(__APPLE__) || defined(__OSX__)
+  GetDsoHandleFromSearchPath(cupti_path, "libcupti.dylib", dso_handle, false);
+#else
+  GetDsoHandleFromSearchPath(cupti_path, "libcupti.so", dso_handle, false);
+#endif
+}
+
 void GetCurandDsoHandle(void** dso_handle) {
 #if defined(__APPLE__) || defined(__OSX__)
   GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcurand.dylib", dso_handle);
diff --git a/paddle/fluid/platform/dynload/dynamic_loader.h b/paddle/fluid/platform/dynload/dynamic_loader.h
index 4ffc335332..b5b9c4af91 100644
--- a/paddle/fluid/platform/dynload/dynamic_loader.h
+++ b/paddle/fluid/platform/dynload/dynamic_loader.h
@@ -34,6 +34,8 @@ void GetCublasDsoHandle(void** dso_handle);
  */
 void GetCUDNNDsoHandle(void** dso_handle);

+void GetCUPTIDsoHandle(void** dso_handle);
+
 /**
  * @brief load the DSO of CURAND
  *
diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc
index 4804df7966..201fc87294 100644
--- a/paddle/fluid/platform/profiler.cc
+++ b/paddle/fluid/platform/profiler.cc
@@ -15,7 +15,13 @@ limitations under the License. */
 #include "paddle/fluid/platform/profiler.h"
 #include <sys/time.h>
 #include <time.h>
+#ifdef PADDLE_WITH_CUDA
+#include <cuda.h>
+#endif  // PADDLE_WITH_CUDA
 #include "glog/logging.h"
+#include "paddle/fluid/framework/block_desc.h"
+#include "paddle/fluid/platform/device_tracer.h"
+#include "paddle/fluid/string/printf.h"

 namespace paddle {
 namespace platform {
@@ -126,15 +132,20 @@ void PopEvent(const std::string& name, const DeviceContext* dev_ctx) {
   GetEventList().Record(EventKind::kPopRange, name, g_thread_id, dev_ctx);
 }

-RecordEvent::RecordEvent(const std::string& name,
-                         const DeviceContext* dev_ctx) {
+RecordEvent::RecordEvent(const std::string& name, const DeviceContext* dev_ctx,
+                         int32_t block_id) {
   if (g_state == ProfilerState::kDisabled) return;
   dev_ctx_ = dev_ctx;
   name_ = name;
   PushEvent(name_, dev_ctx_);
+
+  full_name_ = string::Sprintf("%s_b%d", name, block_id);
+  // Maybe need the same push/pop behavior.
+  SetCurAnnotation(full_name_.c_str());
 }

 RecordEvent::~RecordEvent() {
+  ClearCurAnnotation();
   if (g_state == ProfilerState::kDisabled) return;
   PopEvent(name_, dev_ctx_);
 }
@@ -147,7 +158,14 @@ void EnableProfiler(ProfilerState state) {
                  "The profiling state should be disabled when calling ",
                  "EnableProfiler.");
   g_state = state;
-  g_profiler_place = (g_state == ProfilerState::kCUDA) ? "CUDA" : "CPU";
+  if (g_state == ProfilerState::kCUDA) {
+    g_profiler_place = "CUDA";
+  } else if (g_state == ProfilerState::kCPU) {
+    g_profiler_place = "CPU";
+  } else {
+    g_profiler_place = "All";
+    GetDeviceTracer()->Enable();
+  }
 #ifdef PADDLE_WITH_CUDA
   if (g_state == ProfilerState::kCUDA) {
     // Generate some dummy events first to reduce the startup overhead.
@@ -190,6 +208,12 @@ void DisableProfiler(EventSortingKey sorted_key) {
   Mark("_stop_profiler_", nullptr);
   g_state = ProfilerState::kDisabled;

+  DeviceTracer* tracer = GetDeviceTracer();
+  if (g_profiler_place == "All" && tracer && tracer->IsEnabled()) {
+    tracer->Disable();
+    tracer->GenProfile();
+  }
+
   std::vector<std::vector<Event>> all_events = GetAllEvents();
   ParseEvents(all_events, sorted_key);
   ResetProfiler();
@@ -254,9 +278,11 @@ void ParseEvents(std::vector<std::vector<Event>>& events,
       }

       if (rit != pushed_events.rend()) {
-        double event_time = (g_profiler_place == "CUDA")
-                                ? rit->CudaElapsedMs(events[i][j])
-                                : rit->CpuElapsedMs(events[i][j]);
+        double event_time =
+            (g_profiler_place == "CUDA" || g_profiler_place == "All")
+                ? rit->CudaElapsedMs(events[i][j])
+                : rit->CpuElapsedMs(events[i][j]);
+
         std::string event_name =
             "thread" + std::to_string(rit->thread_id()) + "::" + rit->name();
         max_name_width = std::max(max_name_width, event_name.size());
diff --git a/paddle/fluid/platform/profiler.h b/paddle/fluid/platform/profiler.h
index a3d22df700..830b86c88e 100644
--- a/paddle/fluid/platform/profiler.h
+++ b/paddle/fluid/platform/profiler.h
@@ -18,6 +18,7 @@ limitations under the License. */
 #include <forward_list>
 #include <vector>
 #include "paddle/fluid/platform/device_context.h"
+#include "paddle/fluid/platform/profiler.pb.h"

 namespace paddle {
 namespace platform {
@@ -93,6 +94,7 @@ enum ProfilerState {
   kDisabled,  // disabled state
   kCPU,       // CPU profiling state
   kCUDA,      // GPU profiling state
+  kAll,       // Profile both CPU and GPU. (Currently experimental).
 };

 void Mark(const std::string& name, const DeviceContext* dev_ctx);
@@ -102,7 +104,8 @@ void PushEvent(const std::string& name, const DeviceContext* dev_ctx);
 void PopEvent(const std::string& name, const DeviceContext* dev_ctx);

 struct RecordEvent {
-  explicit RecordEvent(const std::string& name, const DeviceContext* dev_ctx);
+  RecordEvent(const std::string& name, const DeviceContext* dev_ctx,
+              int32_t block_id);

   ~RecordEvent();

@@ -110,9 +113,12 @@ struct RecordEvent {
   const DeviceContext* dev_ctx_;
   // Event name
   std::string name_;
+  // Need to distinguish name by op type, block_id, program_id and perhaps
+  // different kernel invocations within an op.
+  std::string full_name_;
 };

-// Return the event list of all threads. Asummed the returned value calls
+// Return the event list of all threads. Assumed the returned value calls
 // event_lists, event_lists[i][j] represents the j-th Event of i-th thread.
 std::vector<std::vector<Event>> GetAllEvents();

diff --git a/paddle/fluid/platform/profiler.proto b/paddle/fluid/platform/profiler.proto
new file mode 100644
index 0000000000..65c707d261
--- /dev/null
+++ b/paddle/fluid/platform/profiler.proto
@@ -0,0 +1,30 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+syntax = "proto2";
+package paddle.platform.proto;
+
+message Event {
+  optional string name = 1;
+  optional uint64 start_ns = 2;
+  optional uint64 end_ns = 3;
+  optional uint32 device_id = 5;
+  optional uint32 stream_id = 6;
+}
+
+message Profile {
+  repeated Event events = 1;
+  optional uint64 start_ns = 2;
+  optional uint64 end_ns = 3;
+}
\ No newline at end of file
diff --git a/paddle/fluid/platform/profiler_test.cc b/paddle/fluid/platform/profiler_test.cc
index dae4d2206e..8bc480857a 100644
--- a/paddle/fluid/platform/profiler_test.cc
+++ b/paddle/fluid/platform/profiler_test.cc
@@ -95,7 +95,7 @@ TEST(RecordEvent, RecordEvent) {
    */
   for (int i = 1; i < 5; ++i) {
     std::string name = "evs_op_" + std::to_string(i);
-    RecordEvent record_event(name, dev_ctx);
+    RecordEvent record_event(name, dev_ctx, 0);
     int counter = 1;
     while (counter != i * 1000) counter++;
   }
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index b3e03f3347..ac7d1efb57 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -459,6 +459,7 @@ All parameter, weight, gradient are variables in Paddle.
       .value("kDisabled", platform::ProfilerState::kDisabled)
       .value("kCPU", platform::ProfilerState::kCPU)
       .value("kCUDA", platform::ProfilerState::kCUDA)
+      .value("kAll", platform::ProfilerState::kAll)
       .export_values();

   py::enum_<platform::EventSortingKey>(m, "EventSortingKey", py::arithmetic())
diff --git a/python/paddle/fluid/profiler.py b/python/paddle/fluid/profiler.py
index 4611986c99..59e75209d3 100644
--- a/python/paddle/fluid/profiler.py
+++ b/python/paddle/fluid/profiler.py
@@ -97,9 +97,14 @@ def profiler(state, sorted_key=None):
         The `ave` means sorting by the average execution time.
     """
-    if state not in ['CPU', 'GPU']:
-        raise ValueError("The state must be 'CPU' or 'GPU'.")
-    prof_state = core.ProfilerState.kCUDA if state == "GPU" else core.ProfilerState.kCPU
+    if state not in ['CPU', 'GPU', "All"]:
+        raise ValueError("The state must be 'CPU' or 'GPU' or 'All'.")
+    if state == "GPU":
+        prof_state = core.ProfilerState.kCUDA
+    elif state == "CPU":
+        prof_state = core.ProfilerState.kCPU
+    else:
+        prof_state = core.ProfilerState.kAll
     core.enable_profiler(prof_state)
     yield

diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt
index 9355f51311..f96c2ca4f0 100644
--- a/python/paddle/fluid/tests/unittests/CMakeLists.txt
+++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt
@@ -41,6 +41,7 @@ list(REMOVE_ITEM TEST_OPS test_while_op)
 list(REMOVE_ITEM TEST_OPS test_lod_array_length_op)
 list(REMOVE_ITEM TEST_OPS test_reorder_lod_tensor)
 list(REMOVE_ITEM TEST_OPS test_profiler)
+list(REMOVE_ITEM TEST_OPS test_nvprof)
 list(REMOVE_ITEM TEST_OPS test_normalization_wrapper)
 list(REMOVE_ITEM TEST_OPS test_executor_and_mul)
 list(REMOVE_ITEM TEST_OPS test_assign_value_op)
@@ -75,6 +76,7 @@ py_test_modules(test_while_op MODULES test_while_op)
 py_test_modules(test_lod_array_length_op MODULES test_lod_array_length_op)
 py_test_modules(test_reorder_lod_tensor MODULES test_reorder_lod_tensor)
 py_test_modules(test_profiler MODULES test_profiler)
+py_test_modules(test_nvprof MODULES test_nvprof)
 py_test_modules(test_normalization_wrapper MODULES test_normalization_wrapper)
 py_test_modules(test_executor_and_mul MODULES test_executor_and_mul)
 py_test_modules(test_assign_value_op MODULES test_assign_value_op)
diff --git a/python/paddle/fluid/tests/unittests/test_profiler.py b/python/paddle/fluid/tests/unittests/test_profiler.py
index d9444b50a2..f6f581ff7d 100644
--- a/python/paddle/fluid/tests/unittests/test_profiler.py
+++ b/python/paddle/fluid/tests/unittests/test_profiler.py
@@ -22,27 +22,9 @@ import paddle.fluid.core as core


 class TestProfiler(unittest.TestCase):
-    def test_nvprof(self):
-        if not fluid.core.is_compiled_with_cuda():
-            return
-        epoc = 8
-        dshape = [4, 3, 28, 28]
-        data = layers.data(name='data', shape=[3, 28, 28], dtype='float32')
-        conv = layers.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1])
-
-        place = fluid.CUDAPlace(0)
-        exe = fluid.Executor(place)
-        exe.run(fluid.default_startup_program())
-
-        output_file = 'cuda_profiler.txt'
-        with profiler.cuda_profiler(output_file, 'csv') as nvprof:
-            for i in range(epoc):
-                input = np.random.random(dshape).astype('float32')
-                exe.run(fluid.default_main_program(), feed={'data': input})
-        os.remove(output_file)
-
     def net_profiler(self, state):
-        if state == 'GPU' and not core.is_compiled_with_cuda():
+        enable_if_gpu = state == 'GPU' or state == "All"
+        if enable_if_gpu and not core.is_compiled_with_cuda():
             return
         startup_program = fluid.Program()
         main_program = fluid.Program()
@@ -85,6 +67,9 @@ class TestProfiler(unittest.TestCase):
     def test_cuda_profiler(self):
         self.net_profiler('GPU')

+    def test_all_profiler(self):
+        self.net_profiler('All')
+

 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/v2/fluid/tests/unittests/test_nvprof.py b/python/paddle/v2/fluid/tests/unittests/test_nvprof.py
new file mode 100644
index 0000000000..7252a1daf6
--- /dev/null
+++ b/python/paddle/v2/fluid/tests/unittests/test_nvprof.py
@@ -0,0 +1,46 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import os
+import numpy as np
+import paddle.v2.fluid as fluid
+import paddle.v2.fluid.profiler as profiler
+import paddle.v2.fluid.layers as layers
+import paddle.v2.fluid.core as core
+
+
+class TestNVProf(unittest.TestCase):
+    def test_nvprof(self):
+        if not fluid.core.is_compiled_with_cuda():
+            return
+        epoc = 8
+        dshape = [4, 3, 28, 28]
+        data = layers.data(name='data', shape=[3, 28, 28], dtype='float32')
+        conv = layers.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1])
+
+        place = fluid.CUDAPlace(0)
+        exe = fluid.Executor(place)
+        exe.run(fluid.default_startup_program())
+
+        output_file = 'cuda_profiler.txt'
+        with profiler.cuda_profiler(output_file, 'csv') as nvprof:
+            for i in range(epoc):
+                input = np.random.random(dshape).astype('float32')
+                exe.run(fluid.default_main_program(), feed={'data': input})
+        os.remove(output_file)
+
+
+if __name__ == '__main__':
+    unittest.main()
-- 
GitLab


From 9bbce4935331facdc22a2c7639468fc236ceae3f Mon Sep 17 00:00:00 2001
From: Xin Pan
Date: Mon, 26 Feb 2018 04:33:03 -0800
Subject: [PATCH 199/217] Fix version date.
---
 paddle/fluid/platform/device_tracer.cc | 2 +-
 paddle/fluid/platform/device_tracer.h  | 2 +-
 paddle/fluid/platform/dynload/cupti.cc | 2 +-
 paddle/fluid/platform/dynload/cupti.h  | 2 +-
 paddle/fluid/platform/profiler.proto   | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/paddle/fluid/platform/device_tracer.cc b/paddle/fluid/platform/device_tracer.cc
index c940ddbae3..87bbdfa5fd 100644
--- a/paddle/fluid/platform/device_tracer.cc
+++ b/paddle/fluid/platform/device_tracer.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

 licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/device_tracer.h b/paddle/fluid/platform/device_tracer.h
index 36734561a2..06cea84cc8 100644
--- a/paddle/fluid/platform/device_tracer.h
+++ b/paddle/fluid/platform/device_tracer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

 licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/dynload/cupti.cc b/paddle/fluid/platform/dynload/cupti.cc
index 035ae574ac..a25660c6ed 100644
--- a/paddle/fluid/platform/dynload/cupti.cc
+++ b/paddle/fluid/platform/dynload/cupti.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/dynload/cupti.h b/paddle/fluid/platform/dynload/cupti.h
index d8b73980f6..a79868c18c 100644
--- a/paddle/fluid/platform/dynload/cupti.h
+++ b/paddle/fluid/platform/dynload/cupti.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/paddle/fluid/platform/profiler.proto b/paddle/fluid/platform/profiler.proto
index 65c707d261..bdd86a0440 100644
--- a/paddle/fluid/platform/profiler.proto
+++ b/paddle/fluid/platform/profiler.proto
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
-- 
GitLab


From 16fc5e3893769f8cdcb37ddca742166134e114f9 Mon Sep 17 00:00:00 2001
From: chengduoZH
Date: Mon, 26 Feb 2018 22:43:03 +0800
Subject: [PATCH 200/217] refine cmake for cudnn

---
 paddle/fluid/operators/CMakeLists.txt | 34 +++++++++++++++------------
 1 file changed, 19 insertions(+), 15 deletions(-)

diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt
index 8f14fd376a..58ef4e0515 100644
--- a/paddle/fluid/operators/CMakeLists.txt
+++ b/paddle/fluid/operators/CMakeLists.txt
@@ -11,6 +11,7 @@ function(op_library TARGET)
   set(cc_srcs)
   set(cu_srcs)
   set(cu_cc_srcs)
+  set(cudnn_cu_cc_srcs)
  set(op_common_deps operator op_registry math_function)
  set(options "")
  set(oneValueArgs "")
@@ -34,6 +35,8 @@ function(op_library TARGET)
     foreach(src ${op_library_SRCS})
       if (${src} MATCHES ".*\\.cu$")
         list(APPEND cu_srcs ${src})
+      elseif(${src} MATCHES ".*_cudnn_op.cu.cc$")
+        list(APPEND cudnn_cu_cc_srcs ${src})
       elseif(${src} MATCHES ".*\\.cu.cc$")
         list(APPEND cu_cc_srcs ${src})
       elseif(${src} MATCHES ".*\\.cc$")
@@ -54,7 +57,7 @@ function(op_library TARGET)
     set(DEPS_OPS ${TARGET} ${DEPS_OPS} PARENT_SCOPE)
   endif()
   if (WITH_GPU)
-    nv_library(${TARGET} SRCS ${cc_srcs} ${cu_cc_srcs} ${cu_srcs} DEPS ${op_library_DEPS}
+    nv_library(${TARGET} SRCS ${cc_srcs} ${cu_cc_srcs} ${cudnn_cu_cc_srcs} ${cu_srcs} DEPS ${op_library_DEPS}
       ${op_common_deps})
   else()
     cc_library(${TARGET} SRCS ${cc_srcs} DEPS ${op_library_DEPS}
@@ -98,6 +101,12 @@ function(op_library TARGET)
     set(pybind_flag 1)
   endif()

+  # pybind USE_OP_DEVICE_KERNEL for CUDNN
+  list(LENGTH cudnn_cu_cc_srcs cudnn_cu_cc_srcs_len)
+  if (${cudnn_cu_cc_srcs_len} GREATER 0)
+    file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(${TARGET}, CUDNN);\n")
+  endif()
+
   # pybind USE_OP
   if (${pybind_flag} EQUAL 0)
     file(APPEND ${pybind_file} "USE_OP(${TARGET});\n")
@@ -159,21 +168,16 @@ op_library(create_reader_op DEPS reader)

 # Regist multiple Kernel to pybind
 if (WITH_GPU)
-
-op_library(conv_op SRCS conv_op.cc conv_op.cu.cc conv_cudnn_op.cu.cc DEPS
-	vol2col depthwise_conv)
-
-op_library(edit_distance_op SRCS edit_distance_op.cc edit_distance_op.cu DEPS math_function)
-op_library(pool_op SRCS pool_op.cc pool_op.cu.cc pool_cudnn_op.cu.cc DEPS pooling)
-op_library(conv_transpose_op SRCS conv_transpose_op.cc conv_transpose_op.cu.cc
-	conv_transpose_cudnn_op.cu.cc DEPS vol2col)
-file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(conv2d, CUDNN);\n")
-file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(pool2d, CUDNN);\n")
-file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(conv2d_transpose, CUDNN);\n")
+  op_library(conv_op SRCS conv_op.cc conv_op.cu.cc conv_cudnn_op.cu.cc DEPS
+    vol2col depthwise_conv)
+  op_library(edit_distance_op SRCS edit_distance_op.cc edit_distance_op.cu DEPS math_function)
+  op_library(pool_op SRCS pool_op.cc pool_op.cu.cc pool_cudnn_op.cu.cc DEPS pooling)
+  op_library(conv_transpose_op SRCS conv_transpose_op.cc conv_transpose_op.cu.cc
+    conv_transpose_cudnn_op.cu.cc DEPS vol2col)
 else()
-op_library(conv_op SRCS conv_op.cc DEPS vol2col)
-op_library(pool_op SRCS pool_op.cc DEPS pooling)
-op_library(conv_transpose_op SRCS conv_transpose_op.cc DEPS vol2col)
+  op_library(conv_op SRCS conv_op.cc DEPS vol2col)
+  op_library(pool_op SRCS pool_op.cc DEPS pooling)
+  op_library(conv_transpose_op SRCS conv_transpose_op.cc DEPS vol2col)
 endif()

 cc_library(batch_size_like SRCS batch_size_like.cc DEPS op_registry)
-- 
GitLab


From 4948f7b3fe20ff1aa87bd23d84e4fdba42a88e73 Mon Sep 17 00:00:00 2001
From: qingqing01
Date: Tue, 27 Feb 2018 10:33:56 +0800
Subject: [PATCH 201/217] Enhance bipartite_match_op to support argmax matching
 after bipartite matching. (#8580)

* Enhance bipartite_match_op to support argmax matching after bipartite
  matching.

* Fix typo error.
---
 paddle/fluid/operators/bipartite_match_op.cc  | 57 ++++++++++++++++++-
 python/paddle/fluid/layers/detection.py       | 19 ++++++-
 .../unittests/test_bipartite_match_op.py      | 44 +++++++++-
 3 files changed, 112 insertions(+), 8 deletions(-)

diff --git a/paddle/fluid/operators/bipartite_match_op.cc b/paddle/fluid/operators/bipartite_match_op.cc
index c536cf6b6b..2b3f26c0a8 100644
--- a/paddle/fluid/operators/bipartite_match_op.cc
+++ b/paddle/fluid/operators/bipartite_match_op.cc
@@ -94,6 +94,38 @@ class BipartiteMatchKernel : public framework::OpKernel<T> {
     }
   }

+  void ArgMaxMatch(const Tensor& dist, int* match_indices, T* match_dist,
+                   T overlap_threshold) const {
+    constexpr T kEPS = static_cast<T>(1e-6);
+    int64_t row = dist.dims()[0];
+    int64_t col = dist.dims()[1];
+    auto* dist_data = dist.data<T>();
+    for (int64_t j = 0; j < col; ++j) {
+      if (match_indices[j] != -1) {
+        // the j-th column has been matched to one entity.
+        continue;
+      }
+      int max_row_idx = -1;
+      T max_dist = -1;
+      for (int i = 0; i < row; ++i) {
+        T dist = dist_data[i * col + j];
+        if (dist < kEPS) {
+          // distance is 0 between i-th row and j-th column
+          continue;
+        }
+        if (dist >= overlap_threshold && dist > max_dist) {
+          max_row_idx = i;
+          max_dist = dist;
+        }
+      }
+      if (max_row_idx != -1) {
+        PADDLE_ENFORCE_EQ(match_indices[j], -1);
+        match_indices[j] = max_row_idx;
+        match_dist[j] = max_dist;
+      }
+    }
+  }
+
   void Compute(const framework::ExecutionContext& context) const override {
     auto* dist_mat = context.Input<LoDTensor>("DistMat");
     auto* match_indices = context.Output<Tensor>("ColToRowMatchIndices");
@@ -120,13 +152,21 @@ class BipartiteMatchKernel : public framework::OpKernel<T> {
     int* indices = match_indices->data<int>();
     T* dist = match_dist->data<T>();
+    auto type = context.Attr<std::string>("match_type");
+    auto threshold = context.Attr<float>("dist_threshold");
     if (n == 1) {
       BipartiteMatch(*dist_mat, indices, dist);
+      if (type == "per_prediction") {
+        ArgMaxMatch(*dist_mat, indices, dist, threshold);
+      }
     } else {
       auto lod = dist_mat->lod().back();
       for (size_t i = 0; i < lod.size() - 1; ++i) {
         Tensor one_ins = dist_mat->Slice(lod[i], lod[i + 1]);
         BipartiteMatch(one_ins, indices + i * col, dist + i * col);
+        if (type == "per_prediction") {
+          ArgMaxMatch(one_ins, indices + i * col, dist + i * col, threshold);
+        }
       }
     }
   }
@@ -147,6 +187,19 @@ class BipartiteMatchOpMaker : public framework::OpProtoAndCheckerMaker {
              "This tensor can contain LoD information to represent a batch of "
             "inputs. One instance of this batch can contain different numbers of "
             "entities.");
+    AddAttr<std::string>(
+        "match_type",
+        "(string, default: bipartite) "
+        "The type of matching method, should be 'bipartite' or "
+        "'per_prediction', 'bipartite' by default.")
+        .SetDefault("bipartite")
+        .InEnum({"bipartite", "per_prediction"});
+    AddAttr<float>(
+        "dist_threshold",
+        "(float, default: 0.5) "
+        "If `match_type` is 'per_prediction', this threshold is to determine "
+        "the extra matching bboxes based on the maximum distance.")
+        .SetDefault(0.5);
     AddOutput("ColToRowMatchIndices",
               "(Tensor) A 2-D Tensor with shape [N, M] in int type. "
              "N is the batch size. If ColToRowMatchIndices[i][j] is -1, it "
@@ -168,10 +221,10 @@ distance matrix. For input 2D matrix, the bipartite matching algorithm can find
 the matched column for each row, also can find the matched row for each column.
And this operator only calculates matched indices from column to row. For each instance, the number of matched indices is the number of -of columns of the input ditance matrix. +of columns of the input distance matrix. There are two outputs to save matched indices and distance. -A simple description, this algothrim matched the best (maximum distance) +In short, this algorithm matches the best (maximum distance) row entity to the column entity and the matched indices are not duplicated in each row of ColToRowMatchIndices. If the column entity is not matched to any row entity, set -1 in ColToRowMatchIndices. diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py index 5ae4da1ea3..25522249c8 100644 --- a/python/paddle/fluid/layers/detection.py +++ b/python/paddle/fluid/layers/detection.py @@ -132,7 +132,10 @@ def detection_output(scores, return nmsed_outs -def bipartite_match(dist_matrix, name=None): +def bipartite_match(dist_matrix, + match_type=None, + dist_threshold=None, + name=None): """ **Bipartite matching operator** @@ -164,6 +167,11 @@ def bipartite_match(dist_matrix, name=None): This tensor can contain LoD information to represent a batch of inputs. One instance of this batch can contain different numbers of entities. + match_type(string|None): The type of matching method, should be + 'bipartite' or 'per_prediction', 'bipartite' by default. + dist_threshold(float|None): If `match_type` is 'per_prediction', + this threshold is to determine the extra matching bboxes based + on the maximum distance, 0.5 by default. Returns: match_indices(Variable): A 2-D Tensor with shape [N, M] in int type. N is the batch size. If match_indices[i][j] is -1, it @@ -183,6 +191,10 @@ def bipartite_match(dist_matrix, name=None): helper.append_op( type='bipartite_match', inputs={'DistMat': dist_matrix}, + attrs={ + 'match_type': match_type, + 'dist_threshold': dist_threshold, + }, outputs={ 'ColToRowMatchIndices': match_indices, 'ColToRowMatchDist': match_distance @@ -333,7 +345,7 @@ def ssd_loss(location, loc_loss_weight (float): Weight for localization loss, 1.0 by default. conf_loss_weight (float): Weight for confidence loss, 1.0 by default. match_type (str): The type of matching method during training, should - be 'bipartite' or 'per_prediction'. + be 'bipartite' or 'per_prediction', 'per_prediction' by default. mining_type (str): The hard example mining type, should be 'hard_example' or 'max_negative', currently only `max_negative` is supported. @@ -381,7 +393,8 @@ def ssd_loss(location, # 1.1 Compute IOU similarity between ground-truth boxes and prior boxes. iou = iou_similarity(x=gt_box, y=prior_box) # 1.2 Compute matched bounding box by bipartite matching algorithm. - matched_indices, matched_dist = bipartite_match(iou) + matched_indices, matched_dist = bipartite_match(iou, match_type, + overlap_threshold) # 2. Compute confidence for mining hard examples # 2.1.
Get the target label based on matched indices diff --git a/python/paddle/fluid/tests/unittests/test_bipartite_match_op.py b/python/paddle/fluid/tests/unittests/test_bipartite_match_op.py index 9f9af2f55e..f7461ee6da 100644 --- a/python/paddle/fluid/tests/unittests/test_bipartite_match_op.py +++ b/python/paddle/fluid/tests/unittests/test_bipartite_match_op.py @@ -46,7 +46,20 @@ def bipartite_match(distance, match_indices, match_dist): idx += 1 -def batch_bipartite_match(distance, lod): +def argmax_match(distance, match_indices, match_dist, threshold): + r, c = distance.shape + for j in xrange(c): + if match_indices[j] != -1: + continue + col_dist = distance[:, j] + indices = np.argwhere(col_dist >= threshold).flatten() + if len(indices) < 1: + continue + match_indices[j] = indices[np.argmax(col_dist[indices])] + match_dist[j] = col_dist[match_indices[j]] + + +def batch_bipartite_match(distance, lod, match_type=None, dist_threshold=None): """Bipartite Matching algorithm for batch input. Arg: distance (numpy.array) : The distance of two entries with shape [M, N]. @@ -59,6 +72,9 @@ def batch_bipartite_match(distance, lod): for i in range(len(lod) - 1): bipartite_match(distance[lod[i]:lod[i + 1], :], match_indices[i, :], match_dist[i, :]) + if match_type == 'per_prediction': + argmax_match(distance[lod[i]:lod[i + 1], :], match_indices[i, :], + match_dist[i, :], dist_threshold) return match_indices, match_dist @@ -71,8 +87,8 @@ class TestBipartiteMatchOpWithLoD(OpTest): self.inputs = {'DistMat': (dist, lod)} self.outputs = { - 'ColToRowMatchIndices': (match_indices), - 'ColToRowMatchDist': (match_dist), + 'ColToRowMatchIndices': match_indices, + 'ColToRowMatchDist': match_dist, } def test_check_output(self): @@ -96,5 +112,27 @@ class TestBipartiteMatchOpWithoutLoD(OpTest): self.check_output() +class TestBipartiteMatchOpWithPerPredictionType(OpTest): + def setUp(self): + self.op_type = 'bipartite_match' + lod = [[0, 5, 11, 23]] + dist = np.random.random((23, 237)).astype('float32') + match_indices, match_dist = batch_bipartite_match(dist, lod[0], + 'per_prediction', 0.5) + + self.inputs = {'DistMat': (dist, lod)} + self.outputs = { + 'ColToRowMatchIndices': match_indices, + 'ColToRowMatchDist': match_dist, + } + self.attrs = { + 'match_type': 'per_prediction', + 'dist_threshold': 0.5, + } + + def test_check_output(self): + self.check_output() + + if __name__ == '__main__': unittest.main() -- GitLab From 62fe2f28a19a1c67f0234e35a1f302bda3f3b2c1 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Tue, 27 Feb 2018 10:06:23 +0800 Subject: [PATCH 202/217] follow comments --- paddle/fluid/operators/CMakeLists.txt | 37 ++++++++++----------------- 1 file changed, 14 insertions(+), 23 deletions(-) diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt index 58ef4e0515..0bda0e05e0 100644 --- a/paddle/fluid/operators/CMakeLists.txt +++ b/paddle/fluid/operators/CMakeLists.txt @@ -12,6 +12,7 @@ function(op_library TARGET) set(cu_srcs) set(cu_cc_srcs) set(cudnn_cu_cc_srcs) + set(CUDNN_FILE) set(op_common_deps operator op_registry math_function) set(options "") set(oneValueArgs "") @@ -31,6 +32,10 @@ function(op_library TARGET) if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.cu) list(APPEND cu_srcs ${TARGET}.cu) endif() + string(REPLACE "_op" "_cudnn_op" CUDNN_FILE "${TARGET}") + if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${CUDNN_FILE}.cu.cc) + list(APPEND cudnn_cu_cc_srcs ${CUDNN_FILE}.cu.cc) + endif() else() foreach(src ${op_library_SRCS}) if (${src} MATCHES ".*\\.cu$") @@ -103,7 
+108,7 @@ function(op_library TARGET) # pybind USE_OP_DEVICE_KERNEL for CUDNN list(LENGTH cudnn_cu_cc_srcs cudnn_cu_cc_srcs_len) - if (${cudnn_cu_cc_srcs_len} GREATER 0) + if (WITH_GPU AND ${cudnn_cu_cc_srcs_len} GREATER 0) file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(${TARGET}, CUDNN);\n") endif() @@ -161,38 +166,24 @@ op_library(lstm_op DEPS sequence2batch lstm_compute) op_library(lstmp_op DEPS sequence2batch lstm_compute) op_library(gru_op DEPS sequence2batch gru_compute) op_library(recurrent_op DEPS executor) -op_library(warpctc_op DEPS dynload_warpctc sequence_padding sequence_scale math_function) +op_library(warpctc_op DEPS dynload_warpctc sequence_padding sequence_scale) op_library(cos_sim_op DEPS cos_sim_functor) op_library(parallel_do_op DEPS executor) op_library(create_reader_op DEPS reader) # Regist multiple Kernel to pybind if (WITH_GPU) - op_library(conv_op SRCS conv_op.cc conv_op.cu.cc conv_cudnn_op.cu.cc DEPS - vol2col depthwise_conv) - op_library(edit_distance_op SRCS edit_distance_op.cc edit_distance_op.cu DEPS math_function) - op_library(pool_op SRCS pool_op.cc pool_op.cu.cc pool_cudnn_op.cu.cc DEPS pooling) - op_library(conv_transpose_op SRCS conv_transpose_op.cc conv_transpose_op.cu.cc - conv_transpose_cudnn_op.cu.cc DEPS vol2col) + op_library(conv_op DEPS vol2col depthwise_conv) else() - op_library(conv_op SRCS conv_op.cc DEPS vol2col) - op_library(pool_op SRCS pool_op.cc DEPS pooling) - op_library(conv_transpose_op SRCS conv_transpose_op.cc DEPS vol2col) + op_library(conv_op DEPS vol2col) endif() +op_library(pool_op DEPS pooling) +op_library(conv_transpose_op DEPS vol2col) cc_library(batch_size_like SRCS batch_size_like.cc DEPS op_registry) - -op_library(fill_constant_batch_size_like_op - SRCS fill_constant_batch_size_like_op.cc fill_constant_batch_size_like_op.cu.cc - DEPS batch_size_like) - -op_library(uniform_random_batch_size_like_op - SRCS uniform_random_batch_size_like_op.cc - DEPS batch_size_like uniform_random_op) - -op_library(gaussian_random_batch_size_like_op - SRCS gaussian_random_batch_size_like_op.cc - DEPS batch_size_like gaussian_random_op) +op_library(fill_constant_batch_size_like_op DEPS batch_size_like) +op_library(uniform_random_batch_size_like_op DEPS batch_size_like uniform_random_op) +op_library(gaussian_random_batch_size_like_op DEPS batch_size_like gaussian_random_op) # FIXME(typhoonzero): save/load depends lodtensor serialization functions op_library(save_op DEPS lod_tensor) -- GitLab From 1783ab1dca5e7d4cdf41b3cdfe6ea9127da15911 Mon Sep 17 00:00:00 2001 From: Xin Pan Date: Mon, 26 Feb 2018 17:23:56 -0800 Subject: [PATCH 203/217] move test_nvprof to new location --- .../paddle/{v2 => }/fluid/tests/unittests/test_nvprof.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) rename python/paddle/{v2 => }/fluid/tests/unittests/test_nvprof.py (90%) diff --git a/python/paddle/v2/fluid/tests/unittests/test_nvprof.py b/python/paddle/fluid/tests/unittests/test_nvprof.py similarity index 90% rename from python/paddle/v2/fluid/tests/unittests/test_nvprof.py rename to python/paddle/fluid/tests/unittests/test_nvprof.py index 7252a1daf6..226e5e5d11 100644 --- a/python/paddle/v2/fluid/tests/unittests/test_nvprof.py +++ b/python/paddle/fluid/tests/unittests/test_nvprof.py @@ -15,10 +15,10 @@ import unittest import os import numpy as np -import paddle.v2.fluid as fluid -import paddle.v2.fluid.profiler as profiler -import paddle.v2.fluid.layers as layers -import paddle.v2.fluid.core as core +import paddle.fluid as fluid +import 
paddle.fluid.profiler as profiler +import paddle.fluid.layers as layers +import paddle.fluid.core as core class TestNVProf(unittest.TestCase): -- GitLab From ef35c4ed1a7a021fb199691e2d2c7ef30e5ee559 Mon Sep 17 00:00:00 2001 From: gongweibao Date: Tue, 27 Feb 2018 14:41:29 +0800 Subject: [PATCH 204/217] Tensorflow benchmark (#8522) Tensorflow benchmark --- benchmark/cluster/vgg16/Dockerfile | 35 +- benchmark/cluster/vgg16/fluid_trainer.yaml | 2 +- benchmark/cluster/vgg16/tf_k8s | 82 +++++ benchmark/cluster/vgg16/tf_pserver.yaml | 56 ++++ benchmark/cluster/vgg16/tf_trainer.yaml | 58 ++++ benchmark/cluster/vgg16/vgg16_fluid.py | 35 +- benchmark/cluster/vgg16/vgg16_tf.py | 362 +++++++++++++++++++++ 7 files changed, 609 insertions(+), 21 deletions(-) create mode 100644 benchmark/cluster/vgg16/tf_k8s create mode 100644 benchmark/cluster/vgg16/tf_pserver.yaml create mode 100644 benchmark/cluster/vgg16/tf_trainer.yaml create mode 100644 benchmark/cluster/vgg16/vgg16_tf.py diff --git a/benchmark/cluster/vgg16/Dockerfile b/benchmark/cluster/vgg16/Dockerfile index 98356cd761..13ad8e1b62 100644 --- a/benchmark/cluster/vgg16/Dockerfile +++ b/benchmark/cluster/vgg16/Dockerfile @@ -1,18 +1,35 @@ -#FROM python:2.7.14 FROM nvidia/cuda:8.0-cudnn5-runtime-ubuntu16.04 -RUN apt-get update && apt-get install -y python -RUN pip install -U kubernetes opencv-python && apt-get update -y && apt-get install -y iputils-ping libgtk2.0-dev -# NOTE: By default CI built wheel packages turn WITH_DISTRIBUTE=OFF, -# so we must build one with distribute support to install in this image. + +# you can get mirror list here: +# https://launchpad.net/ubuntu/+archivemirrors +ARG UBUNTU_MIRROR +RUN /bin/bash -c 'if [[ -n ${UBUNTU_MIRROR} ]]; then sed -i 's#http://archive.ubuntu.com/ubuntu#${UBUNTU_MIRROR}#g' /etc/apt/sources.list; fi' + +RUN apt-get update && apt-get install -y python python-dev python-pip iputils-ping libgtk2.0-dev +RUN pip install -U kubernetes opencv-python + +RUN pip install paddlepaddle +# if the network is slow, you may need to add a proxy here. +# ENV https_proxy= RUN sh -c 'echo "import paddle.v2 as paddle\npaddle.dataset.cifar.train10()" | python' RUN pip uninstall -y paddlepaddle +# unset the proxy if it was set. +# ENV https_proxy="" + +# NOTE: By default CI built wheel packages turn WITH_DISTRIBUTE=OFF, +# so we must build one with distribute support to install in this image.
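+# (illustrative only; the exact flags and wheel path are assumptions, adjust to
+# your checkout) such a wheel can be built from source roughly as:
+#   cmake .. -DWITH_DISTRIBUTE=ON -DWITH_GPU=ON && make -j
+# and the resulting .whl copied next to this Dockerfile for the ADD below.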
+ADD *.whl / +RUN pip install /*.whl && rm -f /*.whl +ENV LD_LIBRARY_PATH=/usr/local/lib + +# tf k8s +RUN pip install tensorflow==1.4.0 +ADD tf_k8s /usr/bin +RUN chmod +x /usr/bin/tf_k8s +ADD vgg16_tf.py /workspace/ # below lines may change a lot for debugging ADD https://raw.githubusercontent.com/PaddlePaddle/cloud/develop/docker/paddle_k8s /usr/bin ADD https://raw.githubusercontent.com/PaddlePaddle/cloud/develop/docker/k8s_tools.py /root -ADD *.whl / -RUN pip install /*.whl && rm -f /*.whl && \ -chmod +x /usr/bin/paddle_k8s -ENV LD_LIBRARY_PATH=/usr/local/lib +RUN chmod +x /usr/bin/paddle_k8s ADD vgg16_fluid.py vgg16_v2.py /workspace/ diff --git a/benchmark/cluster/vgg16/fluid_trainer.yaml b/benchmark/cluster/vgg16/fluid_trainer.yaml index 0a0ed25ebe..3d56caac00 100644 --- a/benchmark/cluster/vgg16/fluid_trainer.yaml +++ b/benchmark/cluster/vgg16/fluid_trainer.yaml @@ -11,7 +11,7 @@ spec: paddle-job: vgg16job spec: imagePullSecrets: - - name: job-registry-secret + - name: job-registry-secret hostNetwork: true containers: - name: trainer diff --git a/benchmark/cluster/vgg16/tf_k8s b/benchmark/cluster/vgg16/tf_k8s new file mode 100644 index 0000000000..4fc263d5f6 --- /dev/null +++ b/benchmark/cluster/vgg16/tf_k8s @@ -0,0 +1,82 @@ +#!/bin/bash +check_trainer_ret() { + ret=$1 + stdbuf -oL echo "job returned $ret...setting pod return message..." + stdbuf -oL echo "===============================" + + if [ $ret -eq 136 ] ; then + echo "Error Arithmetic Operation (Floating Point Exception)" > /dev/termination-log + elif [ $ret -eq 139 ] ; then + echo "Segmentation Fault" > /dev/termination-log + elif [ $ret -eq 1 ] ; then + echo "General Error" > /dev/termination-log + elif [ $ret -eq 134 ] ; then + echo "Program Abort" > /dev/termination-log + fi + stdbuf -oL echo "termination log written..." + exit $ret +} + +g_pservers="" +g_trainers="" + +wait_running_pods(){ + pserver_label="tf-job-pserver=${JOB_NAME}" + trainer_label="tf-job-trainer=${JOB_NAME}" + + stdbuf -oL python /root/k8s_tools.py wait_pods_running ${pserver_label} ${PSERVERS_NUM} + stdbuf -oL python /root/k8s_tools.py wait_pods_running ${trainer_label} ${TRAINERS_NUM} + + g_pservers=$(python /root/k8s_tools.py fetch_endpoints ${pserver_label} ${PORT}) + g_trainers=$(python /root/k8s_tools.py fetch_endpoints ${trainer_label} ${PORT}) +} + +start_tf_pserver(){ + wait_running_pods + + label="tf-job-pserver=${JOB_NAME}" + pserver_id=$(python /root/k8s_tools.py fetch_id ${label}) + + cmd="${ENTRY} --ps_hosts=${g_pservers} --worker_hosts=${g_trainers} \ + --job_name=${TF_JOB_NAME} --task_index=${pserver_id}" + + stdbuf -oL sh -c "cd ${TRAINER_PACKAGE} && ${cmd}" +} + +start_tf_trainer(){ + wait_running_pods + + label="tf-job-trainer=${JOB_NAME}" + trainer_id=$(python /root/k8s_tools.py fetch_id ${label}) + + cmd="${ENTRY} --ps_hosts=${g_pservers} --worker_hosts=${g_trainers} \ + --job_name=${TF_JOB_NAME} --task_index=${trainer_id} --batch_size=${BATCH_SIZE}" + + stdbuf -oL sh -c "cd ${TRAINER_PACKAGE} && ${cmd}" + check_trainer_ret $?
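+  # (illustrative) with the env set in tf_trainer.yaml below, ${cmd} above
+  # expands to roughly: python vgg16_tf.py --ps_hosts=<ip:port,...> \
+  #   --worker_hosts=<ip:port,...> --job_name=worker --task_index=<id> --batch_size=128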
+} + +start_tf(){ + if [[ "${TF_JOB_NAME}" == "worker" ]]; then + start_tf_trainer + else + start_tf_pserver + fi +} + +usage() { + echo "usage: tf_k8s []:" + echo " start_tf Start tensorflow jobs" +} + +case "$1" in + start_tf) + start_tf + ;; + --help) + usage + ;; + *) + usage + ;; +esac diff --git a/benchmark/cluster/vgg16/tf_pserver.yaml b/benchmark/cluster/vgg16/tf_pserver.yaml new file mode 100644 index 0000000000..5e37c70081 --- /dev/null +++ b/benchmark/cluster/vgg16/tf_pserver.yaml @@ -0,0 +1,56 @@ +apiVersion: extensions/v1beta1 +kind: ReplicaSet +metadata: + name: vgg16job-tf-pserver +spec: + replicas: 10 + template: + metadata: + labels: + tf-job-pserver: vgg16job-tf + spec: + hostNetwork: true + imagePullSecrets: + - name: job-registry-secret + containers: + - name: pserver + image: "registry.baidu.com/paddlepaddle/fluid_benchmark_tf:vgg16" + imagePullPolicy: Always + command: ["tf_k8s", "start_tf"] + ports: + - name: jobport-30236 + containerPort: 30236 + env: + - name: PORT + value: "32036" + - name: ENTRY + value: "python vgg16_tf.py" + - name: JOB_NAME + value: vgg16job-tf + - name: PSERVERS_NUM + value: "10" + - name: TF_JOB_NAME + value: "ps" + - name: TRAINERS_NUM + value: "20" + - name: BATCH_SIZE + value: "128" + - name: TRAINER_PACKAGE + value: "/workspace" + - name: NUM_PASSES + value: "1" + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: "metadata.namespace" + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: "status.podIP" + resources: + requests: + memory: 10Gi + cpu: 4 + limits: + memory: 10Gi + cpu: 4 diff --git a/benchmark/cluster/vgg16/tf_trainer.yaml b/benchmark/cluster/vgg16/tf_trainer.yaml new file mode 100644 index 0000000000..08795df3ad --- /dev/null +++ b/benchmark/cluster/vgg16/tf_trainer.yaml @@ -0,0 +1,58 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: vgg16job-tf-trainer +spec: + parallelism: 20 + completions: 20 + template: + metadata: + labels: + tf-job-trainer: vgg16job-tf + spec: + imagePullSecrets: + - name: job-registry-secret + hostNetwork: true + containers: + - name: trainer + image: "registry.baidu.com/paddlepaddle/fluid_benchmark_tf:vgg16" + imagePullPolicy: Always + command: ["tf_k8s", "start_tf"] + ports: + - name: jobport-30236 + containerPort: 30236 + env: + - name: PORT + value: "32036" + - name: JOB_NAME + value: vgg16job-tf + - name: TF_JOB_NAME + value: "worker" + - name: ENTRY + value: "python vgg16_tf.py" + - name: PSERVERS_NUM + value: "10" + - name: BATCH_SIZE + value: "128" + - name: TRAINERS_NUM + value: "20" + - name: TRAINER_PACKAGE + value: "/workspace" + - name: NUM_PASSES + value: "1" + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: "metadata.namespace" + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: "status.podIP" + resources: + requests: + memory: 40Gi + cpu: 2 + limits: + memory: 40Gi + cpu: 2 + restartPolicy: Never diff --git a/benchmark/cluster/vgg16/vgg16_fluid.py b/benchmark/cluster/vgg16/vgg16_fluid.py index 99395699f2..7323241f4d 100644 --- a/benchmark/cluster/vgg16/vgg16_fluid.py +++ b/benchmark/cluster/vgg16/vgg16_fluid.py @@ -68,6 +68,21 @@ parser.add_argument( type=str2bool, default=True, help='Whether to run as local mode.') + +parser.add_argument( + "--ps_hosts", + type=str, + default="", + help="Comma-separated list of hostname:port pairs") +parser.add_argument( + "--trainer_hosts", + type=str, + default="", + help="Comma-separated list of hostname:port pairs") + +# Flags for defining the tf.train.Server +parser.add_argument( + "--task_index", type=int, default=0, 
help="Index of task within the job") args = parser.parse_args() @@ -180,8 +195,9 @@ def main(): iters += 1 num_samples += len(data) print( - "Pass = %d, Iters = %d, Loss = %f, Accuracy = %f, spent %f" - % (pass_id, iters, loss, acc, time.time() - ts) + "Pass = %d, Iters = %d, Loss = %f, Accuracy = %f, Speed = %.2f img/s" + % (pass_id, iters, loss, acc, + len(data) / (time.time() - ts)) ) # The accuracy is the accumulation of batches, but not the current batch. pass_elapsed = time.time() - start_time @@ -209,27 +225,24 @@ def main(): batch_size=args.batch_size) train_loop(exe, fluid.default_main_program()) else: - pserver_ips = os.getenv("PADDLE_INIT_PSERVERS") # all pserver endpoints - eplist = [] - for ip in pserver_ips.split(","): - eplist.append(':'.join([ip, "6174"])) - pserver_endpoints = ",".join(eplist) - print("pserver endpoints: ", pserver_endpoints) trainers = int(os.getenv("TRAINERS")) # total trainer count print("trainers total: ", trainers) - current_endpoint = os.getenv( - "POD_IP") + ":6174" # current pserver endpoint + training_role = os.getenv( "TRAINING_ROLE", "TRAINER") # get the training role: trainer/pserver + t = fluid.DistributeTranspiler() t.transpile( optimize_ops, params_grads, - pservers=pserver_endpoints, + trainer_id=args.task_index, + pservers=args.ps_hosts, trainers=trainers) if training_role == "PSERVER": + current_endpoint = os.getenv("POD_IP") + ":" + os.getenv( + "PADDLE_INIT_PORT") if not current_endpoint: print("need env SERVER_ENDPOINT") exit(1) diff --git a/benchmark/cluster/vgg16/vgg16_tf.py b/benchmark/cluster/vgg16/vgg16_tf.py new file mode 100644 index 0000000000..996df0e314 --- /dev/null +++ b/benchmark/cluster/vgg16/vgg16_tf.py @@ -0,0 +1,362 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""VGG16 benchmark in TensorFlow +You can get distribution example template structure here: +https://medium.com/clusterone/how-to-write-distributed-tensorflow-code-with-an-example-on-tensorport-70bf3306adcb +https://www.tensorflow.org/deploy/distributed +""" + +import tensorflow as tf +import paddle.v2 as paddle +import numpy as np +import argparse +import time + +parser = argparse.ArgumentParser(description=__doc__) +parser.add_argument( + '--batch_size', type=int, default=128, help="Batch size for training.") +parser.add_argument( + '--learning_rate', + type=float, + default=1e-3, + help="Learning rate for training.") +parser.add_argument('--num_passes', type=int, default=50, help="No. of passes.") +parser.add_argument( + '--device', + type=str, + default='CPU', + choices=['CPU', 'GPU'], + help="The device type.") +parser.add_argument( + '--data_format', + type=str, + default='NHWC', + choices=['NCHW', 'NHWC'], + help='The data order, NCHW=[batch, channels, height, width].' 
+ 'Only support NHWC right now.') +parser.add_argument( + '--data_set', + type=str, + default='cifar10', + choices=['cifar10', 'flowers'], + help='Optional dataset for benchmark.') + +parser.add_argument( + "--ps_hosts", + type=str, + default="", + help="Comma-separated list of hostname:port pairs") +parser.add_argument( + "--worker_hosts", + type=str, + default="", + help="Comma-separated list of hostname:port pairs") +parser.add_argument( + "--job_name", type=str, default="", help="One of 'worker', 'ps'") +# Flags for defining the tf.train.Server +parser.add_argument( + "--task_index", type=int, default=0, help="Index of task within the job") + +args = parser.parse_args() + + +class VGG16Model(object): + def __init__(self): + self.parameters = [] + + def batch_norm_relu(self, inputs, is_training): + """Performs a batch normalization followed by a ReLU.""" + # We set fused=True for a significant speed boost. See + # https://www.tensorflow.org/speed/speed_guide#common_fused_ops + inputs = tf.layers.batch_normalization( + inputs=inputs, + axis=1 if args.data_format == 'NCHW' else -1, + momentum=0.9, + epsilon=1e-05, + center=True, + scale=True, + training=is_training, + fused=True) + inputs = tf.nn.relu(inputs) + return inputs + + def conv_bn_layer(self, + name, + images, + kernel_shape, + is_training, + drop_rate=0.0): + with tf.name_scope(name) as scope: + kernel = tf.Variable( + tf.truncated_normal( + kernel_shape, dtype=tf.float32, stddev=1e-1), + name='weights') + conv = tf.nn.conv2d( + images, + kernel, [1, 1, 1, 1], + data_format=args.data_format, + padding='SAME') + biases = tf.Variable( + tf.constant( + 0.0, shape=[kernel_shape[-1]], dtype=tf.float32), + trainable=True, + name='biases') + out = tf.nn.bias_add(conv, biases) + out = self.batch_norm_relu(out, is_training) + out = tf.layers.dropout(out, rate=drop_rate, training=is_training) + return out + + def fc_layer(self, name, inputs, shape): + with tf.name_scope(name) as scope: + fc_w = tf.Variable( + tf.truncated_normal( + shape, dtype=tf.float32, stddev=1e-1), + name='weights') + fc_b = tf.Variable( + tf.constant( + 0.0, shape=[shape[-1]], dtype=tf.float32), + trainable=True, + name='biases') + out = tf.nn.bias_add(tf.matmul(inputs, fc_w), fc_b) + return out + + def network(self, images, class_dim, is_training): + """ VGG16 model structure. 
+ + TODO(kuke): enable this network to support the 'NCHW' data format + """ + + # conv1 + conv1_1 = self.conv_bn_layer( + 'conv1_1', images, [3, 3, 3, 64], is_training, drop_rate=0.3) + conv1_2 = self.conv_bn_layer( + 'conv1_2', conv1_1, [3, 3, 64, 64], is_training, drop_rate=0.0) + # pool1 + pool1 = tf.nn.max_pool( + conv1_2, + ksize=[1, 2, 2, 1], + strides=[1, 2, 2, 1], + padding='SAME', + name='pool1') + # conv2 + conv2_1 = self.conv_bn_layer( + 'conv2_1', pool1, [3, 3, 64, 128], is_training, drop_rate=0.4) + conv2_2 = self.conv_bn_layer( + 'conv2_2', conv2_1, [3, 3, 128, 128], is_training, drop_rate=0.0) + # pool2 + pool2 = tf.nn.max_pool( + conv2_2, + ksize=[1, 2, 2, 1], + strides=[1, 2, 2, 1], + padding='SAME', + name='pool2') + # conv3 + conv3_1 = self.conv_bn_layer( + 'conv3_1', pool2, [3, 3, 128, 256], is_training, drop_rate=0.4) + conv3_2 = self.conv_bn_layer( + 'conv3_2', conv3_1, [3, 3, 256, 256], is_training, drop_rate=0.4) + conv3_3 = self.conv_bn_layer( + 'conv3_3', conv3_2, [3, 3, 256, 256], is_training, drop_rate=0.0) + # pool3 + pool3 = tf.nn.max_pool( + conv3_3, + ksize=[1, 2, 2, 1], + strides=[1, 2, 2, 1], + padding='SAME', + name='pool3') + # conv4 + conv4_1 = self.conv_bn_layer( + 'conv4_1', pool3, [3, 3, 256, 512], is_training, drop_rate=0.4) + conv4_2 = self.conv_bn_layer( + 'conv4_2', conv4_1, [3, 3, 512, 512], is_training, drop_rate=0.4) + conv4_3 = self.conv_bn_layer( + 'conv4_3', conv4_2, [3, 3, 512, 512], is_training, drop_rate=0.0) + # pool4 + pool4 = tf.nn.max_pool( + conv4_3, + ksize=[1, 2, 2, 1], + strides=[1, 2, 2, 1], + padding='SAME', + name='pool4') + # conv5 + conv5_1 = self.conv_bn_layer( + 'conv5_1', pool4, [3, 3, 512, 512], is_training, drop_rate=0.4) + conv5_2 = self.conv_bn_layer( + 'conv5_2', conv5_1, [3, 3, 512, 512], is_training, drop_rate=0.4) + conv5_3 = self.conv_bn_layer( + 'conv5_3', conv5_2, [3, 3, 512, 512], is_training, drop_rate=0.0) + # pool5 + pool5 = tf.nn.max_pool( + conv5_3, + ksize=[1, 2, 2, 1], + strides=[1, 2, 2, 1], + padding='SAME', + name='pool5') + # flatten + shape = int(np.prod(pool5.get_shape()[1:])) + pool5_flat = tf.reshape(pool5, [-1, shape]) + # fc1 + drop = tf.layers.dropout(pool5_flat, rate=0.5, training=is_training) + fc1 = self.fc_layer('fc1', drop, [shape, 512]) + # fc2 + bn = self.batch_norm_relu(fc1, is_training) + drop = tf.layers.dropout(bn, rate=0.5, training=is_training) + fc2 = self.fc_layer('fc2', drop, [512, 512]) + + fc3 = self.fc_layer('fc3', fc2, [512, class_dim]) + + return fc3 + + +def run_benchmark(cluster_spec, server): + """Run benchmark on cifar10 or flowers.""" + + if args.data_set == "cifar10": + class_dim = 10 + raw_shape = (3, 32, 32) + dat_shape = (None, 32, 32, 3) if args.data_format == 'NHWC' else ( + None, 3, 32, 32) + else: + class_dim = 102 + raw_shape = (3, 224, 224) + dat_shape = (None, 224, 224, 3) if args.data_format == 'NHWC' else ( + None, 3, 224, 224) + + device = tf.train.replica_device_setter( + worker_device="/job:worker/task:{}".format(args.task_index), + cluster=cluster_spec) + + with tf.device(device): + images = tf.placeholder(tf.float32, shape=dat_shape) + labels = tf.placeholder(tf.int64, shape=(None, )) + is_training = tf.placeholder('bool') + onehot_labels = tf.one_hot(labels, depth=class_dim) + + vgg16 = VGG16Model() + logits = vgg16.network(images, class_dim, is_training) + loss = tf.losses.softmax_cross_entropy( + onehot_labels=onehot_labels, logits=logits) + avg_loss = tf.reduce_mean(loss) + + correct = tf.equal(tf.argmax(logits, 1), labels) + accuracy =
tf.reduce_mean(tf.cast(correct, tf.float32)) + + optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate) + update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) + global_step = tf.Variable(0, name='global_step', trainable=False) + with tf.control_dependencies(update_ops): + train_op = optimizer.minimize(avg_loss, global_step=global_step) + + summary_op = tf.summary.merge_all() + init_op = tf.global_variables_initializer() + + # data reader + train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.cifar.train10() + if args.data_set == 'cifar10' else paddle.dataset.flowers.train(), + buf_size=5120), + batch_size=args.batch_size) + test_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.cifar.test10() + if args.data_set == 'cifar10' else paddle.dataset.flowers.test(), + buf_size=5120), + batch_size=args.batch_size) + + # test + def test(): + test_accs = [] + for batch_id, data in enumerate(test_reader()): + test_images = np.array( + map(lambda x: np.transpose(x[0].reshape(raw_shape), + axes=[1, 2, 0]) if args.data_format == 'NHWC' else x[0], data)).astype("float32") + test_labels = np.array(map(lambda x: x[1], data)).astype('int64') + test_accs.append( + accuracy.eval(feed_dict={ + images: test_images, + labels: test_labels, + is_training: False + })) + return np.mean(test_accs) + + config = tf.ConfigProto( + intra_op_parallelism_threads=1, inter_op_parallelism_threads=1) + config.gpu_options.allow_growth = True + + hooks = [tf.train.StopAtStepHook(last_step=1000000)] + + with tf.train.MonitoredTrainingSession( + master=server.target, is_chief=(args.task_index == 0), + hooks=hooks) as sess: + iters, num_samples, start_time = 0, 0, 0.0 + for pass_id in range(args.num_passes): + # train + num_samples = 0 + start_time = time.time() + for batch_id, data in enumerate(train_reader()): + train_images = np.array( + map(lambda x: np.transpose(x[0].reshape(raw_shape), + axes=[1, 2, 0]) if args.data_format == 'NHWC' else x[0], data)).astype("float32") + train_labels = np.array(map(lambda x: x[1], data)).astype( + 'int64') + iter_begin_time = time.time() + _, loss, acc = sess.run([train_op, avg_loss, accuracy], + feed_dict={ + images: train_images, + labels: train_labels, + is_training: True + }) + iters += 1 + print( + "Pass = %d, Iters = %d, Loss = %f, Accuracy = %f, Speed=%.2f imgs/sec" + % (pass_id, iters, loss, acc, + len(data) / (time.time() - iter_begin_time))) + num_samples += len(data) + train_elapsed = time.time() - start_time + # test + pass_test_acc = test() + print("Pass = %d, Train speed = %f imgs/s, Test accuracy = %f\n" % + (pass_id, num_samples / train_elapsed, pass_test_acc)) + + +def print_arguments(): + print('----------- Configuration Arguments -----------') + for arg, value in sorted(vars(args).iteritems()): + print('%s: %s' % (arg, value)) + print('------------------------------------------------') + + +if __name__ == '__main__': + print_arguments() + + ps_hosts = args.ps_hosts.split(",") + worker_hosts = args.worker_hosts.split(",") + + # Create a cluster from the parameter server and worker hosts. + cluster_spec = tf.train.ClusterSpec({ + "ps": ps_hosts, + "worker": worker_hosts + }) + + # Create and start a server for the local task. 
+ server = tf.train.Server( + cluster_spec, job_name=args.job_name, task_index=args.task_index) + + if args.job_name == "ps": + print("start pserver") + server.join() + elif args.job_name == "worker": + print("start worker") + run_benchmark(cluster_spec, server) -- GitLab From c02f773a537b1693a3fad941980d052aa36cc766 Mon Sep 17 00:00:00 2001 From: Yancey Date: Tue, 27 Feb 2018 14:47:47 +0800 Subject: [PATCH 205/217] Fix dist demo var type error (#8600) * Fix dist demo error * revert trainer_id --- python/paddle/fluid/framework.py | 1 + .../fluid/tests/book_distribute/notest_dist_fit_a_line.py | 3 +-- .../book_distribute/notest_dist_image_classification.py | 1 + .../book_distribute/notest_dist_label_semantic_roles.py | 1 + .../fluid/tests/book_distribute/notest_dist_word2vec.py | 4 +++- .../tests/book_distribute/notest_machine_translation.py | 1 + .../book_distribute/notest_recognize_digits_conv_dist.py | 6 +----- .../tests/book_distribute/notest_recommender_system_dist.py | 1 + .../notest_understand_sentiment_conv_dist.py | 1 + .../notest_understand_sentiment_dynamic_lstm.py | 1 + 10 files changed, 12 insertions(+), 8 deletions(-) diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 64441e8fa4..2e23ddc9be 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -784,6 +784,7 @@ class Block(object): elif type(v) == Variable: var = Variable( self, + type=v.type, name=new_name, error_clip=error_clip, stop_gradient=stop_gradient) diff --git a/python/paddle/fluid/tests/book_distribute/notest_dist_fit_a_line.py b/python/paddle/fluid/tests/book_distribute/notest_dist_fit_a_line.py index 01c1fa24fd..cff82a8948 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_dist_fit_a_line.py +++ b/python/paddle/fluid/tests/book_distribute/notest_dist_fit_a_line.py @@ -48,6 +48,7 @@ current_endpoint = os.getenv("SERVER_ENDPOINT") # run as trainer or parameter server training_role = os.getenv("TRAINING_ROLE", "TRAINER") # get the training role: trainer/pserver + t.transpile(optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2) if training_role == "PSERVER": @@ -65,8 +66,6 @@ else: PASS_NUM = 100 for pass_id in range(PASS_NUM): - fluid.io.save_persistables(exe, "./fit_a_line.model/") - fluid.io.load_persistables(exe, "./fit_a_line.model/") for data in train_reader(): avg_loss_value = exe.run(trainer_prog, feed=feeder.feed(data), diff --git a/python/paddle/fluid/tests/book_distribute/notest_dist_image_classification.py b/python/paddle/fluid/tests/book_distribute/notest_dist_image_classification.py index e9101fd763..46630db43e 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_dist_image_classification.py +++ b/python/paddle/fluid/tests/book_distribute/notest_dist_image_classification.py @@ -138,6 +138,7 @@ current_endpoint = os.getenv("SERVER_ENDPOINT") # run as trainer or parameter server training_role = os.getenv("TRAINING_ROLE", "TRAINER") # get the training role: trainer/pserver + t.transpile( optimize_ops, params_grads, pservers=pserver_endpoints, trainers=TRAINERS) diff --git a/python/paddle/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py b/python/paddle/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py index 2d0c54fa7c..3ec85517ab 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py +++ b/python/paddle/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py @@ -191,6 +191,7 @@ def main(): # run as trainer or parameter server 
training_role = os.getenv( "TRAINING_ROLE", "TRAINER") # get the training role: trainer/pserver + t.transpile( optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2) diff --git a/python/paddle/fluid/tests/book_distribute/notest_dist_word2vec.py b/python/paddle/fluid/tests/book_distribute/notest_dist_word2vec.py index 6304927364..8164ba5428 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_dist_word2vec.py +++ b/python/paddle/fluid/tests/book_distribute/notest_dist_word2vec.py @@ -82,6 +82,7 @@ current_endpoint = os.getenv("SERVER_ENDPOINT") # run as trainer or parameter server training_role = os.getenv("TRAINING_ROLE", "TRAINER") # get the training role: trainer/pserver + t.transpile( optimize_ops, params_grads, pservers=pserver_endpoints, trainers=TRAINERS) if training_role == "PSERVER": @@ -97,9 +98,10 @@ elif training_role == "TRAINER": feed_list=[first_word, second_word, third_word, forth_word, next_word], place=place) exe.run(fluid.default_startup_program()) + trainer_prog = t.get_trainer_program() for pass_id in range(PASS_NUM): for data in train_reader(): - avg_cost_np = exe.run(t.get_trainer_program(), + avg_cost_np = exe.run(trainer_prog, feed=feeder.feed(data), fetch_list=[avg_cost]) print("avg_cost_np", avg_cost_np) diff --git a/python/paddle/fluid/tests/book_distribute/notest_machine_translation.py b/python/paddle/fluid/tests/book_distribute/notest_machine_translation.py index f5ef08430e..fee8db2497 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_machine_translation.py +++ b/python/paddle/fluid/tests/book_distribute/notest_machine_translation.py @@ -115,6 +115,7 @@ def main(): # run as trainer or parameter server training_role = os.getenv( "TRAINING_ROLE", "TRAINER") # get the training role: trainer/pserver + t.transpile( optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2) diff --git a/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py b/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py index eae1fe62af..b6ad6a992d 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py +++ b/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py @@ -64,11 +64,7 @@ if not current_endpoint: t = fluid.DistributeTranspiler() t.transpile( - optimize_ops, - params_grads, - 0, - pservers=pserver_endpoints, - trainers=trainers) + optimize_ops, params_grads, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": pserver_prog = t.get_pserver_program(current_endpoint) diff --git a/python/paddle/fluid/tests/book_distribute/notest_recommender_system_dist.py b/python/paddle/fluid/tests/book_distribute/notest_recommender_system_dist.py index 4329c821c2..741ec33639 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_recommender_system_dist.py +++ b/python/paddle/fluid/tests/book_distribute/notest_recommender_system_dist.py @@ -171,6 +171,7 @@ def main(): current_endpoint = os.getenv("SERVER_ENDPOINT") # run as trainer or parameter server training_role = os.getenv("TRAINING_ROLE", "TRAINER") + t.transpile( optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2) diff --git a/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py b/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py index ee0d8597b7..0467184bbf 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py +++ 
b/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py @@ -90,6 +90,7 @@ def main(): # run as trainer or parameter server training_role = os.getenv( "TRAINING_ROLE", "TRAINER") # get the training role: trainer/pserver + t.transpile( optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2) diff --git a/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py b/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py index fa792cbf92..1e13385852 100644 --- a/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py +++ b/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py @@ -102,6 +102,7 @@ def main(): # run as trainer or parameter server training_role = os.getenv( "TRAINING_ROLE", "TRAINER") # get the training role: trainer/pserver + t.transpile( optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2) -- GitLab From 6dd3a61b2fd385f8241ec6ce7513d16ec9a0ff27 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Tue, 27 Feb 2018 15:58:52 +0800 Subject: [PATCH 206/217] combine batch_size_like.cc into batch_size_like.h --- paddle/fluid/operators/CMakeLists.txt | 8 +-- paddle/fluid/operators/batch_size_like.cc | 64 ----------------------- paddle/fluid/operators/batch_size_like.h | 42 ++++++++++++++- 3 files changed, 41 insertions(+), 73 deletions(-) delete mode 100644 paddle/fluid/operators/batch_size_like.cc diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt index 0bda0e05e0..4da46e94c5 100644 --- a/paddle/fluid/operators/CMakeLists.txt +++ b/paddle/fluid/operators/CMakeLists.txt @@ -155,6 +155,7 @@ op_library(print_op DEPS lod_tensor) op_library(adagrad_op DEPS selected_rows_functor) op_library(maxout_op DEPS maxouting) op_library(unpool_op DEPS unpooling) +op_library(pool_op DEPS pooling) op_library(pool_with_index_op DEPS pooling) op_library(lod_rank_table_op DEPS lod_rank_table) op_library(lod_tensor_to_array_op DEPS lod_rank_table_op) @@ -171,20 +172,13 @@ op_library(cos_sim_op DEPS cos_sim_functor) op_library(parallel_do_op DEPS executor) op_library(create_reader_op DEPS reader) -# Regist multiple Kernel to pybind if (WITH_GPU) op_library(conv_op DEPS vol2col depthwise_conv) else() op_library(conv_op DEPS vol2col) endif() -op_library(pool_op DEPS pooling) op_library(conv_transpose_op DEPS vol2col) -cc_library(batch_size_like SRCS batch_size_like.cc DEPS op_registry) -op_library(fill_constant_batch_size_like_op DEPS batch_size_like) -op_library(uniform_random_batch_size_like_op DEPS batch_size_like uniform_random_op) -op_library(gaussian_random_batch_size_like_op DEPS batch_size_like gaussian_random_op) - # FIXME(typhoonzero): save/load depends lodtensor serialization functions op_library(save_op DEPS lod_tensor) op_library(load_op DEPS lod_tensor) diff --git a/paddle/fluid/operators/batch_size_like.cc b/paddle/fluid/operators/batch_size_like.cc deleted file mode 100644 index 4d4a6d4c47..0000000000 --- a/paddle/fluid/operators/batch_size_like.cc +++ /dev/null @@ -1,64 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/fluid/operators/batch_size_like.h" - -namespace paddle { -namespace operators { - -void BatchSizeLikeOp::InferShape(framework::InferShapeContext *ctx) const { - PADDLE_ENFORCE(ctx->HasInput("Input"), - "Input(Input) of %s should not be null.", Type()); - PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) of %s should not be null.", - Type()); - - auto &shape = ctx->Attrs().Get<std::vector<int>>("shape"); - PADDLE_ENFORCE_GT(shape.size(), 0); - std::vector<int64_t> shape_int64(shape.size(), 0); - std::transform(shape.begin(), shape.end(), shape_int64.begin(), - [](int a) { return static_cast<int64_t>(a); }); - auto output_dim = framework::make_ddim(shape_int64); - - int input_dim_idx = ctx->Attrs().Get<int>("input_dim_idx"); - PADDLE_ENFORCE_GE(input_dim_idx, 0); - PADDLE_ENFORCE_GT(ctx->GetInputDim("Input").size(), input_dim_idx); - - int output_dim_idx = ctx->Attrs().Get<int>("output_dim_idx"); - PADDLE_ENFORCE_GE(output_dim_idx, 0); - PADDLE_ENFORCE_GT(static_cast<int>(shape.size()), output_dim_idx); - - output_dim[output_dim_idx] = ctx->GetInputDim("Input")[input_dim_idx]; - ctx->SetOutputDim("Out", output_dim); -} - -BatchSizeLikeOpMaker::BatchSizeLikeOpMaker(OpProto *proto, - OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("Input", - "(Tensor) Tensor " - "whose input_dim_idx'th dimension specifies the batch_size"); - AddOutput("Out", - "(Tensor) Tensor of specified shape will be filled " - "with the specified value"); - AddAttr<std::vector<int>>("shape", "(vector<int>) The shape of the output"); - AddAttr<int>("input_dim_idx", - "(int, default 0) The index of input's batch size dimension") - .SetDefault(0); - AddAttr<int>("output_dim_idx", - "(int, default 0) The index of output's batch size dimension") - .SetDefault(0); -} - -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/batch_size_like.h b/paddle/fluid/operators/batch_size_like.h index 87e8f053a7..0bdf27e620 100644 --- a/paddle/fluid/operators/batch_size_like.h +++ b/paddle/fluid/operators/batch_size_like.h @@ -24,12 +24,50 @@ class BatchSizeLikeOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - void InferShape(framework::InferShapeContext *ctx) const override; + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Input"), + "Input(Input) of %s should not be null.", Type()); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of %s should not be null.", Type()); + + auto &shape = ctx->Attrs().Get<std::vector<int>>("shape"); + PADDLE_ENFORCE_GT(shape.size(), 0); + std::vector<int64_t> shape_int64(shape.size(), 0); + std::transform(shape.begin(), shape.end(), shape_int64.begin(), + [](int a) { return static_cast<int64_t>(a); }); + auto output_dim = framework::make_ddim(shape_int64); + + int input_dim_idx = ctx->Attrs().Get<int>("input_dim_idx"); + PADDLE_ENFORCE_GE(input_dim_idx, 0); + PADDLE_ENFORCE_GT(ctx->GetInputDim("Input").size(), input_dim_idx); + + int output_dim_idx = ctx->Attrs().Get<int>("output_dim_idx"); + PADDLE_ENFORCE_GE(output_dim_idx, 0); + PADDLE_ENFORCE_GT(static_cast<int>(shape.size()), output_dim_idx); + + output_dim[output_dim_idx] = ctx->GetInputDim("Input")[input_dim_idx];
+ ctx->SetOutputDim("Out", output_dim); + } }; class BatchSizeLikeOpMaker : public framework::OpProtoAndCheckerMaker { public: - BatchSizeLikeOpMaker(OpProto *proto, OpAttrChecker *op_checker); + BatchSizeLikeOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("Input", + "(Tensor) Tensor " + "whose input_dim_idx'th dimension specifies the batch_size"); + AddOutput("Out", + "(Tensor) Tensor of specified shape will be filled " + "with the specified value"); + AddAttr<std::vector<int>>("shape", "(vector<int>) The shape of the output"); + AddAttr<int>("input_dim_idx", + "(int, default 0) The index of input's batch size dimension") + .SetDefault(0); + AddAttr<int>("output_dim_idx", + "(int, default 0) The index of output's batch size dimension") + .SetDefault(0); + } }; } // namespace operators -- GitLab From 4d4322a6982fd5d5df03ad56c385514c381c337e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=AD=A6=E6=AF=85?= Date: Tue, 27 Feb 2018 17:18:14 +0800 Subject: [PATCH 207/217] merge fluid dist tests (#8573) * merge fluid dist tests * update cmake --- python/paddle/fluid/tests/CMakeLists.txt | 1 - .../fluid/tests/book/test_fit_a_line.py | 77 ++++-- .../tests/book/test_image_classification.py | 103 +++++--- .../tests/book/test_label_semantic_roles.py | 120 +++++---- .../tests/book/test_machine_translation.py | 77 ++++-- .../fluid/tests/book/test_recognize_digits.py | 130 +++++----- .../tests/book/test_recommender_system.py | 99 ++++--- .../tests/book/test_understand_sentiment.py | 74 ++++-- .../paddle/fluid/tests/book/test_word2vec.py | 68 +++-- .../tests/book_distribute/CMakeLists.txt | 5 - .../book_distribute/notest_dist_fit_a_line.py | 76 ------ .../notest_dist_image_classification.py | 171 ------------- .../notest_dist_label_semantic_roles.py | 241 ------------------ .../book_distribute/notest_dist_word2vec.py | 113 -------- .../notest_machine_translation.py | 158 ------------ .../notest_recognize_digits_conv_dist.py | 96 ------- .../notest_recognize_digits_mlp_dist.py | 89 ------- .../notest_recommender_system_dist.py | 217 ---------------- .../notest_understand_sentiment_conv_dist.py | 126 --------- ...otest_understand_sentiment_dynamic_lstm.py | 136 ---------- 20 files changed, 495 insertions(+), 1682 deletions(-) delete mode 100644 python/paddle/fluid/tests/book_distribute/CMakeLists.txt delete mode 100644 python/paddle/fluid/tests/book_distribute/notest_dist_fit_a_line.py delete mode 100644 python/paddle/fluid/tests/book_distribute/notest_dist_image_classification.py delete mode 100644 python/paddle/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py delete mode 100644 python/paddle/fluid/tests/book_distribute/notest_dist_word2vec.py delete mode 100644 python/paddle/fluid/tests/book_distribute/notest_machine_translation.py delete mode 100644 python/paddle/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py delete mode 100644 python/paddle/fluid/tests/book_distribute/notest_recognize_digits_mlp_dist.py delete mode 100644 python/paddle/fluid/tests/book_distribute/notest_recommender_system_dist.py delete mode 100644 python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py delete mode 100644 python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py diff --git a/python/paddle/fluid/tests/CMakeLists.txt b/python/paddle/fluid/tests/CMakeLists.txt index 5ff7b1b027..d24417bbac 100644 --- a/python/paddle/fluid/tests/CMakeLists.txt +++
b/python/paddle/fluid/tests/CMakeLists.txt @@ -7,5 +7,4 @@ endforeach() add_subdirectory(unittests) add_subdirectory(book) -add_subdirectory(book_distribute) add_subdirectory(book_memory_optimization) diff --git a/python/paddle/fluid/tests/book/test_fit_a_line.py b/python/paddle/fluid/tests/book/test_fit_a_line.py index 8a45533e3b..93ef66851b 100644 --- a/python/paddle/fluid/tests/book/test_fit_a_line.py +++ b/python/paddle/fluid/tests/book/test_fit_a_line.py @@ -19,9 +19,10 @@ import numpy import unittest import math import sys +import os -def train(use_cuda, save_dirname): +def train(use_cuda, save_dirname, is_local): x = fluid.layers.data(name='x', shape=[13], dtype='float32') y_predict = fluid.layers.fc(input=x, size=1, act=None) @@ -32,7 +33,7 @@ def train(use_cuda, save_dirname): avg_cost = fluid.layers.mean(cost) sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) - sgd_optimizer.minimize(avg_cost) + optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) BATCH_SIZE = 20 @@ -42,27 +43,57 @@ def train(use_cuda, save_dirname): batch_size=BATCH_SIZE) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - - PASS_NUM = 100 - for pass_id in range(PASS_NUM): - for data in train_reader(): - avg_loss_value, = exe.run(fluid.default_main_program(), - feed=feeder.feed(data), - fetch_list=[avg_cost]) - print(avg_loss_value) - if avg_loss_value[0] < 10.0: - if save_dirname is not None: - fluid.io.save_inference_model(save_dirname, ['x'], - [y_predict], exe) - return - if math.isnan(float(avg_loss_value)): - sys.exit("got NaN loss, training failed.") - raise AssertionError("Fit a line cost is too large, {0:2.2}".format( - avg_loss_value[0])) + def train_loop(main_program): + feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) + exe.run(fluid.default_startup_program()) + + PASS_NUM = 100 + for pass_id in range(PASS_NUM): + for data in train_reader(): + avg_loss_value, = exe.run(main_program, + feed=feeder.feed(data), + fetch_list=[avg_cost]) + print(avg_loss_value) + if avg_loss_value[0] < 10.0: + if save_dirname is not None: + fluid.io.save_inference_model(save_dirname, ['x'], + [y_predict], exe) + return + if math.isnan(float(avg_loss_value)): + sys.exit("got NaN loss, training failed.") + raise AssertionError("Fit a line cost is too large, {0:2.2}".format( + avg_loss_value[0])) + + if is_local: + train_loop(fluid.default_main_program()) + else: + port = os.getenv("PADDLE_INIT_PORT", "6174") + pserver_ips = os.getenv("PADDLE_INIT_PSERVERS") # ip,ip... + eplist = [] + for ip in pserver_ips.split(","): + eplist.append(':'.join([ip, port])) + pserver_endpoints = ",".join(eplist) # ip:port,ip:port... 
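+        # e.g. (illustrative values) PADDLE_INIT_PSERVERS="192.168.0.2,192.168.0.3"
+        # with PADDLE_INIT_PORT=6174 yields "192.168.0.2:6174,192.168.0.3:6174"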
+ trainers = int(os.getenv("TRAINERS")) + current_endpoint = os.getenv("POD_IP") + ":" + port + trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) + training_role = os.getenv("TRAINING_ROLE", "TRAINER") + t = fluid.DistributeTranspiler() + t.transpile( + optimize_ops, + params_grads, + trainer_id, + pservers=pserver_endpoints, + trainers=trainers) + if training_role == "PSERVER": + pserver_prog = t.get_pserver_program(current_endpoint) + pserver_startup = t.get_startup_program(current_endpoint, + pserver_prog) + exe.run(pserver_startup) + exe.run(pserver_prog) + elif training_role == "TRAINER": + train_loop(t.get_trainer_program()) def infer(use_cuda, save_dirname=None): @@ -94,14 +125,14 @@ def infer(use_cuda, save_dirname=None): print("infer results: ", results[0]) -def main(use_cuda): +def main(use_cuda, is_local=True): if use_cuda and not fluid.core.is_compiled_with_cuda(): return # Directory for saving the trained model save_dirname = "fit_a_line.inference.model" - train(use_cuda, save_dirname) + train(use_cuda, save_dirname, is_local) infer(use_cuda, save_dirname) diff --git a/python/paddle/fluid/tests/book/test_image_classification.py b/python/paddle/fluid/tests/book/test_image_classification.py index 60c66bc22c..613f4a7bf1 100644 --- a/python/paddle/fluid/tests/book/test_image_classification.py +++ b/python/paddle/fluid/tests/book/test_image_classification.py @@ -21,6 +21,7 @@ import math import sys import numpy import unittest +import os def resnet_cifar10(input, depth=32): @@ -92,7 +93,7 @@ def vgg16_bn_drop(input): return fc2 -def train(net_type, use_cuda, save_dirname): +def train(net_type, use_cuda, save_dirname, is_local): classdim = 10 data_shape = [3, 32, 32] @@ -117,7 +118,7 @@ def train(net_type, use_cuda, save_dirname): test_program = fluid.default_main_program().clone() optimizer = fluid.optimizer.Adam(learning_rate=0.001) - optimizer.minimize(avg_cost) + optimize_ops, params_grads = optimizer.minimize(avg_cost) BATCH_SIZE = 128 PASS_NUM = 1 @@ -133,38 +134,68 @@ def train(net_type, use_cuda, save_dirname): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) feeder = fluid.DataFeeder(place=place, feed_list=[images, label]) - exe.run(fluid.default_startup_program()) - - loss = 0.0 - for pass_id in range(PASS_NUM): - for batch_id, data in enumerate(train_reader()): - exe.run(feed=feeder.feed(data)) - - if (batch_id % 10) == 0: - acc_list = [] - avg_loss_list = [] - for tid, test_data in enumerate(test_reader()): - loss_t, acc_t = exe.run(program=test_program, - feed=feeder.feed(test_data), - fetch_list=[avg_cost, acc]) - if math.isnan(float(loss_t)): - sys.exit("got NaN loss, training failed.") - acc_list.append(float(acc_t)) - avg_loss_list.append(float(loss_t)) - break # Use 1 segment for speeding up CI - - acc_value = numpy.array(acc_list).mean() - avg_loss_value = numpy.array(avg_loss_list).mean() - - print( - 'PassID {0:1}, BatchID {1:04}, Test Loss {2:2.2}, Acc {3:2.2}'. 
- format(pass_id, batch_id + 1, - float(avg_loss_value), float(acc_value))) - - if acc_value > 0.01: # Low threshold for speeding up CI - fluid.io.save_inference_model(save_dirname, ["pixel"], - [predict], exe) - return + + def train_loop(main_program): + exe.run(fluid.default_startup_program()) + loss = 0.0 + for pass_id in range(PASS_NUM): + for batch_id, data in enumerate(train_reader()): + exe.run(main_program, feed=feeder.feed(data)) + + if (batch_id % 10) == 0: + acc_list = [] + avg_loss_list = [] + for tid, test_data in enumerate(test_reader()): + loss_t, acc_t = exe.run(program=test_program, + feed=feeder.feed(test_data), + fetch_list=[avg_cost, acc]) + if math.isnan(float(loss_t)): + sys.exit("got NaN loss, training failed.") + acc_list.append(float(acc_t)) + avg_loss_list.append(float(loss_t)) + break # Use 1 segment for speeding up CI + + acc_value = numpy.array(acc_list).mean() + avg_loss_value = numpy.array(avg_loss_list).mean() + + print( + 'PassID {0:1}, BatchID {1:04}, Test Loss {2:2.2}, Acc {3:2.2}'. + format(pass_id, batch_id + 1, + float(avg_loss_value), float(acc_value))) + + if acc_value > 0.01: # Low threshold for speeding up CI + fluid.io.save_inference_model(save_dirname, ["pixel"], + [predict], exe) + return + + if is_local: + train_loop(fluid.default_main_program()) + else: + port = os.getenv("PADDLE_INIT_PORT", "6174") + pserver_ips = os.getenv("PADDLE_INIT_PSERVERS") # ip,ip... + eplist = [] + for ip in pserver_ips.split(","): + eplist.append(':'.join([ip, port])) + pserver_endpoints = ",".join(eplist) # ip:port,ip:port... + trainers = int(os.getenv("TRAINERS")) + current_endpoint = os.getenv("POD_IP") + ":" + port + trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) + training_role = os.getenv("TRAINING_ROLE", "TRAINER") + t = fluid.DistributeTranspiler() + t.transpile( + optimize_ops, + params_grads, + trainer_id, + pservers=pserver_endpoints, + trainers=trainers) + if training_role == "PSERVER": + pserver_prog = t.get_pserver_program(current_endpoint) + pserver_startup = t.get_startup_program(current_endpoint, + pserver_prog) + exe.run(pserver_startup) + exe.run(pserver_prog) + elif training_role == "TRAINER": + train_loop(t.get_trainer_program()) def infer(use_cuda, save_dirname=None): @@ -196,14 +227,14 @@ def infer(use_cuda, save_dirname=None): print("infer results: ", results[0]) -def main(net_type, use_cuda): +def main(net_type, use_cuda, is_local=True): if use_cuda and not fluid.core.is_compiled_with_cuda(): return # Directory for saving the trained model save_dirname = "image_classification_" + net_type + ".inference.model" - train(net_type, use_cuda, save_dirname) + train(net_type, use_cuda, save_dirname, is_local) infer(use_cuda, save_dirname) diff --git a/python/paddle/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/fluid/tests/book/test_label_semantic_roles.py index cbb4d4b040..13efe4efb1 100644 --- a/python/paddle/fluid/tests/book/test_label_semantic_roles.py +++ b/python/paddle/fluid/tests/book/test_label_semantic_roles.py @@ -22,6 +22,7 @@ from paddle.fluid.initializer import init_on_cpu import contextlib import time import unittest +import os word_dict, verb_dict, label_dict = conll05.get_dict() word_dict_len = len(word_dict) @@ -138,7 +139,7 @@ def create_random_lodtensor(lod, place, low, high): return res -def train(use_cuda, save_dirname=None): +def train(use_cuda, save_dirname=None, is_local=True): # define network topology word = fluid.layers.data( name='word_data', shape=[1], dtype='int64', lod_level=1) @@ -178,7 +179,7 
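+        # TRAINING_ROLE is expected to be set by the cluster launcher (e.g. the
+        # paddle_k8s script); it selects the pserver or trainer branch below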
@@ def train(use_cuda, save_dirname=None): decay_rate=0.5, staircase=True), global_step=global_step) - sgd_optimizer.minimize(avg_cost) + optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) # TODO(qiao) # add dependency track and move this config before optimizer @@ -204,45 +205,78 @@ def train(use_cuda, save_dirname=None): place=place) exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - - embedding_param = fluid.global_scope().find_var(embedding_name).get_tensor() - embedding_param.set( - load_parameter(conll05.get_embedding(), word_dict_len, word_dim), place) - - start_time = time.time() - batch_id = 0 - for pass_id in xrange(PASS_NUM): - chunk_evaluator.reset(exe) - for data in train_data(): - cost, precision, recall, f1_score = exe.run( - fluid.default_main_program(), - feed=feeder.feed(data), - fetch_list=[avg_cost] + chunk_evaluator.metrics) - pass_precision, pass_recall, pass_f1_score = chunk_evaluator.eval( - exe) - - if batch_id % 10 == 0: - print("avg_cost:" + str(cost) + " precision:" + str( - precision) + " recall:" + str(recall) + " f1_score:" + str( - f1_score) + " pass_precision:" + str( - pass_precision) + " pass_recall:" + str(pass_recall) - + " pass_f1_score:" + str(pass_f1_score)) - if batch_id != 0: - print("second per batch: " + str((time.time() - start_time) - / batch_id)) - # Set the threshold low to speed up the CI test - if float(pass_precision) > 0.05: - if save_dirname is not None: - # TODO(liuyiqun): Change the target to crf_decode - fluid.io.save_inference_model(save_dirname, [ - 'word_data', 'verb_data', 'ctx_n2_data', - 'ctx_n1_data', 'ctx_0_data', 'ctx_p1_data', - 'ctx_p2_data', 'mark_data' - ], [feature_out], exe) - return - - batch_id = batch_id + 1 + def train_loop(main_program): + exe.run(fluid.default_startup_program()) + + embedding_param = fluid.global_scope().find_var( + embedding_name).get_tensor() + embedding_param.set( + load_parameter(conll05.get_embedding(), word_dict_len, word_dim), + place) + + start_time = time.time() + batch_id = 0 + for pass_id in xrange(PASS_NUM): + chunk_evaluator.reset(exe) + for data in train_data(): + cost, precision, recall, f1_score = exe.run( + main_program, + feed=feeder.feed(data), + fetch_list=[avg_cost] + chunk_evaluator.metrics) + pass_precision, pass_recall, pass_f1_score = chunk_evaluator.eval( + exe) + + if batch_id % 10 == 0: + print("avg_cost:" + str(cost) + " precision:" + str( + precision) + " recall:" + str(recall) + " f1_score:" + + str(f1_score) + " pass_precision:" + str( + pass_precision) + " pass_recall:" + str( + pass_recall) + " pass_f1_score:" + str( + pass_f1_score)) + if batch_id != 0: + print("second per batch: " + str((time.time( + ) - start_time) / batch_id)) + # Set the threshold low to speed up the CI test + if float(pass_precision) > 0.05: + if save_dirname is not None: + # TODO(liuyiqun): Change the target to crf_decode + fluid.io.save_inference_model(save_dirname, [ + 'word_data', 'verb_data', 'ctx_n2_data', + 'ctx_n1_data', 'ctx_0_data', 'ctx_p1_data', + 'ctx_p2_data', 'mark_data' + ], [feature_out], exe) + return + + batch_id = batch_id + 1 + + if is_local: + train_loop(fluid.default_main_program()) + else: + port = os.getenv("PADDLE_INIT_PORT", "6174") + pserver_ips = os.getenv("PADDLE_INIT_PSERVERS") # ip,ip... + eplist = [] + for ip in pserver_ips.split(","): + eplist.append(':'.join([ip, port])) + pserver_endpoints = ",".join(eplist) # ip:port,ip:port... 
+ trainers = int(os.getenv("TRAINERS")) + current_endpoint = os.getenv("POD_IP") + ":" + port + trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) + training_role = os.getenv("TRAINING_ROLE", "TRAINER") + t = fluid.DistributeTranspiler() + t.transpile( + optimize_ops, + params_grads, + trainer_id, + pservers=pserver_endpoints, + trainers=trainers) + if training_role == "PSERVER": + pserver_prog = t.get_pserver_program(current_endpoint) + pserver_startup = t.get_startup_program(current_endpoint, + pserver_prog) + exe.run(pserver_startup) + exe.run(pserver_prog) + elif training_role == "TRAINER": + train_loop(t.get_trainer_program()) def infer(use_cuda, save_dirname=None): @@ -308,14 +342,14 @@ def infer(use_cuda, save_dirname=None): print("Inference Shape: ", np_data.shape) -def main(use_cuda): +def main(use_cuda, is_local=True): if use_cuda and not fluid.core.is_compiled_with_cuda(): return # Directory for saving the trained model save_dirname = "label_semantic_roles.inference.model" - train(use_cuda, save_dirname) + train(use_cuda, save_dirname, is_local) infer(use_cuda, save_dirname) diff --git a/python/paddle/fluid/tests/book/test_machine_translation.py b/python/paddle/fluid/tests/book/test_machine_translation.py index bd768d5f08..caa9596a10 100644 --- a/python/paddle/fluid/tests/book/test_machine_translation.py +++ b/python/paddle/fluid/tests/book/test_machine_translation.py @@ -20,6 +20,7 @@ import paddle.fluid.framework as framework import paddle.fluid.layers as pd from paddle.fluid.executor import Executor import unittest +import os dict_size = 30000 source_dict_dim = target_dict_dim = dict_size @@ -168,7 +169,7 @@ def to_lodtensor(data, place): return res -def train_main(use_cuda, is_sparse): +def train_main(use_cuda, is_sparse, is_local=True): if use_cuda and not fluid.core.is_compiled_with_cuda(): return place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() @@ -181,7 +182,7 @@ def train_main(use_cuda, is_sparse): avg_cost = pd.mean(cost) optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4) - optimizer.minimize(avg_cost) + optimize_ops, params_grads = optimizer.minimize(avg_cost) train_data = paddle.batch( paddle.reader.shuffle( @@ -190,27 +191,57 @@ def train_main(use_cuda, is_sparse): exe = Executor(place) - exe.run(framework.default_startup_program()) - - batch_id = 0 - for pass_id in xrange(1): - for data in train_data(): - word_data = to_lodtensor(map(lambda x: x[0], data), place) - trg_word = to_lodtensor(map(lambda x: x[1], data), place) - trg_word_next = to_lodtensor(map(lambda x: x[2], data), place) - outs = exe.run(framework.default_main_program(), - feed={ - 'src_word_id': word_data, - 'target_language_word': trg_word, - 'target_language_next_word': trg_word_next - }, - fetch_list=[avg_cost]) - avg_cost_val = np.array(outs[0]) - print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) + - " avg_cost=" + str(avg_cost_val)) - if batch_id > 3: - break - batch_id += 1 + def train_loop(main_program): + exe.run(framework.default_startup_program()) + + batch_id = 0 + for pass_id in xrange(1): + for data in train_data(): + word_data = to_lodtensor(map(lambda x: x[0], data), place) + trg_word = to_lodtensor(map(lambda x: x[1], data), place) + trg_word_next = to_lodtensor(map(lambda x: x[2], data), place) + outs = exe.run(main_program, + feed={ + 'src_word_id': word_data, + 'target_language_word': trg_word, + 'target_language_next_word': trg_word_next + }, + fetch_list=[avg_cost]) + avg_cost_val = np.array(outs[0]) + print('pass_id=' + str(pass_id) + ' batch=' + 
str(batch_id) + + " avg_cost=" + str(avg_cost_val)) + if batch_id > 3: + break + batch_id += 1 + + if is_local: + train_loop(framework.default_main_program()) + else: + port = os.getenv("PADDLE_INIT_PORT", "6174") + pserver_ips = os.getenv("PADDLE_INIT_PSERVERS") # ip,ip... + eplist = [] + for ip in pserver_ips.split(","): + eplist.append(':'.join([ip, port])) + pserver_endpoints = ",".join(eplist) # ip:port,ip:port... + trainers = int(os.getenv("TRAINERS")) + current_endpoint = os.getenv("POD_IP") + ":" + port + trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) + training_role = os.getenv("TRAINING_ROLE", "TRAINER") + t = fluid.DistributeTranspiler() + t.transpile( + optimize_ops, + params_grads, + trainer_id, + pservers=pserver_endpoints, + trainers=trainers) + if training_role == "PSERVER": + pserver_prog = t.get_pserver_program(current_endpoint) + pserver_startup = t.get_startup_program(current_endpoint, + pserver_prog) + exe.run(pserver_startup) + exe.run(pserver_prog) + elif training_role == "TRAINER": + train_loop(t.get_trainer_program()) def decode_main(use_cuda, is_sparse): diff --git a/python/paddle/fluid/tests/book/test_recognize_digits.py b/python/paddle/fluid/tests/book/test_recognize_digits.py index 285e914203..b57fe08e1a 100644 --- a/python/paddle/fluid/tests/book/test_recognize_digits.py +++ b/python/paddle/fluid/tests/book/test_recognize_digits.py @@ -20,27 +20,7 @@ import numpy import unittest import math import sys - - -def parse_arg(): - parser = argparse.ArgumentParser() - parser.add_argument( - "nn_type", - help="The neural network type, in ['mlp', 'conv']", - type=str, - choices=['mlp', 'conv']) - parser.add_argument( - "--parallel", - help='Run in parallel or not', - default=False, - action="store_true") - parser.add_argument( - "--use_cuda", - help="Run the program by using CUDA", - default=False, - action="store_true") - return parser.parse_args() - +import os BATCH_SIZE = 64 @@ -83,7 +63,8 @@ def train(nn_type, parallel, save_dirname=None, model_filename=None, - params_filename=None): + params_filename=None, + is_local=True): if use_cuda and not fluid.core.is_compiled_with_cuda(): return img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32') @@ -114,12 +95,11 @@ def train(nn_type, test_program = fluid.default_main_program().clone() optimizer = fluid.optimizer.Adam(learning_rate=0.001) - optimizer.minimize(avg_loss) + optimize_ops, params_grads = optimizer.minimize(avg_loss) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) train_reader = paddle.batch( paddle.reader.shuffle( @@ -129,39 +109,74 @@ def train(nn_type, paddle.dataset.mnist.test(), batch_size=BATCH_SIZE) feeder = fluid.DataFeeder(feed_list=[img, label], place=place) - PASS_NUM = 100 - for pass_id in range(PASS_NUM): - for batch_id, data in enumerate(train_reader()): - # train a mini-batch, fetch nothing - exe.run(feed=feeder.feed(data)) - if (batch_id + 1) % 10 == 0: - acc_set = [] - avg_loss_set = [] - for test_data in test_reader(): - acc_np, avg_loss_np = exe.run(program=test_program, - feed=feeder.feed(test_data), - fetch_list=[acc, avg_loss]) - acc_set.append(float(acc_np)) - avg_loss_set.append(float(avg_loss_np)) - # get test acc and loss - acc_val = numpy.array(acc_set).mean() - avg_loss_val = numpy.array(avg_loss_set).mean() - if float(acc_val) > 0.2: # Smaller value to increase CI speed - if save_dirname is not None: - fluid.io.save_inference_model( - save_dirname, ["img"], [prediction], - 
exe, - model_filename=model_filename, - params_filename=params_filename) - return - else: - print( - 'PassID {0:1}, BatchID {1:04}, Test Loss {2:2.2}, Acc {3:2.2}'. - format(pass_id, batch_id + 1, - float(avg_loss_val), float(acc_val))) - if math.isnan(float(avg_loss_val)): - sys.exit("got NaN loss, training failed.") - raise AssertionError("Loss of recognize digits is too large") + def train_loop(main_program): + exe.run(fluid.default_startup_program()) + + PASS_NUM = 100 + for pass_id in range(PASS_NUM): + for batch_id, data in enumerate(train_reader()): + # train a mini-batch, fetch nothing + exe.run(main_program, feed=feeder.feed(data)) + if (batch_id + 1) % 10 == 0: + acc_set = [] + avg_loss_set = [] + for test_data in test_reader(): + acc_np, avg_loss_np = exe.run( + program=test_program, + feed=feeder.feed(test_data), + fetch_list=[acc, avg_loss]) + acc_set.append(float(acc_np)) + avg_loss_set.append(float(avg_loss_np)) + # get test acc and loss + acc_val = numpy.array(acc_set).mean() + avg_loss_val = numpy.array(avg_loss_set).mean() + if float(acc_val + ) > 0.2: # Smaller value to increase CI speed + if save_dirname is not None: + fluid.io.save_inference_model( + save_dirname, ["img"], [prediction], + exe, + model_filename=model_filename, + params_filename=params_filename) + return + else: + print( + 'PassID {0:1}, BatchID {1:04}, Test Loss {2:2.2}, Acc {3:2.2}'. + format(pass_id, batch_id + 1, + float(avg_loss_val), float(acc_val))) + if math.isnan(float(avg_loss_val)): + sys.exit("got NaN loss, training failed.") + raise AssertionError("Loss of recognize digits is too large") + + if is_local: + train_loop(fluid.default_main_program()) + else: + port = os.getenv("PADDLE_INIT_PORT", "6174") + pserver_ips = os.getenv("PADDLE_INIT_PSERVERS") # ip,ip... + eplist = [] + for ip in pserver_ips.split(","): + eplist.append(':'.join([ip, port])) + pserver_endpoints = ",".join(eplist) # ip:port,ip:port... 
+        # pserver_endpoints built above from PADDLE_INIT_PSERVERS
+        trainers = int(os.getenv("TRAINERS"))
+        current_endpoint = os.getenv("POD_IP") + ":" + port
+        trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID"))
+        training_role = os.getenv("TRAINING_ROLE", "TRAINER")
+        t = fluid.DistributeTranspiler()
+        t.transpile(
+            optimize_ops,
+            params_grads,
+            trainer_id,
+            pservers=pserver_endpoints,
+            trainers=trainers)
+        if training_role == "PSERVER":
+            pserver_prog = t.get_pserver_program(current_endpoint)
+            pserver_startup = t.get_startup_program(current_endpoint,
+                                                    pserver_prog)
+            exe.run(pserver_startup)
+            exe.run(pserver_prog)
+        elif training_role == "TRAINER":
+            train_loop(t.get_trainer_program())


 def infer(use_cuda,
@@ -208,6 +223,7 @@ def main(use_cuda, parallel, nn_type, combine):
         model_filename = "__model_combined__"
         params_filename = "__params_combined__"

+    # call train() with the is_local argument to run distributed training
     train(
         nn_type=nn_type,
         use_cuda=use_cuda,
diff --git a/python/paddle/fluid/tests/book/test_recommender_system.py b/python/paddle/fluid/tests/book/test_recommender_system.py
index 7c58c3e782..5e258a2c51 100644
--- a/python/paddle/fluid/tests/book/test_recommender_system.py
+++ b/python/paddle/fluid/tests/book/test_recommender_system.py
@@ -14,6 +14,7 @@
 import math
 import sys
+import os
 import numpy as np
 import paddle.v2 as paddle
 import paddle.fluid as fluid
@@ -152,19 +153,18 @@ def model():
     return scale_infer, avg_cost


-def train(use_cuda, save_dirname):
+def train(use_cuda, save_dirname, is_local=True):
     scale_infer, avg_cost = model()

     # test program
     test_program = fluid.default_main_program().clone()

     sgd_optimizer = SGDOptimizer(learning_rate=0.2)
-    opts = sgd_optimizer.minimize(avg_cost)
+    optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost)

     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

     exe = Executor(place)
-    exe.run(framework.default_startup_program())

     train_reader = paddle.batch(
         paddle.reader.shuffle(
@@ -212,36 +212,69 @@ def train(use_cuda, save_dirname):
             feed_tensors[key] = tensor
         return feed_tensors

-    PASS_NUM = 100
-    for pass_id in range(PASS_NUM):
-        for batch_id, data in enumerate(train_reader()):
-            # train a mini-batch
-            outs = exe.run(program=fluid.default_main_program(),
-                           feed=func_feed(feeding, data),
-                           fetch_list=[avg_cost])
-            out = np.array(outs[0])
-            if (batch_id + 1) % 10 == 0:
-                avg_cost_set = []
-                for test_data in test_reader():
-                    avg_cost_np = exe.run(program=test_program,
-                                          feed=func_feed(feeding, test_data),
-                                          fetch_list=[avg_cost])
-                    avg_cost_set.append(avg_cost_np[0])
-                    break  # test only 1 segment for speeding up CI
-
-                # get test avg_cost
-                test_avg_cost = np.array(avg_cost_set).mean()
-                if test_avg_cost < 6.0:
-                    # if avg_cost less than 6.0, we think our code is good.
- if save_dirname is not None: - fluid.io.save_inference_model(save_dirname, [ - "user_id", "gender_id", "age_id", "job_id", - "movie_id", "category_id", "movie_title" - ], [scale_infer], exe) - return - - if math.isnan(float(out[0])): - sys.exit("got NaN loss, training failed.") + def train_loop(main_program): + exe.run(framework.default_startup_program()) + + PASS_NUM = 100 + for pass_id in range(PASS_NUM): + for batch_id, data in enumerate(train_reader()): + # train a mini-batch + outs = exe.run(program=main_program, + feed=func_feed(feeding, data), + fetch_list=[avg_cost]) + out = np.array(outs[0]) + if (batch_id + 1) % 10 == 0: + avg_cost_set = [] + for test_data in test_reader(): + avg_cost_np = exe.run( + program=test_program, + feed=func_feed(feeding, test_data), + fetch_list=[avg_cost]) + avg_cost_set.append(avg_cost_np[0]) + break # test only 1 segment for speeding up CI + + # get test avg_cost + test_avg_cost = np.array(avg_cost_set).mean() + if test_avg_cost < 6.0: + # if avg_cost less than 6.0, we think our code is good. + if save_dirname is not None: + fluid.io.save_inference_model(save_dirname, [ + "user_id", "gender_id", "age_id", "job_id", + "movie_id", "category_id", "movie_title" + ], [scale_infer], exe) + return + + if math.isnan(float(out[0])): + sys.exit("got NaN loss, training failed.") + + if is_local: + train_loop(fluid.default_main_program()) + else: + port = os.getenv("PADDLE_INIT_PORT", "6174") + pserver_ips = os.getenv("PADDLE_INIT_PSERVERS") # ip,ip... + eplist = [] + for ip in pserver_ips.split(","): + eplist.append(':'.join([ip, port])) + pserver_endpoints = ",".join(eplist) # ip:port,ip:port... + trainers = int(os.getenv("TRAINERS")) + current_endpoint = os.getenv("POD_IP") + ":" + port + trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) + training_role = os.getenv("TRAINING_ROLE", "TRAINER") + t = fluid.DistributeTranspiler() + t.transpile( + optimize_ops, + params_grads, + trainer_id, + pservers=pserver_endpoints, + trainers=trainers) + if training_role == "PSERVER": + pserver_prog = t.get_pserver_program(current_endpoint) + pserver_startup = t.get_startup_program(current_endpoint, + pserver_prog) + exe.run(pserver_startup) + exe.run(pserver_prog) + elif training_role == "TRAINER": + train_loop(t.get_trainer_program()) def infer(use_cuda, save_dirname=None): diff --git a/python/paddle/fluid/tests/book/test_understand_sentiment.py b/python/paddle/fluid/tests/book/test_understand_sentiment.py index fae74c3557..1b7e84ea05 100644 --- a/python/paddle/fluid/tests/book/test_understand_sentiment.py +++ b/python/paddle/fluid/tests/book/test_understand_sentiment.py @@ -20,6 +20,7 @@ import contextlib import math import numpy as np import sys +import os def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32, @@ -132,7 +133,12 @@ def create_random_lodtensor(lod, place, low, high): return res -def train(word_dict, net_method, use_cuda, parallel=False, save_dirname=None): +def train(word_dict, + net_method, + use_cuda, + parallel=False, + save_dirname=None, + is_local=True): BATCH_SIZE = 128 PASS_NUM = 5 dict_dim = len(word_dict) @@ -164,7 +170,7 @@ def train(word_dict, net_method, use_cuda, parallel=False, save_dirname=None): assert save_dirname is None adagrad = fluid.optimizer.Adagrad(learning_rate=0.002) - adagrad.minimize(cost) + optimize_ops, params_grads = adagrad.minimize(cost) train_data = paddle.batch( paddle.reader.shuffle( @@ -174,23 +180,53 @@ def train(word_dict, net_method, use_cuda, parallel=False, save_dirname=None): exe = 
fluid.Executor(place) feeder = fluid.DataFeeder(feed_list=[data, label], place=place) - exe.run(fluid.default_startup_program()) - - for pass_id in xrange(PASS_NUM): - for data in train_data(): - cost_val, acc_val = exe.run(fluid.default_main_program(), - feed=feeder.feed(data), - fetch_list=[cost, acc_out]) - print("cost=" + str(cost_val) + " acc=" + str(acc_val)) - if cost_val < 0.4 and acc_val > 0.8: - if save_dirname is not None: - fluid.io.save_inference_model(save_dirname, ["words"], - prediction, exe) - return - if math.isnan(float(cost_val)): - sys.exit("got NaN loss, training failed.") - raise AssertionError("Cost is too large for {0}".format( - net_method.__name__)) + def train_loop(main_program): + exe.run(fluid.default_startup_program()) + + for pass_id in xrange(PASS_NUM): + for data in train_data(): + cost_val, acc_val = exe.run(main_program, + feed=feeder.feed(data), + fetch_list=[cost, acc_out]) + print("cost=" + str(cost_val) + " acc=" + str(acc_val)) + if cost_val < 0.4 and acc_val > 0.8: + if save_dirname is not None: + fluid.io.save_inference_model(save_dirname, ["words"], + prediction, exe) + return + if math.isnan(float(cost_val)): + sys.exit("got NaN loss, training failed.") + raise AssertionError("Cost is too large for {0}".format( + net_method.__name__)) + + if is_local: + train_loop(fluid.default_main_program()) + else: + port = os.getenv("PADDLE_INIT_PORT", "6174") + pserver_ips = os.getenv("PADDLE_INIT_PSERVERS") # ip,ip... + eplist = [] + for ip in pserver_ips.split(","): + eplist.append(':'.join([ip, port])) + pserver_endpoints = ",".join(eplist) # ip:port,ip:port... + trainers = int(os.getenv("TRAINERS")) + current_endpoint = os.getenv("POD_IP") + ":" + port + trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) + training_role = os.getenv("TRAINING_ROLE", "TRAINER") + t = fluid.DistributeTranspiler() + t.transpile( + optimize_ops, + params_grads, + trainer_id, + pservers=pserver_endpoints, + trainers=trainers) + if training_role == "PSERVER": + pserver_prog = t.get_pserver_program(current_endpoint) + pserver_startup = t.get_startup_program(current_endpoint, + pserver_prog) + exe.run(pserver_startup) + exe.run(pserver_prog) + elif training_role == "TRAINER": + train_loop(t.get_trainer_program()) def infer(word_dict, use_cuda, save_dirname=None): diff --git a/python/paddle/fluid/tests/book/test_word2vec.py b/python/paddle/fluid/tests/book/test_word2vec.py index 696abd5499..26b97c3e25 100644 --- a/python/paddle/fluid/tests/book/test_word2vec.py +++ b/python/paddle/fluid/tests/book/test_word2vec.py @@ -30,7 +30,7 @@ def create_random_lodtensor(lod, place, low, high): return res -def train(use_cuda, is_sparse, is_parallel, save_dirname): +def train(use_cuda, is_sparse, is_parallel, save_dirname, is_local=True): PASS_NUM = 100 EMBED_SIZE = 32 HIDDEN_SIZE = 256 @@ -101,7 +101,7 @@ def train(use_cuda, is_sparse, is_parallel, save_dirname): avg_cost = fluid.layers.mean(pd()) sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) - sgd_optimizer.minimize(avg_cost) + optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) train_reader = paddle.batch( paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE) @@ -112,23 +112,53 @@ def train(use_cuda, is_sparse, is_parallel, save_dirname): feed_list=[first_word, second_word, third_word, forth_word, next_word], place=place) - exe.run(fluid.default_startup_program()) - - for pass_id in range(PASS_NUM): - for data in train_reader(): - avg_cost_np = exe.run(fluid.default_main_program(), - feed=feeder.feed(data), - 
fetch_list=[avg_cost]) - if avg_cost_np[0] < 5.0: - if save_dirname is not None: - fluid.io.save_inference_model(save_dirname, [ - 'firstw', 'secondw', 'thirdw', 'forthw' - ], [predict_word], exe) - return - if math.isnan(float(avg_cost_np[0])): - sys.exit("got NaN loss, training failed.") - - raise AssertionError("Cost is too large {0:2.2}".format(avg_cost_np[0])) + def train_loop(main_program): + exe.run(fluid.default_startup_program()) + + for pass_id in range(PASS_NUM): + for data in train_reader(): + avg_cost_np = exe.run(main_program, + feed=feeder.feed(data), + fetch_list=[avg_cost]) + if avg_cost_np[0] < 5.0: + if save_dirname is not None: + fluid.io.save_inference_model(save_dirname, [ + 'firstw', 'secondw', 'thirdw', 'forthw' + ], [predict_word], exe) + return + if math.isnan(float(avg_cost_np[0])): + sys.exit("got NaN loss, training failed.") + + raise AssertionError("Cost is too large {0:2.2}".format(avg_cost_np[0])) + + if is_local: + train_loop(fluid.default_main_program()) + else: + port = os.getenv("PADDLE_INIT_PORT", "6174") + pserver_ips = os.getenv("PADDLE_INIT_PSERVERS") # ip,ip... + eplist = [] + for ip in pserver_ips.split(","): + eplist.append(':'.join([ip, port])) + pserver_endpoints = ",".join(eplist) # ip:port,ip:port... + trainers = int(os.getenv("TRAINERS")) + current_endpoint = os.getenv("POD_IP") + ":" + port + trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) + training_role = os.getenv("TRAINING_ROLE", "TRAINER") + t = fluid.DistributeTranspiler() + t.transpile( + optimize_ops, + params_grads, + trainer_id, + pservers=pserver_endpoints, + trainers=trainers) + if training_role == "PSERVER": + pserver_prog = t.get_pserver_program(current_endpoint) + pserver_startup = t.get_startup_program(current_endpoint, + pserver_prog) + exe.run(pserver_startup) + exe.run(pserver_prog) + elif training_role == "TRAINER": + train_loop(t.get_trainer_program()) def infer(use_cuda, save_dirname=None): diff --git a/python/paddle/fluid/tests/book_distribute/CMakeLists.txt b/python/paddle/fluid/tests/book_distribute/CMakeLists.txt deleted file mode 100644 index 4d7664469e..0000000000 --- a/python/paddle/fluid/tests/book_distribute/CMakeLists.txt +++ /dev/null @@ -1,5 +0,0 @@ -file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py") -string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") -foreach(src ${TEST_OPS}) - py_test(${src} SRCS ${src}.py) -endforeach() diff --git a/python/paddle/fluid/tests/book_distribute/notest_dist_fit_a_line.py b/python/paddle/fluid/tests/book_distribute/notest_dist_fit_a_line.py deleted file mode 100644 index cff82a8948..0000000000 --- a/python/paddle/fluid/tests/book_distribute/notest_dist_fit_a_line.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
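The notest_dist_fit_a_line.py script removed below duplicated the fit_a_line network solely to exercise the transpiler; after this patch the same path is reached through the book test's is_local=False branch. A minimal sketch of how a per-node launcher could drive that branch, assuming the updated test_fit_a_line module is importable and an illustrative one-pserver/two-trainer topology; the environment variable names are exactly those read by the new code:

    import os

    # Assumed topology: one pserver at 192.168.0.2, two trainers (illustrative IPs).
    os.environ["PADDLE_INIT_PORT"] = "6174"             # pserver listen port
    os.environ["PADDLE_INIT_PSERVERS"] = "192.168.0.2"  # ip,ip...
    os.environ["TRAINERS"] = "2"                        # total trainer count
    os.environ["POD_IP"] = "192.168.0.2"                # this node's own IP
    os.environ["PADDLE_INIT_TRAINER_ID"] = "0"          # unused by the PSERVER role
    os.environ["TRAINING_ROLE"] = "PSERVER"             # "TRAINER" on trainer nodes

    import test_fit_a_line  # hypothetical import of the updated book test
    test_fit_a_line.main(use_cuda=False, is_local=False)

Every node runs the same entry point; only TRAINING_ROLE, POD_IP and PADDLE_INIT_TRAINER_ID differ per node.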
- -import numpy as np -import paddle.v2 as paddle -import paddle.fluid as fluid -import os - -x = fluid.layers.data(name='x', shape=[13], dtype='float32') - -y_predict = fluid.layers.fc(input=x, size=1, act=None) - -y = fluid.layers.data(name='y', shape=[1], dtype='float32') - -cost = fluid.layers.square_error_cost(input=y_predict, label=y) -avg_cost = fluid.layers.mean(cost) - -sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) -optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) - -BATCH_SIZE = 20 - -train_reader = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.uci_housing.train(), buf_size=500), - batch_size=BATCH_SIZE) - -place = fluid.CPUPlace() -feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) -exe = fluid.Executor(place) - -t = fluid.DistributeTranspiler() -# all parameter server endpoints list for spliting parameters -pserver_endpoints = os.getenv("PSERVERS") -# server endpoint for current node -current_endpoint = os.getenv("SERVER_ENDPOINT") -# run as trainer or parameter server -training_role = os.getenv("TRAINING_ROLE", - "TRAINER") # get the training role: trainer/pserver - -t.transpile(optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2) - -if training_role == "PSERVER": - if not current_endpoint: - print("need env SERVER_ENDPOINT") - exit(1) - pserver_prog = t.get_pserver_program(current_endpoint) - pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) - exe.run(pserver_startup) - exe.run(pserver_prog) -else: - trainer_prog = t.get_trainer_program() - - exe.run(fluid.default_startup_program()) - - PASS_NUM = 100 - for pass_id in range(PASS_NUM): - for data in train_reader(): - avg_loss_value = exe.run(trainer_prog, - feed=feeder.feed(data), - fetch_list=[avg_cost]) - print("loss:" + str(avg_loss_value)) - if avg_loss_value[0] < 10.0: - exit(0) -exit(1) diff --git a/python/paddle/fluid/tests/book_distribute/notest_dist_image_classification.py b/python/paddle/fluid/tests/book_distribute/notest_dist_image_classification.py deleted file mode 100644 index 46630db43e..0000000000 --- a/python/paddle/fluid/tests/book_distribute/notest_dist_image_classification.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
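For reference, the eplist loop that all the new is_local=False branches share converts the bare IP list in PADDLE_INIT_PSERVERS into the ip:port list that transpile() expects. A self-contained sketch of that transformation; the helper name is ours, not part of the patch:

    def build_pserver_endpoints(pserver_ips, port):
        # "10.0.0.1,10.0.0.2" with port "6174" -> "10.0.0.1:6174,10.0.0.2:6174"
        return ",".join(':'.join([ip, port]) for ip in pserver_ips.split(","))

    assert (build_pserver_endpoints("10.0.0.1,10.0.0.2", "6174") ==
            "10.0.0.1:6174,10.0.0.2:6174")

The deleted scripts below instead read a pre-joined list from the legacy PSERVERS variable.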
- -from __future__ import print_function - -import paddle.v2 as paddle -import paddle.fluid as fluid -import os -import sys - -TRAINERS = 5 -BATCH_SIZE = 128 -PASS_NUM = 100 - - -def resnet_cifar10(input, depth=32): - def conv_bn_layer(input, ch_out, filter_size, stride, padding, act='relu'): - tmp = fluid.layers.conv2d( - input=input, - filter_size=filter_size, - num_filters=ch_out, - stride=stride, - padding=padding, - act=None, - bias_attr=False) - return fluid.layers.batch_norm(input=tmp, act=act) - - def shortcut(input, ch_in, ch_out, stride): - if ch_in != ch_out: - return conv_bn_layer(input, ch_out, 1, stride, 0, None) - else: - return input - - def basicblock(input, ch_in, ch_out, stride): - tmp = conv_bn_layer(input, ch_out, 3, stride, 1) - tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None) - short = shortcut(input, ch_in, ch_out, stride) - return fluid.layers.elementwise_add(x=tmp, y=short, act='relu') - - def layer_warp(block_func, input, ch_in, ch_out, count, stride): - tmp = block_func(input, ch_in, ch_out, stride) - for i in range(1, count): - tmp = block_func(tmp, ch_out, ch_out, 1) - return tmp - - assert (depth - 2) % 6 == 0 - n = (depth - 2) / 6 - conv1 = conv_bn_layer( - input=input, ch_out=16, filter_size=3, stride=1, padding=1) - res1 = layer_warp(basicblock, conv1, 16, 16, n, 1) - res2 = layer_warp(basicblock, res1, 16, 32, n, 2) - res3 = layer_warp(basicblock, res2, 32, 64, n, 2) - pool = fluid.layers.pool2d( - input=res3, pool_size=8, pool_type='avg', pool_stride=1) - return pool - - -def vgg16_bn_drop(input): - def conv_block(input, num_filter, groups, dropouts): - return fluid.nets.img_conv_group( - input=input, - pool_size=2, - pool_stride=2, - conv_num_filter=[num_filter] * groups, - conv_filter_size=3, - conv_act='relu', - conv_with_batchnorm=True, - conv_batchnorm_drop_rate=dropouts, - pool_type='max') - - conv1 = conv_block(input, 64, 2, [0.3, 0]) - conv2 = conv_block(conv1, 128, 2, [0.4, 0]) - conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0]) - conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0]) - conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0]) - - drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5) - fc1 = fluid.layers.fc(input=drop, size=512, act=None) - bn = fluid.layers.batch_norm(input=fc1, act='relu') - drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5) - fc2 = fluid.layers.fc(input=drop2, size=512, act=None) - return fc2 - - -classdim = 10 -data_shape = [3, 32, 32] - -images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32') -label = fluid.layers.data(name='label', shape=[1], dtype='int64') - -net_type = "vgg" -if len(sys.argv) >= 2: - net_type = sys.argv[1] - -if net_type == "vgg": - print("training vgg net") - net = vgg16_bn_drop(images) -elif net_type == "resnet": - print("training resnet") - net = resnet_cifar10(images, 32) -else: - raise ValueError("%s network is not supported" % net_type) - -predict = fluid.layers.fc(input=net, size=classdim, act='softmax') -cost = fluid.layers.cross_entropy(input=predict, label=label) -avg_cost = fluid.layers.mean(cost) - -optimizer = fluid.optimizer.Adam(learning_rate=0.001) -optimize_ops, params_grads = optimizer.minimize(avg_cost) - -accuracy = fluid.evaluator.Accuracy(input=predict, label=label) - -train_reader = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.cifar.train10(), buf_size=128 * 10), - batch_size=BATCH_SIZE) - -place = fluid.CPUPlace() -feeder = fluid.DataFeeder(place=place, feed_list=[images, label]) -exe = fluid.Executor(place) - -t = fluid.DistributeTranspiler() 
-# all parameter server endpoints list for spliting parameters -pserver_endpoints = os.getenv("PSERVERS") -# server endpoint for current node -current_endpoint = os.getenv("SERVER_ENDPOINT") -# run as trainer or parameter server -training_role = os.getenv("TRAINING_ROLE", - "TRAINER") # get the training role: trainer/pserver - -t.transpile( - optimize_ops, params_grads, pservers=pserver_endpoints, trainers=TRAINERS) - -if training_role == "PSERVER": - if not current_endpoint: - print("need env SERVER_ENDPOINT") - exit(1) - pserver_prog = t.get_pserver_program(current_endpoint) - pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) - exe.run(pserver_startup) - exe.run(pserver_prog) -elif training_role == "TRAINER": - trainer_prog = t.get_trainer_program() - exe.run(fluid.default_startup_program()) - - for pass_id in range(PASS_NUM): - accuracy.reset(exe) - for data in train_reader(): - loss, acc = exe.run(trainer_prog, - feed=feeder.feed(data), - fetch_list=[avg_cost] + accuracy.metrics) - pass_acc = accuracy.eval(exe) - print("pass_id:" + str(pass_id) + "loss:" + str(loss) + " pass_acc:" - + str(pass_acc)) - # this model is slow, so if we can train two mini batches, - # we think it works properly. - print("trainer run end") -else: - print("environment var TRAINER_ROLE should be TRAINER os PSERVER") -exit(1) diff --git a/python/paddle/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py b/python/paddle/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py deleted file mode 100644 index 3ec85517ab..0000000000 --- a/python/paddle/fluid/tests/book_distribute/notest_dist_label_semantic_roles.py +++ /dev/null @@ -1,241 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import math - -import numpy as np -import paddle.v2 as paddle -import paddle.v2.dataset.conll05 as conll05 -import paddle.fluid as fluid -import time -import os - -word_dict, verb_dict, label_dict = conll05.get_dict() -word_dict_len = len(word_dict) -label_dict_len = len(label_dict) -pred_len = len(verb_dict) - -mark_dict_len = 2 -word_dim = 32 -mark_dim = 5 -hidden_dim = 512 -depth = 8 -mix_hidden_lr = 1e-3 - -IS_SPARSE = True -PASS_NUM = 10 -BATCH_SIZE = 20 - -embedding_name = 'emb' - - -def load_parameter(file_name, h, w): - with open(file_name, 'rb') as f: - f.read(16) # skip header. 
- return np.fromfile(f, dtype=np.float32).reshape(h, w) - - -def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, - **ignored): - # 8 features - predicate_embedding = fluid.layers.embedding( - input=predicate, - size=[pred_len, word_dim], - dtype='float32', - is_sparse=IS_SPARSE, - param_attr='vemb') - - mark_embedding = fluid.layers.embedding( - input=mark, - size=[mark_dict_len, mark_dim], - dtype='float32', - is_sparse=IS_SPARSE) - - word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2] - emb_layers = [ - fluid.layers.embedding( - size=[word_dict_len, word_dim], - input=x, - param_attr=fluid.ParamAttr( - name=embedding_name, trainable=False)) for x in word_input - ] - emb_layers.append(predicate_embedding) - emb_layers.append(mark_embedding) - - hidden_0_layers = [ - fluid.layers.fc(input=emb, size=hidden_dim) for emb in emb_layers - ] - - hidden_0 = fluid.layers.sums(input=hidden_0_layers) - - lstm_0 = fluid.layers.dynamic_lstm( - input=hidden_0, - size=hidden_dim, - candidate_activation='relu', - gate_activation='sigmoid', - cell_activation='sigmoid') - - # stack L-LSTM and R-LSTM with direct edges - input_tmp = [hidden_0, lstm_0] - - for i in range(1, depth): - mix_hidden = fluid.layers.sums(input=[ - fluid.layers.fc(input=input_tmp[0], size=hidden_dim), - fluid.layers.fc(input=input_tmp[1], size=hidden_dim) - ]) - - lstm = fluid.layers.dynamic_lstm( - input=mix_hidden, - size=hidden_dim, - candidate_activation='relu', - gate_activation='sigmoid', - cell_activation='sigmoid', - is_reverse=((i % 2) == 1)) - - input_tmp = [mix_hidden, lstm] - - feature_out = fluid.layers.sums(input=[ - fluid.layers.fc(input=input_tmp[0], size=label_dict_len), - fluid.layers.fc(input=input_tmp[1], size=label_dict_len) - ]) - - return feature_out - - -def to_lodtensor(data, place): - seq_lens = [len(seq) for seq in data] - cur_len = 0 - lod = [cur_len] - for l in seq_lens: - cur_len += l - lod.append(cur_len) - flattened_data = np.concatenate(data, axis=0).astype("int64") - flattened_data = flattened_data.reshape([len(flattened_data), 1]) - res = fluid.LoDTensor() - res.set(flattened_data, place) - res.set_lod([lod]) - return res - - -def main(): - # define network topology - word = fluid.layers.data( - name='word_data', shape=[1], dtype='int64', lod_level=1) - predicate = fluid.layers.data( - name='verb_data', shape=[1], dtype='int64', lod_level=1) - ctx_n2 = fluid.layers.data( - name='ctx_n2_data', shape=[1], dtype='int64', lod_level=1) - ctx_n1 = fluid.layers.data( - name='ctx_n1_data', shape=[1], dtype='int64', lod_level=1) - ctx_0 = fluid.layers.data( - name='ctx_0_data', shape=[1], dtype='int64', lod_level=1) - ctx_p1 = fluid.layers.data( - name='ctx_p1_data', shape=[1], dtype='int64', lod_level=1) - ctx_p2 = fluid.layers.data( - name='ctx_p2_data', shape=[1], dtype='int64', lod_level=1) - mark = fluid.layers.data( - name='mark_data', shape=[1], dtype='int64', lod_level=1) - feature_out = db_lstm(**locals()) - target = fluid.layers.data( - name='target', shape=[1], dtype='int64', lod_level=1) - crf_cost = fluid.layers.linear_chain_crf( - input=feature_out, - label=target, - param_attr=fluid.ParamAttr( - name='crfw', learning_rate=mix_hidden_lr)) - avg_cost = fluid.layers.mean(crf_cost) - - # TODO(qiao) - # check other optimizers and check why out will be NAN - sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.0001) - optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) - - # TODO(qiao) - # add dependency track and move this config before optimizer - crf_decode = 
fluid.layers.crf_decoding( - input=feature_out, param_attr=fluid.ParamAttr(name='crfw')) - - chunk_evaluator = fluid.evaluator.ChunkEvaluator( - input=crf_decode, - label=target, - chunk_scheme="IOB", - num_chunk_types=int(math.ceil((label_dict_len - 1) / 2.0))) - - train_data = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.conll05.test(), buf_size=8192), - batch_size=BATCH_SIZE) - place = fluid.CPUPlace() - feeder = fluid.DataFeeder( - feed_list=[ - word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, predicate, mark, target - ], - place=place) - exe = fluid.Executor(place) - - t = fluid.DistributeTranspiler() - pserver_endpoints = os.getenv("PSERVERS") - # server endpoint for current node - current_endpoint = os.getenv("SERVER_ENDPOINT") - # run as trainer or parameter server - training_role = os.getenv( - "TRAINING_ROLE", "TRAINER") # get the training role: trainer/pserver - - t.transpile( - optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2) - - if training_role == "PSERVER": - if not current_endpoint: - print("need env SERVER_ENDPOINT") - exit(1) - pserver_prog = t.get_pserver_program(current_endpoint) - pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) - exe.run(pserver_startup) - exe.run(pserver_prog) - elif training_role == "TRAINER": - trainer_prog = t.get_trainer_program() - start_time = time.time() - batch_id = 0 - exe.run(fluid.default_startup_program()) - embedding_param = fluid.global_scope().find_var( - embedding_name).get_tensor() - embedding_param.set( - load_parameter(conll05.get_embedding(), word_dict_len, word_dim), - place) - for pass_id in xrange(PASS_NUM): - chunk_evaluator.reset(exe) - for data in train_data(): - cost, precision, recall, f1_score = exe.run( - trainer_prog, - feed=feeder.feed(data), - fetch_list=[avg_cost] + chunk_evaluator.metrics) - pass_precision, pass_recall, pass_f1_score = chunk_evaluator.eval( - exe) - - if batch_id % 10 == 0: - print("avg_cost:" + str(cost) + " precision:" + str( - precision) + " recall:" + str(recall) + " f1_score:" + - str(f1_score) + " pass_precision:" + str( - pass_precision) + " pass_recall:" + str( - pass_recall) + " pass_f1_score:" + str( - pass_f1_score)) - if batch_id != 0: - print("second per batch: " + str((time.time( - ) - start_time) / batch_id)) - - batch_id = batch_id + 1 - - -if __name__ == '__main__': - main() diff --git a/python/paddle/fluid/tests/book_distribute/notest_dist_word2vec.py b/python/paddle/fluid/tests/book_distribute/notest_dist_word2vec.py deleted file mode 100644 index 8164ba5428..0000000000 --- a/python/paddle/fluid/tests/book_distribute/notest_dist_word2vec.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
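The role dispatch that follows transpile() is identical in every migrated book test. As a sketch only, with a hypothetical helper name, assuming t is an already-transpiled fluid.DistributeTranspiler and train_loop accepts a main program:

    def run_role(exe, t, train_loop, current_endpoint, training_role):
        if training_role == "PSERVER":
            # Build and run the parameter-server program; exe.run blocks here,
            # serving parameter updates to the trainers.
            pserver_prog = t.get_pserver_program(current_endpoint)
            pserver_startup = t.get_startup_program(current_endpoint,
                                                    pserver_prog)
            exe.run(pserver_startup)
            exe.run(pserver_prog)
        elif training_role == "TRAINER":
            # Trainers run the normal training loop against the
            # transpiled trainer program.
            train_loop(t.get_trainer_program())

Factoring this out would avoid copying the block into each test, but this patch keeps the copies inline.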
- -from __future__ import print_function -import numpy as np -import paddle.v2 as paddle -import paddle.fluid as fluid -import os - -PASS_NUM = 100 -EMBED_SIZE = 32 -HIDDEN_SIZE = 256 -N = 5 -BATCH_SIZE = 32 -IS_SPARSE = True -TRAINERS = 2 - -word_dict = paddle.dataset.imikolov.build_dict() -dict_size = len(word_dict) - -first_word = fluid.layers.data(name='firstw', shape=[1], dtype='int64') -second_word = fluid.layers.data(name='secondw', shape=[1], dtype='int64') -third_word = fluid.layers.data(name='thirdw', shape=[1], dtype='int64') -forth_word = fluid.layers.data(name='forthw', shape=[1], dtype='int64') -next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64') - -embed_first = fluid.layers.embedding( - input=first_word, - size=[dict_size, EMBED_SIZE], - dtype='float32', - is_sparse=IS_SPARSE, - param_attr='shared_w') -embed_second = fluid.layers.embedding( - input=second_word, - size=[dict_size, EMBED_SIZE], - dtype='float32', - is_sparse=IS_SPARSE, - param_attr='shared_w') -embed_third = fluid.layers.embedding( - input=third_word, - size=[dict_size, EMBED_SIZE], - dtype='float32', - is_sparse=IS_SPARSE, - param_attr='shared_w') -embed_forth = fluid.layers.embedding( - input=forth_word, - size=[dict_size, EMBED_SIZE], - dtype='float32', - is_sparse=IS_SPARSE, - param_attr='shared_w') - -concat_embed = fluid.layers.concat( - input=[embed_first, embed_second, embed_third, embed_forth], axis=1) -hidden1 = fluid.layers.fc(input=concat_embed, size=HIDDEN_SIZE, act='sigmoid') -predict_word = fluid.layers.fc(input=hidden1, size=dict_size, act='softmax') -cost = fluid.layers.cross_entropy(input=predict_word, label=next_word) -avg_cost = fluid.layers.mean(cost) -sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) -optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) -train_reader = paddle.batch( - paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE) - -place = fluid.CPUPlace() -exe = fluid.Executor(place) - -t = fluid.DistributeTranspiler() -# all parameter server endpoints list for spliting parameters -pserver_endpoints = os.getenv("PSERVERS") -# server endpoint for current node -current_endpoint = os.getenv("SERVER_ENDPOINT") -# run as trainer or parameter server -training_role = os.getenv("TRAINING_ROLE", - "TRAINER") # get the training role: trainer/pserver - -t.transpile( - optimize_ops, params_grads, pservers=pserver_endpoints, trainers=TRAINERS) -if training_role == "PSERVER": - if not current_endpoint: - print("need env SERVER_ENDPOINT") - exit(1) - pserver_prog = t.get_pserver_program(current_endpoint) - pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) - exe.run(pserver_startup) - exe.run(pserver_prog) -elif training_role == "TRAINER": - feeder = fluid.DataFeeder( - feed_list=[first_word, second_word, third_word, forth_word, next_word], - place=place) - exe.run(fluid.default_startup_program()) - trainer_prog = t.get_trainer_program() - for pass_id in range(PASS_NUM): - for data in train_reader(): - avg_cost_np = exe.run(trainer_prog, - feed=feeder.feed(data), - fetch_list=[avg_cost]) - print("avg_cost_np", avg_cost_np) - if avg_cost_np[0] < 5.0: - exit( - 0) # if avg cost less than 10.0, we think our code is good. 
-else: - print("environment var TRAINER_ROLE should be TRAINER os PSERVER") -exit(1) diff --git a/python/paddle/fluid/tests/book_distribute/notest_machine_translation.py b/python/paddle/fluid/tests/book_distribute/notest_machine_translation.py deleted file mode 100644 index fee8db2497..0000000000 --- a/python/paddle/fluid/tests/book_distribute/notest_machine_translation.py +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import numpy as np -import paddle.v2 as paddle -import paddle.fluid as fluid -import paddle.fluid.core as core -import paddle.fluid.framework as framework -import paddle.fluid.layers as layers -from paddle.fluid.executor import Executor -import os - -dict_size = 30000 -source_dict_dim = target_dict_dim = dict_size -src_dict, trg_dict = paddle.dataset.wmt14.get_dict(dict_size) -hidden_dim = 32 -word_dim = 16 -IS_SPARSE = True -batch_size = 10 -max_length = 50 -topk_size = 50 -trg_dic_size = 10000 - -decoder_size = hidden_dim - - -def encoder_decoder(): - # encoder - src_word_id = layers.data( - name="src_word_id", shape=[1], dtype='int64', lod_level=1) - src_embedding = layers.embedding( - input=src_word_id, - size=[dict_size, word_dim], - dtype='float32', - is_sparse=IS_SPARSE, - param_attr=fluid.ParamAttr(name='vemb')) - - fc1 = fluid.layers.fc(input=src_embedding, size=hidden_dim * 4, act='tanh') - lstm_hidden0, lstm_0 = layers.dynamic_lstm(input=fc1, size=hidden_dim * 4) - encoder_out = layers.sequence_last_step(input=lstm_hidden0) - - # decoder - trg_language_word = layers.data( - name="target_language_word", shape=[1], dtype='int64', lod_level=1) - trg_embedding = layers.embedding( - input=trg_language_word, - size=[dict_size, word_dim], - dtype='float32', - is_sparse=IS_SPARSE, - param_attr=fluid.ParamAttr(name='vemb')) - - rnn = fluid.layers.DynamicRNN() - with rnn.block(): - current_word = rnn.step_input(trg_embedding) - mem = rnn.memory(init=encoder_out) - fc1 = fluid.layers.fc(input=[current_word, mem], - size=decoder_size, - act='tanh') - out = fluid.layers.fc(input=fc1, size=target_dict_dim, act='softmax') - rnn.update_memory(mem, fc1) - rnn.output(out) - - return rnn() - - -def to_lodtensor(data, place): - seq_lens = [len(seq) for seq in data] - cur_len = 0 - lod = [cur_len] - for l in seq_lens: - cur_len += l - lod.append(cur_len) - flattened_data = np.concatenate(data, axis=0).astype("int64") - flattened_data = flattened_data.reshape([len(flattened_data), 1]) - res = core.LoDTensor() - res.set(flattened_data, place) - res.set_lod([lod]) - return res - - -def main(): - rnn_out = encoder_decoder() - label = layers.data( - name="target_language_next_word", shape=[1], dtype='int64', lod_level=1) - cost = layers.cross_entropy(input=rnn_out, label=label) - avg_cost = fluid.layers.mean(cost) - - optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4) - optimize_ops, params_grads = optimizer.minimize(avg_cost) - - train_data = paddle.batch( - paddle.reader.shuffle( - 
paddle.dataset.wmt14.train(dict_size), buf_size=1000), - batch_size=batch_size) - - place = core.CPUPlace() - exe = Executor(place) - - t = fluid.DistributeTranspiler() - # all parameter server endpoints list for spliting parameters - pserver_endpoints = os.getenv("PSERVERS") - # server endpoint for current node - current_endpoint = os.getenv("SERVER_ENDPOINT") - # run as trainer or parameter server - training_role = os.getenv( - "TRAINING_ROLE", "TRAINER") # get the training role: trainer/pserver - - t.transpile( - optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2) - - if training_role == "PSERVER": - if not current_endpoint: - print("need env SERVER_ENDPOINT") - exit(1) - pserver_prog = t.get_pserver_program(current_endpoint) - pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) - exe.run(pserver_startup) - exe.run(pserver_prog) - elif training_role == "TRAINER": - trainer_prog = t.get_trainer_program() - exe.run(framework.default_startup_program()) - - batch_id = 0 - for pass_id in xrange(2): - for data in train_data(): - word_data = to_lodtensor(map(lambda x: x[0], data), place) - trg_word = to_lodtensor(map(lambda x: x[1], data), place) - trg_word_next = to_lodtensor(map(lambda x: x[2], data), place) - outs = exe.run(trainer_prog, - feed={ - 'src_word_id': word_data, - 'target_language_word': trg_word, - 'target_language_next_word': trg_word_next - }, - fetch_list=[avg_cost]) - avg_cost_val = np.array(outs[0]) - print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) + - " avg_cost=" + str(avg_cost_val)) - if batch_id > 3: - exit(0) - batch_id += 1 - else: - print("environment var TRAINER_ROLE should be TRAINER os PSERVER") - - -if __name__ == '__main__': - main() diff --git a/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py b/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py deleted file mode 100644 index b6ad6a992d..0000000000 --- a/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
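The to_lodtensor helpers in the scripts deleted above (and retained in the book tests) encode a batch of variable-length sequences as one flattened tensor plus cumulative length offsets. A minimal, Paddle-independent illustration of the offset scheme:

    def sequence_lod(seqs):
        # lengths [3, 1, 2] -> offsets [0, 3, 4, 6]; offsets[i]:offsets[i+1]
        # delimits sequence i within the flattened data.
        lod = [0]
        for seq in seqs:
            lod.append(lod[-1] + len(seq))
        return lod

    assert sequence_lod([[1, 2, 3], [4], [5, 6]]) == [0, 3, 4, 6]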
- -from __future__ import print_function -import numpy as np -import paddle.v2 as paddle -import paddle.fluid as fluid -import os - -images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype='float32') -label = fluid.layers.data(name='label', shape=[1], dtype='int64') -conv_pool_1 = fluid.nets.simple_img_conv_pool( - input=images, - filter_size=5, - num_filters=20, - pool_size=2, - pool_stride=2, - act="relu") -conv_pool_2 = fluid.nets.simple_img_conv_pool( - input=conv_pool_1, - filter_size=5, - num_filters=50, - pool_size=2, - pool_stride=2, - act="relu") - -predict = fluid.layers.fc(input=conv_pool_2, size=10, act="softmax") -cost = fluid.layers.cross_entropy(input=predict, label=label) -avg_cost = fluid.layers.mean(cost) -optimizer = fluid.optimizer.Adam(learning_rate=0.01) -optimize_ops, params_grads = optimizer.minimize(avg_cost) - -accuracy = fluid.evaluator.Accuracy(input=predict, label=label) - -BATCH_SIZE = 50 -PASS_NUM = 3 -train_reader = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.mnist.train(), buf_size=500), - batch_size=BATCH_SIZE) - -place = fluid.CPUPlace() -exe = fluid.Executor(place) - -pserver_endpoints = os.getenv("PSERVERS") # all pserver endpoints -trainers = int(os.getenv("TRAINERS")) # total trainer count -current_endpoint = os.getenv("SERVER_ENDPOINT") # current pserver endpoint -training_role = os.getenv("TRAINING_ROLE", - "TRAINER") # get the training role: trainer/pserver -if not current_endpoint: - print("need env SERVER_ENDPOINT") - exit(1) - -t = fluid.DistributeTranspiler() -t.transpile( - optimize_ops, params_grads, pservers=pserver_endpoints, trainers=trainers) - -if training_role == "PSERVER": - pserver_prog = t.get_pserver_program(current_endpoint) - pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) - exe.run(pserver_startup) - exe.run(pserver_prog) -elif training_role == "TRAINER": - trainer_prog = t.get_trainer_program() - feeder = fluid.DataFeeder(feed_list=[images, label], place=place) - # TODO(typhoonzero): change trainer startup program to fetch parameters from pserver - exe.run(fluid.default_startup_program()) - - for pass_id in range(PASS_NUM): - accuracy.reset(exe) - batch_id = 0 - for data in train_reader(): - loss, acc = exe.run(trainer_prog, - feed=feeder.feed(data), - fetch_list=[avg_cost] + accuracy.metrics) - pass_acc = accuracy.eval(exe) - if batch_id % 100 == 0: - print("batch_id %d, loss: %f, acc: %f" % - (batch_id, loss, pass_acc)) - batch_id += 1 - - pass_acc = accuracy.eval(exe) - print("pass_id=" + str(pass_id) + " pass_acc=" + str(pass_acc)) -else: - print("environment var TRAINER_ROLE should be TRAINER os PSERVER") diff --git a/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_mlp_dist.py b/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_mlp_dist.py deleted file mode 100644 index dad95c0f3f..0000000000 --- a/python/paddle/fluid/tests/book_distribute/notest_recognize_digits_mlp_dist.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function -import numpy as np -import paddle.v2 as paddle -import paddle.fluid as fluid -import os - -BATCH_SIZE = 128 -PASS_NUM = 100 - -images = fluid.layers.data(name='x', shape=[784], dtype='float32') - -# TODO(aroraabhinav) Add regularization and error clipping after -# Issue 7432(https://github.com/PaddlePaddle/Paddle/issues/7432) is resolved. -hidden1 = fluid.layers.fc(input=images, size=128, act='relu') -hidden2 = fluid.layers.fc(input=hidden1, size=64, act='relu') -predict = fluid.layers.fc(input=hidden2, size=10, act='softmax') - -label = fluid.layers.data(name='y', shape=[1], dtype='int64') - -cost = fluid.layers.cross_entropy(input=predict, label=label) -avg_cost = fluid.layers.mean(cost) - -optimizer = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9) -optimize_ops, params_grads = optimizer.minimize(avg_cost) - -accuracy = fluid.evaluator.Accuracy(input=predict, label=label) - -train_reader = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.mnist.train(), buf_size=8192), - batch_size=BATCH_SIZE) - -place = fluid.CPUPlace() -exe = fluid.Executor(place) - -t = fluid.DistributeTranspiler() -# all parameter server endpoints list for spliting parameters -pserver_endpoints = os.getenv("PSERVERS") -# server endpoint for current node -current_endpoint = os.getenv("SERVER_ENDPOINT") -# run as trainer or parameter server -training_role = os.getenv("TRAINING_ROLE", - "TRAINER") # get the training role: trainer/pserver -t.transpile(optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2) - -if training_role == "PSERVER": - if not current_endpoint: - print("need env SERVER_ENDPOINT") - exit(1) - pserver_prog = t.get_pserver_program(current_endpoint) - pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) - exe.run(pserver_startup) - exe.run(pserver_prog) -elif training_role == "TRAINER": - trainer_prog = t.get_trainer_program() - feeder = fluid.DataFeeder(feed_list=[images, label], place=place) - exe.run(fluid.default_startup_program()) - - for pass_id in range(PASS_NUM): - accuracy.reset(exe) - batch_id = 0 - for data in train_reader(): - loss, acc = exe.run(trainer_prog, - feed=feeder.feed(data), - fetch_list=[avg_cost] + accuracy.metrics) - pass_acc = accuracy.eval(exe) - if batch_id % 100 == 0: - print("batch_id %d, loss: %f, acc: %f" % - (batch_id, loss, pass_acc)) - batch_id += 1 - - pass_acc = accuracy.eval(exe) - print("pass_id=" + str(pass_id) + " pass_acc=" + str(pass_acc)) -else: - print("environment var TRAINER_ROLE should be TRAINER os PSERVER") diff --git a/python/paddle/fluid/tests/book_distribute/notest_recommender_system_dist.py b/python/paddle/fluid/tests/book_distribute/notest_recommender_system_dist.py deleted file mode 100644 index 741ec33639..0000000000 --- a/python/paddle/fluid/tests/book_distribute/notest_recommender_system_dist.py +++ /dev/null @@ -1,217 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import numpy as np -import os -import paddle.v2 as paddle -import paddle.fluid as fluid -import paddle.fluid.core as core -import paddle.fluid.layers as layers -import paddle.fluid.nets as nets -from paddle.fluid.optimizer import SGDOptimizer - -IS_SPARSE = True -BATCH_SIZE = 256 -PASS_NUM = 100 - - -def get_usr_combined_features(): - USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1 - uid = layers.data(name='user_id', shape=[1], dtype='int64') - usr_emb = layers.embedding( - input=uid, - dtype='float32', - size=[USR_DICT_SIZE, 32], - param_attr='user_table', - is_sparse=IS_SPARSE) - usr_fc = layers.fc(input=usr_emb, size=32) - USR_GENDER_DICT_SIZE = 2 - - usr_gender_id = layers.data(name='gender_id', shape=[1], dtype='int64') - usr_gender_emb = layers.embedding( - input=usr_gender_id, - size=[USR_GENDER_DICT_SIZE, 16], - param_attr='gender_table', - is_sparse=IS_SPARSE) - usr_gender_fc = layers.fc(input=usr_gender_emb, size=16) - - USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table) - usr_age_id = layers.data(name='age_id', shape=[1], dtype="int64") - usr_age_emb = layers.embedding( - input=usr_age_id, - size=[USR_AGE_DICT_SIZE, 16], - is_sparse=IS_SPARSE, - param_attr='age_table') - usr_age_fc = layers.fc(input=usr_age_emb, size=16) - - USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1 - usr_job_id = layers.data(name='job_id', shape=[1], dtype="int64") - usr_job_emb = layers.embedding( - input=usr_job_id, - size=[USR_JOB_DICT_SIZE, 16], - param_attr='job_table', - is_sparse=IS_SPARSE) - usr_job_fc = layers.fc(input=usr_job_emb, size=16) - - concat_embed = layers.concat( - input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], axis=1) - - usr_combined_features = layers.fc(input=concat_embed, size=200, act="tanh") - return usr_combined_features - - -def get_mov_combined_features(): - MOV_DICT_SIZE = paddle.dataset.movielens.max_movie_id() + 1 - mov_id = layers.data(name='movie_id', shape=[1], dtype='int64') - mov_emb = layers.embedding( - input=mov_id, - dtype='float32', - size=[MOV_DICT_SIZE, 32], - param_attr='movie_table', - is_sparse=IS_SPARSE) - mov_fc = layers.fc(input=mov_emb, size=32) - - CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories()) - category_id = layers.data(name='category_id', shape=[1], dtype='int64') - mov_categories_emb = layers.embedding( - input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE) - mov_categories_hidden = layers.sequence_pool( - input=mov_categories_emb, pool_type="sum") - - MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict()) - mov_title_id = layers.data(name='movie_title', shape=[1], dtype='int64') - mov_title_emb = layers.embedding( - input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32], is_sparse=IS_SPARSE) - mov_title_conv = nets.sequence_conv_pool( - input=mov_title_emb, - num_filters=32, - filter_size=3, - act="tanh", - pool_type="sum") - - concat_embed = layers.concat( - input=[mov_fc, mov_categories_hidden, mov_title_conv], axis=1) - - mov_combined_features = layers.fc(input=concat_embed, size=200, act="tanh") - return mov_combined_features - - -def model(): - usr_combined_features = get_usr_combined_features() - mov_combined_features = get_mov_combined_features() - - # need cos sim - inference = layers.cos_sim(X=usr_combined_features, Y=mov_combined_features) - scale_infer = layers.scale(x=inference, scale=5.0) - - label = layers.data(name='score', 
shape=[1], dtype='float32') - square_cost = layers.square_error_cost(input=scale_infer, label=label) - avg_cost = layers.mean(square_cost) - - return avg_cost - - -def func_feed(feeding, data, place): - feed_tensors = {} - for (key, idx) in feeding.iteritems(): - tensor = core.LoDTensor() - if key != "category_id" and key != "movie_title": - if key == "score": - numpy_data = np.array(map(lambda x: x[idx], data)).astype( - "float32") - else: - numpy_data = np.array(map(lambda x: x[idx], data)).astype( - "int64") - else: - numpy_data = map(lambda x: np.array(x[idx]).astype("int64"), data) - lod_info = [len(item) for item in numpy_data] - offset = 0 - lod = [offset] - for item in lod_info: - offset += item - lod.append(offset) - numpy_data = np.concatenate(numpy_data, axis=0) - tensor.set_lod([lod]) - - numpy_data = numpy_data.reshape([numpy_data.shape[0], 1]) - tensor.set(numpy_data, place) - feed_tensors[key] = tensor - return feed_tensors - - -def main(): - cost = model() - optimizer = SGDOptimizer(learning_rate=0.2) - optimize_ops, params_grads = optimizer.minimize(cost) - - train_reader = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.movielens.train(), buf_size=8192), - batch_size=BATCH_SIZE) - - place = fluid.CPUPlace() - exe = fluid.Executor(place) - - t = fluid.DistributeTranspiler() - - # all parameter server endpoints list for spliting parameters - pserver_endpoints = os.getenv("PSERVERS") - # server endpoint for current node - current_endpoint = os.getenv("SERVER_ENDPOINT") - # run as trainer or parameter server - training_role = os.getenv("TRAINING_ROLE", "TRAINER") - - t.transpile( - optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2) - - if training_role == "PSERVER": - if not current_endpoint: - print("need env SERVER_ENDPOINT") - exit(1) - pserver_prog = t.get_pserver_program(current_endpoint) - pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) - exe.run(pserver_startup) - exe.run(pserver_prog) - elif training_role == "TRAINER": - exe.run(fluid.default_startup_program()) - trainer_prog = t.get_trainer_program() - - feeding = { - 'user_id': 0, - 'gender_id': 1, - 'age_id': 2, - 'job_id': 3, - 'movie_id': 4, - 'category_id': 5, - 'movie_title': 6, - 'score': 7 - } - - for pass_id in range(PASS_NUM): - for data in train_reader(): - outs = exe.run(trainer_prog, - feed=func_feed(feeding, data, place), - fetch_list=[cost]) - out = np.array(outs[0]) - print("cost=" + str(out[0])) - if out[0] < 6.0: - print("Training complete. Average cost is less than 6.0.") - # if avg cost less than 6.0, we think our code is good. - exit(0) - else: - print("environment var TRAINER_ROLE should be TRAINER os PSERVER") - - -if __name__ == '__main__': - main() diff --git a/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py b/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py deleted file mode 100644 index 0467184bbf..0000000000 --- a/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_conv_dist.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function -import os -import numpy as np -import paddle.v2 as paddle -import paddle.fluid as fluid - - -def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32, - hid_dim=32): - emb = fluid.layers.embedding(input=data, size=[input_dim, emb_dim]) - conv_3 = fluid.nets.sequence_conv_pool( - input=emb, - num_filters=hid_dim, - filter_size=3, - act="tanh", - pool_type="sqrt") - conv_4 = fluid.nets.sequence_conv_pool( - input=emb, - num_filters=hid_dim, - filter_size=4, - act="tanh", - pool_type="sqrt") - prediction = fluid.layers.fc(input=[conv_3, conv_4], - size=class_dim, - act="softmax") - cost = fluid.layers.cross_entropy(input=prediction, label=label) - avg_cost = fluid.layers.mean(cost) - adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002) - optimize_ops, params_grads = adam_optimizer.minimize(avg_cost) - accuracy = fluid.evaluator.Accuracy(input=prediction, label=label) - return avg_cost, accuracy, accuracy.metrics[0], optimize_ops, params_grads - - -def to_lodtensor(data, place): - seq_lens = [len(seq) for seq in data] - cur_len = 0 - lod = [cur_len] - for l in seq_lens: - cur_len += l - lod.append(cur_len) - flattened_data = np.concatenate(data, axis=0).astype("int64") - flattened_data = flattened_data.reshape([len(flattened_data), 1]) - res = fluid.LoDTensor() - res.set(flattened_data, place) - res.set_lod([lod]) - return res - - -def main(): - BATCH_SIZE = 100 - PASS_NUM = 5 - - word_dict = paddle.dataset.imdb.word_dict() - dict_dim = len(word_dict) - class_dim = 2 - - data = fluid.layers.data( - name="words", shape=[1], dtype="int64", lod_level=1) - label = fluid.layers.data(name="label", shape=[1], dtype="int64") - cost, accuracy, acc_out, optimize_ops, params_grads = convolution_net( - data, label, input_dim=dict_dim, class_dim=class_dim) - - train_data = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.imdb.train(word_dict), buf_size=1000), - batch_size=BATCH_SIZE) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - - t = fluid.DistributeTranspiler() - - # all parameter server endpoints list for spliting parameters - pserver_endpoints = os.getenv("PSERVERS") - # server endpoint for current node - current_endpoint = os.getenv("SERVER_ENDPOINT") - # run as trainer or parameter server - training_role = os.getenv( - "TRAINING_ROLE", "TRAINER") # get the training role: trainer/pserver - - t.transpile( - optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2) - - if training_role == "PSERVER": - if not current_endpoint: - print("need env SERVER_ENDPOINT") - exit(1) - pserver_prog = t.get_pserver_program(current_endpoint) - pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) - exe.run(pserver_startup) - exe.run(pserver_prog) - elif training_role == "TRAINER": - exe.run(fluid.default_startup_program()) - trainer_prog = t.get_trainer_program() - feeder = fluid.DataFeeder(feed_list=[data, label], place=place) - - for pass_id in xrange(PASS_NUM): - accuracy.reset(exe) - for data in train_data(): - cost_val, acc_val = exe.run(trainer_prog, - feed=feeder.feed(data), - fetch_list=[cost, 
acc_out]) - pass_acc = accuracy.eval(exe) - print("cost=" + str(cost_val) + " acc=" + str(acc_val) + - " pass_acc=" + str(pass_acc)) - if cost_val < 1.0 and pass_acc > 0.8: - exit(0) - else: - print("environment var TRAINER_ROLE should be TRAINER os PSERVER") - - -if __name__ == '__main__': - main() diff --git a/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py b/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py deleted file mode 100644 index 1e13385852..0000000000 --- a/python/paddle/fluid/tests/book_distribute/notest_understand_sentiment_dynamic_lstm.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import numpy as np -import os -import paddle.v2 as paddle -import paddle.fluid as fluid - - -def stacked_lstm_net(data, - label, - input_dim, - class_dim=2, - emb_dim=128, - hid_dim=512, - stacked_num=3): - assert stacked_num % 2 == 1 - - emb = fluid.layers.embedding(input=data, size=[input_dim, emb_dim]) - # add bias attr - - # TODO(qijun) linear act - fc1 = fluid.layers.fc(input=emb, size=hid_dim) - lstm1, cell1 = fluid.layers.dynamic_lstm(input=fc1, size=hid_dim) - - inputs = [fc1, lstm1] - - for i in range(2, stacked_num + 1): - fc = fluid.layers.fc(input=inputs, size=hid_dim) - lstm, cell = fluid.layers.dynamic_lstm( - input=fc, size=hid_dim, is_reverse=(i % 2) == 0) - inputs = [fc, lstm] - - fc_last = fluid.layers.sequence_pool(input=inputs[0], pool_type='max') - lstm_last = fluid.layers.sequence_pool(input=inputs[1], pool_type='max') - - prediction = fluid.layers.fc(input=[fc_last, lstm_last], - size=class_dim, - act='softmax') - cost = fluid.layers.cross_entropy(input=prediction, label=label) - avg_cost = fluid.layers.mean(cost) - adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002) - optimize_ops, params_grads = adam_optimizer.minimize(avg_cost) - accuracy = fluid.evaluator.Accuracy(input=prediction, label=label) - return avg_cost, accuracy, accuracy.metrics[0], optimize_ops, params_grads - - -def to_lodtensor(data, place): - seq_lens = [len(seq) for seq in data] - cur_len = 0 - lod = [cur_len] - for l in seq_lens: - cur_len += l - lod.append(cur_len) - flattened_data = np.concatenate(data, axis=0).astype("int64") - flattened_data = flattened_data.reshape([len(flattened_data), 1]) - res = fluid.LoDTensor() - res.set(flattened_data, place) - res.set_lod([lod]) - return res - - -def main(): - BATCH_SIZE = 100 - PASS_NUM = 5 - - word_dict = paddle.dataset.imdb.word_dict() - print "loaded word dict successfully" - dict_dim = len(word_dict) - class_dim = 2 - - data = fluid.layers.data( - name="words", shape=[1], dtype="int64", lod_level=1) - label = fluid.layers.data(name="label", shape=[1], dtype="int64") - cost, accuracy, acc_out, optimize_ops, params_grads = stacked_lstm_net( - data, label, input_dim=dict_dim, class_dim=class_dim) - - train_data = paddle.batch( - paddle.reader.shuffle( - 
paddle.dataset.imdb.train(word_dict), buf_size=1000), - batch_size=BATCH_SIZE) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - feeder = fluid.DataFeeder(feed_list=[data, label], place=place) - - t = fluid.DistributeTranspiler() - # all parameter server endpoints list for spliting parameters - pserver_endpoints = os.getenv("PSERVERS") - # server endpoint for current node - current_endpoint = os.getenv("SERVER_ENDPOINT") - # run as trainer or parameter server - training_role = os.getenv( - "TRAINING_ROLE", "TRAINER") # get the training role: trainer/pserver - - t.transpile( - optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2) - - if training_role == "PSERVER": - if not current_endpoint: - print("need env SERVER_ENDPOINT") - exit(1) - pserver_prog = t.get_pserver_program(current_endpoint) - pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) - exe.run(pserver_startup) - exe.run(pserver_prog) - elif training_role == "TRAINER": - exe.run(fluid.default_startup_program()) - trainer_prog = t.get_trainer_program() - for pass_id in xrange(PASS_NUM): - accuracy.reset(exe) - for data in train_data(): - cost_val, acc_val = exe.run(trainer_prog, - feed=feeder.feed(data), - fetch_list=[cost, acc_out]) - pass_acc = accuracy.eval(exe) - print("cost=" + str(cost_val) + " acc=" + str(acc_val) + - " pass_acc=" + str(pass_acc)) - if cost_val < 1.0 and acc_val > 0.8: - exit(0) - else: - print("environment var TRAINER_ROLE should be TRAINER os PSERVER") - - -if __name__ == '__main__': - main() -- GitLab From b5c920920c7e36eca9464b839d455848d0501efd Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Tue, 27 Feb 2018 20:05:14 +0800 Subject: [PATCH 208/217] fix conv_op bug --- paddle/fluid/operators/conv_op.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc index 2ecece7073..f703b4cac1 100644 --- a/paddle/fluid/operators/conv_op.cc +++ b/paddle/fluid/operators/conv_op.cc @@ -55,7 +55,7 @@ void ConvOp::InferShape(framework::InferShapeContext* ctx) const { std::vector output_shape({in_dims[0], filter_dims[0]}); for (size_t i = 0; i < strides.size(); ++i) { PADDLE_ENFORCE(in_dims[i + 2] + 2 * paddings[i] - - (dilations[i] * (filter_dims[i + 2] - 1) + 1) > + (dilations[i] * (filter_dims[i + 2] - 1) + 1) >= 0, "Due to the settings of paddings, filter_dims and " "dilations, the output size is less than 0, please check " -- GitLab From 7cf2c05fd273cad93724a3ab61b83afdb39490c0 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Tue, 27 Feb 2018 20:57:12 +0800 Subject: [PATCH 209/217] add unit test for input's size is 1x1 --- .../fluid/tests/unittests/test_conv2d_op.py | 22 ++++++++++++++++++- .../unittests/test_conv2d_transpose_op.py | 3 ++- .../fluid/tests/unittests/test_conv3d_op.py | 22 +++++++++++++++++++ .../unittests/test_conv3d_transpose_op.py | 3 ++- 4 files changed, 47 insertions(+), 3 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_op.py index 1fada38a03..1321cfd484 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_op.py @@ -210,6 +210,19 @@ class TestWithDilation(TestConv2dOp): self.groups = 3 +class TestWithInput1x1Filter1x1(TestConv2dOp): + def init_test_case(self): + self.pad = [0, 0] + self.stride = [1, 1] + self.input_size = [2, 3, 1, 1] # NCHW + assert np.mod(self.input_size[1], self.groups) == 0 + f_c = self.input_size[1] / 
self.groups +        self.filter_size = [6, f_c, 1, 1] + +    def init_group(self): +        self.groups = 3 + +  #----------------Conv2dCUDNN---------------- class TestCUDNN(TestConv2dOp): def init_op_type(self): @@ -241,6 +254,12 @@ class TestCUDNNWith1x1(TestWith1x1): self.op_type = "conv2d" +class TestCUDNNWithInput1x1Filter1x1(TestWithInput1x1Filter1x1): +    def init_op_type(self): +        self.use_cudnn = True +        self.op_type = "conv2d" + + class TestDepthwiseConv(TestConv2dOp): def init_test_case(self): self.pad = [1, 1] @@ -265,7 +284,8 @@ class TestDepthwiseConv2(TestConv2dOp): self.op_type = "depthwise_conv2d" -# cudnn v5 does not support dilation conv. +# Please don't remove the following code. +# Currently, CI uses cuDNN v5.0, which does not support dilation conv. # class TestCUDNNWithDilation(TestWithDilation): #     def init_op_type(self): #         self.op_type = "conv_cudnn" diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py index 9831b7eb12..d864b9b348 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py @@ -200,7 +200,8 @@ class TestCUDNNWithStride(TestWithStride): self.op_type = "conv2d_transpose" -# #cudnn v5 does not support dilation conv. +# Please don't remove the following code. +# Currently, CI uses cuDNN v5.0, which does not support dilation conv. # class TestCUDNNWithDilation(TestWithDilation): #     def init_test_case(self): #         self.pad = [1, 1] diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_op.py b/python/paddle/fluid/tests/unittests/test_conv3d_op.py index 4d3df5e33c..d5dd63e873 100644 --- a/python/paddle/fluid/tests/unittests/test_conv3d_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv3d_op.py @@ -200,6 +200,22 @@ class TestWith1x1(TestConv3dOp): self.groups = 3 +class TestWithInput1x1Filter1x1(TestConv3dOp): +    def init_test_case(self): +        self.pad = [0, 0, 0] +        self.stride = [1, 1, 1] +        self.input_size = [2, 3, 1, 1, 1]  # NCDHW +        assert np.mod(self.input_size[1], self.groups) == 0 +        f_c = self.input_size[1] / self.groups +        self.filter_size = [6, f_c, 1, 1, 1] + +    def init_dilation(self): +        self.dilations = [1, 1, 1] + +    def init_group(self): +        self.groups = 3 + + class TestWithDilation(TestConv3dOp): def init_test_case(self): self.pad = [0, 0, 0] @@ -240,6 +256,12 @@ class TestWith1x1CUDNN(TestWith1x1): self.op_type = "conv3d" +class TestWithInput1x1Filter1x1CUDNN(TestWithInput1x1Filter1x1): +    def init_op_type(self): +        self.use_cudnn = True +        self.op_type = "conv3d" + + # FIXME(typhoonzero): find a way to determine if # using cudnn > 6 in python # class TestWithDilationCUDNN(TestWithDilation): diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py b/python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py index a79bfa13d6..55ba238710 100644 --- a/python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py @@ -207,7 +207,8 @@ class TestCUDNNWithStride(TestWithStride): self.op_type = "conv3d_transpose" -# Please don't remove the following code. +# Currently, CI uses cuDNN v5.0, which does not support dilation conv.
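# A minimal sketch (not part of the test suite; the helper name below is
# illustrative) of the output-size arithmetic the new 1x1 tests above
# exercise, mirroring ConvOutputSize from conv_op.h later in this series:
def _conv_output_size(input_size, filter_size, dilation, padding, stride):
    dkernel = dilation * (filter_size - 1) + 1  # effective (dilated) kernel size
    return (input_size + 2 * padding - dkernel) // stride + 1

# A 1x1 input with a 1x1 filter, stride 1 and zero padding gives output size 1;
# this case is accepted only because the earlier conv_op.cc fix relaxed ">" to ">=".
assert _conv_output_size(1, 1, 1, 0, 1) == 1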
# class TestCUDNNWithDilation(TestWithDilation): # def init_test_case(self): # self.pad = [1, 1, 1] -- GitLab From 0b73cb33586de115764db6e0faa54f345fef5e08 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Tue, 27 Feb 2018 21:20:58 +0800 Subject: [PATCH 210/217] remove `import paddle.fluid` in conf.py.in --- doc/templates/conf.py.cn.in | 1 - doc/templates/conf.py.en.in | 1 - 2 files changed, 2 deletions(-) diff --git a/doc/templates/conf.py.cn.in b/doc/templates/conf.py.cn.in index c8da4a790b..d134aad794 100644 --- a/doc/templates/conf.py.cn.in +++ b/doc/templates/conf.py.cn.in @@ -18,7 +18,6 @@ import shlex from recommonmark import parser, transform import paddle import paddle.v2 -import paddle.fluid MarkdownParser = parser.CommonMarkParser AutoStructify = transform.AutoStructify diff --git a/doc/templates/conf.py.en.in b/doc/templates/conf.py.en.in index a4cb2b7170..1f057d2e83 100644 --- a/doc/templates/conf.py.en.in +++ b/doc/templates/conf.py.en.in @@ -18,7 +18,6 @@ import shlex from recommonmark import parser, transform import paddle import paddle.v2 -import paddle.fluid MarkdownParser = parser.CommonMarkParser -- GitLab From a779b424507b85f6f47d6d96222474d983806179 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Tue, 27 Feb 2018 22:46:50 +0800 Subject: [PATCH 211/217] follow comments --- paddle/fluid/operators/conv_op.cc | 6 ------ paddle/fluid/operators/conv_op.h | 9 ++++++++- paddle/fluid/operators/pool_op.cc | 5 +++++ 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc index f703b4cac1..83b7708bf3 100644 --- a/paddle/fluid/operators/conv_op.cc +++ b/paddle/fluid/operators/conv_op.cc @@ -54,12 +54,6 @@ void ConvOp::InferShape(framework::InferShapeContext* ctx) const { std::vector output_shape({in_dims[0], filter_dims[0]}); for (size_t i = 0; i < strides.size(); ++i) { - PADDLE_ENFORCE(in_dims[i + 2] + 2 * paddings[i] - - (dilations[i] * (filter_dims[i + 2] - 1) + 1) >= - 0, - "Due to the settings of paddings, filter_dims and " - "dilations, the output size is less than 0, please check " - "again."); output_shape.push_back(ConvOutputSize(in_dims[i + 2], filter_dims[i + 2], dilations[i], paddings[i], strides[i])); diff --git a/paddle/fluid/operators/conv_op.h b/paddle/fluid/operators/conv_op.h index c93c2e73f7..12b45f1d65 100644 --- a/paddle/fluid/operators/conv_op.h +++ b/paddle/fluid/operators/conv_op.h @@ -31,7 +31,14 @@ using Tensor = framework::Tensor; inline int ConvOutputSize(int input_size, int filter_size, int dilation, int padding, int stride) { const int dkernel = dilation * (filter_size - 1) + 1; - const int output_size = (input_size + 2 * padding - dkernel) / stride + 1; + int output_size = (input_size + 2 * padding - dkernel) / stride + 1; + PADDLE_ENFORCE( + output_size > 0, + "Due to the settings of padding(%d), filter_size(%d), dilation(%d) and " + "stride(%d), the output size is less than 0, please check " + "again. 
Input_size:%d", + padding, filter_size, dilation, stride, input_size); + return output_size; } inline bool IsExpand(std::vector& filter_dim, diff --git a/paddle/fluid/operators/pool_op.cc b/paddle/fluid/operators/pool_op.cc index c7729ad132..a87a3511ee 100644 --- a/paddle/fluid/operators/pool_op.cc +++ b/paddle/fluid/operators/pool_op.cc @@ -19,6 +19,11 @@ namespace operators { int PoolOutputSize(int input_size, int filter_size, int padding, int stride) { int output_size = (input_size - filter_size + 2 * padding) / stride + 1; + PADDLE_ENFORCE(output_size > 0, + "Due to the settings of padding(%d), filter_size(%d) and " + "stride(%d), the output size is less than 0, please check " + "again. Input_size:%d", + padding, filter_size, stride, input_size); return output_size; } -- GitLab From 97a132d5a9a4432996c6fe0b7093e2f5d1ae329e Mon Sep 17 00:00:00 2001 From: gongweibao Date: Wed, 28 Feb 2018 02:18:00 +0000 Subject: [PATCH 212/217] init --- doc/howto/cluster/multi_cluster/index_cn.rst | 24 ++++++++++++++------ 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/doc/howto/cluster/multi_cluster/index_cn.rst b/doc/howto/cluster/multi_cluster/index_cn.rst index ef56b6ddb3..11f49d619f 100644 --- a/doc/howto/cluster/multi_cluster/index_cn.rst +++ b/doc/howto/cluster/multi_cluster/index_cn.rst @@ -1,14 +1,24 @@ -在不同集群中运行 +集群训练 ================ +用户的集群环境不尽相同,为了方便大家的部署,我们提供了多种的集群部署方式,方便提交集群训练任务,以下将一一介绍: -PaddlePaddle可以使用多种分布式计算平台构建分布式计算任务,包括: -- `Kubernetes `_ Google开源的容器集群的调度框架,支持大规模集群生产环境的完整集群方案。 -- `OpenMPI `_ 成熟的高性能并行计算框架。 -- `Fabric `_ 集群管理工具。可以使用`Fabric`编写集群任务提交和管理脚本。 +`Kubernetes `_ 是Google开源的容器集群的调度框架,支持大规模集群生产环境的完整集群方案。以下指南展示了PaddlePaddle对Kubernetes的支持: -对于不同的集群平台,会分别介绍集群作业的启动和停止方法。这些例子都可以在 `cluster_train_v2 `_ 找到。 +- `Kubernetes单机训练 `_ +- `Kubernetes分布式训练 `_ + +`OpenMPI `_ 是成熟的高性能并行计算框架,在HPC领域使用非常的广泛。以下指南介绍了如何使用OpenMPI来搭建PaddlePaddle的集群训练任务: + +- `在OpenMPI集群中提交训练作业 `_ + +`Fabric `_ 是一个方便的程序部署和管理工具。我们提供了使用Fabric 进行部署、管理的方法,如果想详细了解,请阅读以下指南: + +- `使用fabric启动集群训练 `_ + +我们也支持在AWS上部署PaddlePaddle,详细请了解: + +- `Distributed PaddlePaddle Training on AWS with Kubernetes `_ -在使用分布式计算平台进行训练时,任务被调度在集群中时,分布式计算平台通常会通过API或者环境变量提供任务运行需要的参数,比如节点的ID、IP和任务节点个数等。 .. toctree:: :maxdepth: 1 -- GitLab From f449180b1cae6731b5ae54ab49d812daceb477fc Mon Sep 17 00:00:00 2001 From: qingqing01 Date: Wed, 28 Feb 2018 10:47:42 +0800 Subject: [PATCH 213/217] Register more data type for reshape operator. 
(#8617) --- paddle/fluid/operators/reshape_op.cc | 13 +++++++++---- paddle/fluid/operators/reshape_op.cu | 16 ++++++++++------ 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/paddle/fluid/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc index a90ffb4ff3..3580932356 100644 --- a/paddle/fluid/operators/reshape_op.cc +++ b/paddle/fluid/operators/reshape_op.cc @@ -121,10 +121,15 @@ class ReshapeGradOp : public framework::OperatorWithKernel { } // namespace operators } // namespace paddle namespace ops = paddle::operators; +using CPU = paddle::platform::CPUDeviceContext; REGISTER_OP(reshape, ops::ReshapeOp, ops::ReshapeOpMaker, reshape_grad, ops::ReshapeGradOp); -REGISTER_OP_CPU_KERNEL(reshape, - ops::ReshapeKernel); -REGISTER_OP_CPU_KERNEL( - reshape_grad, ops::ReshapeGradKernel); +REGISTER_OP_CPU_KERNEL(reshape, ops::ReshapeKernel, + ops::ReshapeKernel, + ops::ReshapeKernel, + ops::ReshapeKernel); +REGISTER_OP_CPU_KERNEL(reshape_grad, ops::ReshapeGradKernel, + ops::ReshapeGradKernel, + ops::ReshapeGradKernel, + ops::ReshapeGradKernel); diff --git a/paddle/fluid/operators/reshape_op.cu b/paddle/fluid/operators/reshape_op.cu index d5ceaf784c..c628c634e2 100644 --- a/paddle/fluid/operators/reshape_op.cu +++ b/paddle/fluid/operators/reshape_op.cu @@ -13,10 +13,14 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/reshape_op.h" +using CUDA = paddle::platform::CUDADeviceContext; -REGISTER_OP_CUDA_KERNEL( - reshape, - paddle::operators::ReshapeKernel); -REGISTER_OP_CUDA_KERNEL( - reshape_grad, - paddle::operators::ReshapeGradKernel); +REGISTER_OP_CUDA_KERNEL(reshape, paddle::operators::ReshapeKernel, + paddle::operators::ReshapeKernel, + paddle::operators::ReshapeKernel, + paddle::operators::ReshapeKernel); +REGISTER_OP_CUDA_KERNEL(reshape_grad, + paddle::operators::ReshapeGradKernel, + paddle::operators::ReshapeGradKernel, + paddle::operators::ReshapeGradKernel, + paddle::operators::ReshapeGradKernel); -- GitLab From 4ef4bd8e8eea6aa260d6cdd7661537b03cd35aaf Mon Sep 17 00:00:00 2001 From: gongweibao Date: Wed, 28 Feb 2018 02:56:43 +0000 Subject: [PATCH 214/217] clean up --- doc/howto/cluster/multi_cluster/index_cn.rst | 27 ++++++++++++-------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/doc/howto/cluster/multi_cluster/index_cn.rst b/doc/howto/cluster/multi_cluster/index_cn.rst index 11f49d619f..e98aa1c3c7 100644 --- a/doc/howto/cluster/multi_cluster/index_cn.rst +++ b/doc/howto/cluster/multi_cluster/index_cn.rst @@ -4,27 +4,32 @@ `Kubernetes `_ 是Google开源的容器集群的调度框架,支持大规模集群生产环境的完整集群方案。以下指南展示了PaddlePaddle对Kubernetes的支持: -- `Kubernetes单机训练 `_ -- `Kubernetes分布式训练 `_ +.. toctree:: + :maxdepth: 1 + + k8s_cn.md + k8s_distributed_cn.md `OpenMPI `_ 是成熟的高性能并行计算框架,在HPC领域使用非常的广泛。以下指南介绍了如何使用OpenMPI来搭建PaddlePaddle的集群训练任务: -- `在OpenMPI集群中提交训练作业 `_ +.. toctree:: + :maxdepth: 1 -`Fabric `_ 是一个方便的程序部署和管理工具。我们提供了使用Fabric 进行部署、管理的方法,如果想详细了解,请阅读以下指南: + openmpi_cn.md -- `使用fabric启动集群训练 `_ +`Fabric `_ 是一个方便的程序部署和管理工具。我们提供了使用Fabric 进行部署、管理的方法,如果想详细了解,请阅读以下指南: -我们也支持在AWS上部署PaddlePaddle,详细请了解: +.. toctree:: + :maxdepth: 1 -- `Distributed PaddlePaddle Training on AWS with Kubernetes `_ + fabric_cn.md +我们也支持在AWS上部署PaddlePaddle,详细请了解: .. 
toctree:: :maxdepth: 1 k8s_aws_cn.md +您可以在 `cluster_train_v2 `_ 找到以上相关的例子。 + -- GitLab From 6f4c2f0a4689f55b290d52d169089389e6eca6be Mon Sep 17 00:00:00 2001 From: gongweibao Date: Wed, 28 Feb 2018 03:22:00 +0000 Subject: [PATCH 215/217] follow comments --- doc/howto/cluster/multi_cluster/index_cn.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/howto/cluster/multi_cluster/index_cn.rst b/doc/howto/cluster/multi_cluster/index_cn.rst index e98aa1c3c7..eabf95eda0 100644 --- a/doc/howto/cluster/multi_cluster/index_cn.rst +++ b/doc/howto/cluster/multi_cluster/index_cn.rst @@ -1,4 +1,4 @@ -集群训练 +在不同集群中运行 ================ 用户的集群环境不尽相同,为了方便大家的部署,我们提供了多种的集群部署方式,方便提交集群训练任务,以下将一一介绍: -- GitLab From f0548679b3e3f1194591e80af8a4114dc7da10e5 Mon Sep 17 00:00:00 2001 From: Yiqun Liu Date: Wed, 28 Feb 2018 11:45:11 +0800 Subject: [PATCH 216/217] Get rid of the dependency of Go compiler when WITH_GOLANG is OFF. (#8610) * Get rid of the dependency of Go compiler when WITH_GOLANG is OFF. * Minor modification to the documentation. * Remove the downloading and installing of the Go compiler when building the development Docker image for Android. * Minor modification to the documentation's format. * Change the default value of WITH_GOLANG to OFF in paddle/scripts/docker/build.sh. --- CMakeLists.txt | 11 +++++---- Dockerfile.android | 10 -------- .../build_from_source_cn.rst | 2 +- .../build_from_source_en.rst | 2 +- doc/mobile/cross_compiling_for_android_cn.md | 24 +++++++++++++------ doc/mobile/cross_compiling_for_android_en.md | 22 ++++++++++------- paddle/scripts/docker/README.md | 2 +- paddle/scripts/docker/build.sh | 4 ++-- 8 files changed, 42 insertions(+), 35 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c5552d6ec9..a2f440c2d0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -19,7 +19,7 @@ set(PADDLE_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}) include(system) -project(paddle CXX C Go) +project(paddle CXX C) message(STATUS "CXX compiler: ${CMAKE_CXX_COMPILER}, version: " "${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION}") message(STATUS "C compiler: ${CMAKE_C_COMPILER}, version: " "${CMAKE_C_COMPILER_ID} ${CMAKE_C_COMPILER_VERSION}") @@ -175,7 +175,7 @@ set(EXTERNAL_LIBS ) if(WITH_GPU) - include(cuda) + include(cuda) endif(WITH_GPU) if(WITH_MKLML) @@ -202,17 +202,18 @@ endif() # "add_subdirectory(paddle)" and "add_subdirectory(python)" should be # placed after this block, because they depends on it.
if(WITH_GOLANG) + enable_language(Go) add_subdirectory(go) endif(WITH_GOLANG) set(PADDLE_PYTHON_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/python/build") -SET(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O3 -g -DNDEBUG") -SET(CMAKE_C_FLAGS_RELWITHDEBINFO "-O3 -g -DNDEBUG") +set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O3 -g -DNDEBUG") +set(CMAKE_C_FLAGS_RELWITHDEBINFO "-O3 -g -DNDEBUG") add_subdirectory(paddle) if(WITH_PYTHON) - add_subdirectory(python) + add_subdirectory(python) endif() if(WITH_DOC) diff --git a/Dockerfile.android b/Dockerfile.android index 9d13a414f6..cc022d596b 100644 --- a/Dockerfile.android +++ b/Dockerfile.android @@ -21,16 +21,6 @@ RUN apt-get update && \ wget curl tar unzip gcc g++ locales clang-format-3.8 swig cmake && \ apt-get clean -y -# Install Go and glide -RUN wget -qO- go.tgz https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz | \ - tar -xz -C /usr/local && \ - mkdir /root/gopath && \ - mkdir /root/gopath/bin && \ - mkdir /root/gopath/src -ENV GOROOT=/usr/local/go GOPATH=/root/gopath -# should not be in the same line with GOROOT definition, otherwise docker build could not find GOROOT. -ENV PATH=${PATH}:${GOROOT}/bin:${GOPATH}/bin - # git credential to skip password typing RUN git config --global credential.helper store diff --git a/doc/build_and_install/build_from_source_cn.rst b/doc/build_and_install/build_from_source_cn.rst index fec2d412f0..cb766c3838 100644 --- a/doc/build_and_install/build_from_source_cn.rst +++ b/doc/build_and_install/build_from_source_cn.rst @@ -189,7 +189,7 @@ PaddlePaddle的编译选项,包括生成CPU/GPU二进制文件、链接何种B "WITH_TESTING", "是否开启单元测试", "OFF" "WITH_DOC", "是否编译中英文文档", "OFF" "WITH_SWIG_PY", "是否编译PYTHON的SWIG接口,该接口可用于预测和定制化训练", "Auto" - "WITH_GOLANG", "是否编译go语言的可容错parameter server", "ON" + "WITH_GOLANG", "是否编译go语言的可容错parameter server", "OFF" "WITH_MKL", "是否使用MKL数学库,如果为否则是用OpenBLAS", "ON" BLAS diff --git a/doc/build_and_install/build_from_source_en.rst b/doc/build_and_install/build_from_source_en.rst index 29a1439e4c..556cbfdf08 100644 --- a/doc/build_and_install/build_from_source_en.rst +++ b/doc/build_and_install/build_from_source_en.rst @@ -191,7 +191,7 @@ You can add :code:`-D` argument to pass such options, like: "WITH_TESTING", "Build unit tests", "OFF" "WITH_DOC", "Build documentations", "OFF" "WITH_SWIG_PY", "Build Python SWIG interface for V2 API", "Auto" - "WITH_GOLANG", "Build fault-tolerant parameter server written in go", "ON" + "WITH_GOLANG", "Build fault-tolerant parameter server written in go", "OFF" "WITH_MKL", "Use MKL as BLAS library, else use OpenBLAS", "ON" diff --git a/doc/mobile/cross_compiling_for_android_cn.md b/doc/mobile/cross_compiling_for_android_cn.md index ae24ced770..cdd6917239 100644 --- a/doc/mobile/cross_compiling_for_android_cn.md +++ b/doc/mobile/cross_compiling_for_android_cn.md @@ -23,6 +23,12 @@ $ docker build -t username/paddle-android:dev . 
-f Dockerfile.android $ docker pull paddlepaddle/paddle:latest-dev-android ``` +对于国内用户,我们提供了加速访问的镜像源: + +```bash +$ docker pull docker.paddlepaddlehub.com/paddle:latest-dev-android +``` + ### 编译PaddlePaddle C-API库 构建好开发镜像后,即可使用开发镜像来编译Android版PaddlePaddle C-API库。 Android的Docker开发镜像向用户提供两个可配置的参数: @@ -56,15 +62,15 @@ Android的Docker开发镜像向用户提供两个可配置的参数: - 编译`armeabi-v7a`,`Android API 21`的PaddlePaddle库 - ```bash - $ docker run -it --rm -v $PWD:/paddle -e "ANDROID_ABI=armeabi-v7a" -e "ANDROID_API=21" username/paddle-android:dev - ``` +```bash +$ docker run -it --rm -v $PWD:/paddle -e "ANDROID_ABI=armeabi-v7a" -e "ANDROID_API=21" username/paddle-android:dev +``` - 编译`arm64-v8a`,`Android API 21`的PaddlePaddle库 - ```bash - $ docker run -it --rm -v $PWD:/paddle -e "ANDROID_ABI=arm64-v8a" -e "ANDROID_API=21" username/paddle-android:dev - ``` +```bash +$ docker run -it --rm -v $PWD:/paddle -e "ANDROID_ABI=arm64-v8a" -e "ANDROID_API=21" username/paddle-android:dev +``` 执行上述`docker run`命令时,容器默认执行[paddle/scripts/docker/build_android.sh](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/scripts/docker/build_android.sh)脚本。该脚本中记录了交叉编译Android版PaddlePaddle库常用的CMake配置,并且会根据`ANDROID_ABI`和`ANDROID_API`自动构建独立工具链、进行编译和安装。由于arm64架构要求Android API不小于21。因此当`ANDROID_ABI=arm64-v8a`,`ANDROID_API<21`时,Docker容器中将默认使用`Android API 21`的编译工具链。用户可以参考下文[配置交叉编译参数](#配置交叉编译参数)章节,根据个人的需求修改定制Docker容器所执行的脚本。编译安装结束之后,PaddlePaddle的C-API库将被安装到`$PWD/install_android`目录,所依赖的第三方库同时也被安装到`$PWD/install_android/third_party`目录。 @@ -155,7 +161,11 @@ cmake -DCMAKE_SYSTEM_NAME=Android \ .. ``` -用户还可根据自己的需求设置其他编译参数。比如希望最小化生成的库的大小,可以设置`CMAKE_BUILD_TYPE`为`MinSizeRel`;若希望最快的执行速度,则可设置`CMAKE_BUILD_TYPE`为`Release`。亦可以通过手动设置`CMAKE_C/CXX_FLAGS`来影响PaddlePaddle的编译过程。 +用户还可根据自己的需求设置其他编译参数。 + +- 设置`CMAKE_BUILD_TYPE`为`MinSizeRel`,最小化生成的库的大小。 +- 设置`CMAKE_BUILD_TYPE`为`Release`,获得最快的执行速度, +- 用户亦可以通过手动设置`CMAKE_C/CXX_FLAGS`来影响PaddlePaddle的编译过程。 **性能TIPS**,为了达到最快的计算速度,在CMake参数配置上,有以下建议: diff --git a/doc/mobile/cross_compiling_for_android_en.md b/doc/mobile/cross_compiling_for_android_en.md index 0cf50181df..6af16fc114 100644 --- a/doc/mobile/cross_compiling_for_android_en.md +++ b/doc/mobile/cross_compiling_for_android_en.md @@ -25,6 +25,12 @@ Users can directly use the published Docker image. $ docker pull paddlepaddle/paddle:latest-dev-android ``` +For users in China, we provide a faster mirror. + +```bash +$ docker pull docker.paddlepaddlehub.com/paddle:latest-dev-android +``` + ### Build the Inference Library We can run the Docker image we just created to build the inference library of PaddlePaddle for Android using the command below: @@ -86,19 +92,19 @@ Android NDK includes everything we need to build the [*standalone toolchain*](ht - To build the standalone toolchain for `armeabi-v7a` and Android API level 21: - ```bash - your/path/to/android-ndk-r14b-linux-x86_64/build/tools/make-standalone-toolchain.sh \ - --arch=arm --platform=android-21 --install-dir=your/path/to/arm_standalone_toolchain - ``` +```bash +your/path/to/android-ndk-r14b-linux-x86_64/build/tools/make-standalone-toolchain.sh \ + --arch=arm --platform=android-21 --install-dir=your/path/to/arm_standalone_toolchain +``` The generated standalone toolchain will be in `your/path/to/arm_standalone_toolchain`. 
- To build the standalone toolchain for `arm64-v8a` and Android API level 21: - ```bash - your/path/to/android-ndk-r14b-linux-x86_64/build/tools/make-standalone-toolchain.sh \ - --arch=arm64 --platform=android-21 --install-dir=your/path/to/arm64_standalone_toolchain - ``` +```bash +your/path/to/android-ndk-r14b-linux-x86_64/build/tools/make-standalone-toolchain.sh \ + --arch=arm64 --platform=android-21 --install-dir=your/path/to/arm64_standalone_toolchain +``` The generated standalone toolchain will be in `your/path/to/arm64_standalone_toolchain`. diff --git a/paddle/scripts/docker/README.md b/paddle/scripts/docker/README.md index 65c4674555..78c0cc3782 100644 --- a/paddle/scripts/docker/README.md +++ b/paddle/scripts/docker/README.md @@ -58,7 +58,7 @@ Users can specify the following Docker build arguments with either "ON" or "OFF" | `WITH_AVX` | OFF | Set to "ON" to enable AVX support. | | `WITH_TESTING` | OFF | Build unit tests binaries. | | `WITH_MKL` | ON | Build with [Intel® MKL](https://software.intel.com/en-us/mkl) and [Intel® MKL-DNN](https://github.com/01org/mkl-dnn) support. | -| `WITH_GOLANG` | ON | Build fault-tolerant parameter server written in go. | +| `WITH_GOLANG` | OFF | Build fault-tolerant parameter server written in go. | | `WITH_SWIG_PY` | ON | Build with SWIG python API support. | | `WITH_C_API` | OFF | Build capi libraries for inference. | | `WITH_PYTHON` | ON | Build with python support. Turn this off if build is only for capi. | diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index 2220a593b3..06319fc638 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -40,7 +40,7 @@ function cmake_gen() { -DWITH_DISTRIBUTE=${WITH_DISTRIBUTE:-OFF} -DWITH_MKL=${WITH_MKL:-ON} -DWITH_AVX=${WITH_AVX:-OFF} - -DWITH_GOLANG=${WITH_GOLANG:-ON} + -DWITH_GOLANG=${WITH_GOLANG:-OFF} -DCUDA_ARCH_NAME=${CUDA_ARCH_NAME:-All} -DWITH_SWIG_PY=ON -DWITH_C_API=${WITH_C_API:-OFF} @@ -65,7 +65,7 @@ EOF -DWITH_DISTRIBUTE=${WITH_DISTRIBUTE:-OFF} \ -DWITH_MKL=${WITH_MKL:-ON} \ -DWITH_AVX=${WITH_AVX:-OFF} \ - -DWITH_GOLANG=${WITH_GOLANG:-ON} \ + -DWITH_GOLANG=${WITH_GOLANG:-OFF} \ -DCUDA_ARCH_NAME=${CUDA_ARCH_NAME:-All} \ -DWITH_SWIG_PY=${WITH_SWIG_PY:-ON} \ -DWITH_C_API=${WITH_C_API:-OFF} \ -- GitLab From 69643b5e892aee5df4f32911e343093b092e7690 Mon Sep 17 00:00:00 2001 From: qingqing01 Date: Wed, 28 Feb 2018 13:45:32 +0800 Subject: [PATCH 217/217] Enable the SSD loss to support normalization by the total number of output locations. (#8630) * Register more data type for reshape operator. * Enable the SSD loss to support normalization by the total number of output locations. * Fix the doc format. --- python/paddle/fluid/layers/detection.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py index fff64a57a4..04c900b829 100644 --- a/python/paddle/fluid/layers/detection.py +++ b/python/paddle/fluid/layers/detection.py @@ -328,6 +328,7 @@ def ssd_loss(location, conf_loss_weight=1.0, match_type='per_prediction', mining_type='max_negative', + normalize=True, sample_size=None): """ **Multi-box loss layer for object dection algorithm of SSD** @@ -376,18 +377,20 @@ def ssd_loss(location, `overlap_threshold` to determine the extra matching bboxes when finding matched boxes. 0.5 by default. neg_pos_ratio (float): The ratio of the negative boxes to the positive - boxes, used only when mining_type is max_negative, 3.0 by defalut. 
+        boxes, used only when mining_type is 'max_negative', 3.0 by default. neg_overlap (float): The negative overlap upper bound for the unmatched -        predictions. Use only when mining_type is max_negative, +        predictions. Use only when mining_type is 'max_negative', 0.5 by default. -        sample_size (int): The max sample size of negative box, used only when -        mining_type is hard_example. loc_loss_weight (float): Weight for localization loss, 1.0 by default. conf_loss_weight (float): Weight for confidence loss, 1.0 by default. match_type (str): The type of matching method during training, should be 'bipartite' or 'per_prediction', 'per_prediction' by defalut. mining_type (str): The hard example mining type, should be 'hard_example' or 'max_negative', now only support `max_negative`. +        normalize (bool): Whether to normalize the SSD loss by the total number +            of output locations, True by default. +        sample_size (int): The max sample size of negative box, used only when +            mining_type is 'hard_example'. Returns: Variable: The weighted sum of the localization loss and confidence loss, @@ -507,6 +510,13 @@ def ssd_loss(location, # 5.3 Compute overall weighted loss. loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss +    # reshape to [N, Np], N is the batch size and Np is the prior box number. +    loss = ops.reshape(x=loss, shape=[-1, num_prior]) +    loss = nn.reduce_sum(loss, dim=1, keep_dim=True) +    if normalize: +        normalizer = nn.reduce_sum(target_loc_weight) +        loss = loss / normalizer + return loss -- GitLab
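The normalization step added in the last hunk can be illustrated standalone. The NumPy sketch below uses hypothetical shapes and a random positive-prior mask as stand-ins for the op outputs above, and reproduces the same reshape, per-sample sum, and division by the total localization weight:

import numpy as np

batch_size, num_prior = 2, 8732                      # hypothetical N and Np
loss = np.random.rand(batch_size * num_prior)        # stands in for the weighted per-prior loss
target_loc_weight = (np.random.rand(batch_size, num_prior) > 0.9).astype('float32')

# Mirror of the added Python above: reshape to [N, Np], sum per sample,
# then divide by the total localization weight (the count of matched priors).
loss = loss.reshape(-1, num_prior).sum(axis=1, keepdims=True)   # shape [N, 1]
normalizer = target_loc_weight.sum()                 # corresponds to nn.reduce_sum(target_loc_weight)
normalized_loss = loss / normalizer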