From 21ea5425ccc7e1f8d848935a41a05b418ba60c65 Mon Sep 17 00:00:00 2001
From: liuwei1031 <46661762+liuwei1031@users.noreply.github.com>
Date: Wed, 17 Apr 2019 19:37:55 +0800
Subject: [PATCH] test=Release/1.4, merge security issue fix (#16942)

* Security issue (#16774)

* disable memory_optimize and inplace strategy by default, test=develop

* fix security issue

http://newicafe.baidu.com:80/issue/PaddleSec-3/show?from=page
http://newicafe.baidu.com:80/issue/PaddleSec-8/show?from=page
http://newicafe.baidu.com:80/issue/PaddleSec-12/show?from=page
http://newicafe.baidu.com:80/issue/PaddleSec-32/show?from=page
http://newicafe.baidu.com:80/issue/PaddleSec-35/show?from=page
http://newicafe.baidu.com:80/issue/PaddleSec-37/show?from=page
http://newicafe.baidu.com:80/issue/PaddleSec-40/show?from=page
http://newicafe.baidu.com:80/issue/PaddleSec-43/show?from=page
http://newicafe.baidu.com:80/issue/PaddleSec-44/show?from=page
http://newicafe.baidu.com:80/issue/PaddleSec-45/show?from=page
test=develop

* revert piece.cc, test=develop

* adjust api.cc, test=develop

* fix overflow by int32 mul test=develop (#16794)

* fix overflow by int32 mul test=develop

* fix reference nullptr

* fix codestyle test=develop

* modify to point in ContextProjectFunctor test=develop

* modify to point in ContextProjectFunctor test=develop

* modify . to -> test=develop

* test=release/1.4 cherry-pick (#16783) (#16794) (#16774) fix security issue
---
 paddle/fluid/framework/op_desc.cc              |  1 +
 .../fluid/inference/api/analysis_predictor.cc  |  3 ++
 paddle/fluid/inference/api/api.cc              |  1 +
 paddle/fluid/inference/api/api_impl.cc         |  5 ++
 .../tests/api/analyzer_seq_conv1_tester.cc     |  1 +
 paddle/fluid/operators/affine_grid_op.h        | 12 +++--
 paddle/fluid/operators/detection/gpc.cc        |  5 ++
 paddle/fluid/operators/math/context_project.h  |  7 +--
 .../operators/sequence_ops/sequence_conv_op.h  |  8 ++--
 .../fluid/operators/squared_l2_distance_op.h   | 48 +++++++++----------
 10 files changed, 56 insertions(+), 35 deletions(-)

diff --git a/paddle/fluid/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc
index 353db4352..c68e2db8b 100644
--- a/paddle/fluid/framework/op_desc.cc
+++ b/paddle/fluid/framework/op_desc.cc
@@ -241,6 +241,7 @@ OpDesc::OpDesc(const std::string &type, const VariableNameMap &inputs,
   outputs_ = outputs;
   attrs_ = attrs;
   need_update_ = true;
+  block_ = nullptr;
 }
 
 OpDesc::OpDesc(const OpDesc &other, BlockDesc *block) {
diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index a9d8113a7..c7899a733 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -259,6 +259,9 @@ bool AnalysisPredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
       return false;
     }
 
+    PADDLE_ENFORCE_NOT_NULL(input_ptr);
+    PADDLE_ENFORCE_NOT_NULL(inputs[i].data.data());
+
     if (platform::is_cpu_place(place_)) {
       // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
       std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
diff --git a/paddle/fluid/inference/api/api.cc b/paddle/fluid/inference/api/api.cc
index 7d57b6ec7..fc2d7b48c 100644
--- a/paddle/fluid/inference/api/api.cc
+++ b/paddle/fluid/inference/api/api.cc
@@ -54,6 +54,7 @@ PaddleBuf &PaddleBuf::operator=(const PaddleBuf &other) {
     memory_owned_ = other.memory_owned_;
   } else {
     Resize(other.length());
+    PADDLE_ENFORCE(!(other.length() > 0 && other.data() == nullptr));
     memcpy(data_, other.data(), other.length());
     length_ = other.length();
     memory_owned_ = true;
diff --git a/paddle/fluid/inference/api/api_impl.cc b/paddle/fluid/inference/api/api_impl.cc
index 54f40563c..56996c5cf 100644
--- a/paddle/fluid/inference/api/api_impl.cc
+++ b/paddle/fluid/inference/api/api_impl.cc
@@ -169,6 +169,7 @@ std::unique_ptr<PaddlePredictor> NativePaddlePredictor::Clone() {
   std::unique_ptr<PaddlePredictor> cls(new NativePaddlePredictor(config_));
   // Hot fix the bug that result diff in multi-thread.
   // TODO(Superjomn) re-implement a real clone here.
+  PADDLE_ENFORCE_NOT_NULL(dynamic_cast<NativePaddlePredictor *>(cls.get()));
   if (!dynamic_cast<NativePaddlePredictor *>(cls.get())->Init(nullptr)) {
     LOG(ERROR) << "fail to call Init";
     return nullptr;
@@ -210,6 +211,8 @@ bool NativePaddlePredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
       return false;
     }
 
+    PADDLE_ENFORCE_NOT_NULL(input_ptr);
+    PADDLE_ENFORCE_NOT_NULL(inputs[i].data.data());
     if (platform::is_cpu_place(place_)) {
       // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
       std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
@@ -316,6 +319,8 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
   }
 
   std::unique_ptr<PaddlePredictor> predictor(new NativePaddlePredictor(config));
+  PADDLE_ENFORCE_NOT_NULL(
+      dynamic_cast<NativePaddlePredictor *>(predictor.get()));
   if (!dynamic_cast<NativePaddlePredictor *>(predictor.get())->Init(nullptr)) {
     return nullptr;
   }
diff --git a/paddle/fluid/inference/tests/api/analyzer_seq_conv1_tester.cc b/paddle/fluid/inference/tests/api/analyzer_seq_conv1_tester.cc
index 9f23b9f03..5ee848c3c 100644
--- a/paddle/fluid/inference/tests/api/analyzer_seq_conv1_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_seq_conv1_tester.cc
@@ -47,6 +47,7 @@ struct DataRecord {
       num_lines++;
       std::vector<std::string> data;
       split(line, '\t', &data);
+      PADDLE_ENFORCE(data.size() >= 4);
       // load title1 data
       std::vector<int64_t> title1_data;
       split_to_int64(data[0], ' ', &title1_data);
diff --git a/paddle/fluid/operators/affine_grid_op.h b/paddle/fluid/operators/affine_grid_op.h
index 87d238314..73df8a38b 100644
--- a/paddle/fluid/operators/affine_grid_op.h
+++ b/paddle/fluid/operators/affine_grid_op.h
@@ -121,9 +121,11 @@ class AffineGridOpKernel : public framework::OpKernel<T> {
     // TODO(wanghaoshuang): Refine batched matrix multiply
     auto blas = math::GetBlas<DeviceContext, T>(ctx);
     for (int i = 0; i < n; ++i) {
-      Tensor sliced_grid = grid.Slice(i, i + 1).Resize({h * w, 3});
+      Tensor sliced_grid = grid.Slice(i, i + 1).Resize(
+          {static_cast<int64_t>(h) * static_cast<int64_t>(w), 3});
       Tensor sliced_theta = theta->Slice(i, i + 1).Resize({2, 3});
-      Tensor sliced_out = output->Slice(i, i + 1).Resize({h * w, 2});
+      Tensor sliced_out = output->Slice(i, i + 1).Resize(
+          {static_cast<int64_t>(h) * static_cast<int64_t>(w), 2});
       blas.MatMul(sliced_grid, false, sliced_theta, true, T(1), &sliced_out,
                   T(0));
     }
@@ -161,8 +163,10 @@ class AffineGridGradOpKernel : public framework::OpKernel<T> {
     // TODO(wanghaoshuang): Refine batched matrix multiply
     auto blas = math::GetBlas<DeviceContext, T>(ctx);
     for (int i = 0; i < n; ++i) {
-      Tensor sliced_grid = grid.Slice(i, i + 1).Resize({h * w, 3});
-      Tensor sliced_out_grad = output_grad->Slice(i, i + 1).Resize({h * w, 2});
+      Tensor sliced_grid = grid.Slice(i, i + 1).Resize(
+          {static_cast<int64_t>(h) * static_cast<int64_t>(w), 3});
+      Tensor sliced_out_grad = output_grad->Slice(i, i + 1).Resize(
+          {static_cast<int64_t>(h) * static_cast<int64_t>(w), 2});
       Tensor sliced_theta_grad = theta_grad->Slice(i, i + 1).Resize({2, 3});
       blas.MatMul(sliced_out_grad, true, sliced_grid, false, T(1),
                   &sliced_theta_grad, T(0));
diff --git a/paddle/fluid/operators/detection/gpc.cc b/paddle/fluid/operators/detection/gpc.cc
index 7c0823c04..f46aaf7d0 100644
--- a/paddle/fluid/operators/detection/gpc.cc
+++ b/paddle/fluid/operators/detection/gpc.cc
@@ -24,6 +24,7 @@
 **/
 
 #include "paddle/fluid/operators/detection/gpc.h"
+#include "paddle/fluid/platform/enforce.h"
 
 namespace gpc {
 
@@ -689,6 +690,7 @@ static bbox *create_contour_bboxes(gpc_polygon *p) {
 
   gpc_malloc<bbox>(box, p->num_contours * sizeof(bbox),
                    const_cast<char *>("Bounding box creation"));
+  PADDLE_ENFORCE_NOT_NULL(box);
 
   /* Construct contour bounding boxes */
   for (c = 0; c < p->num_contours; c++) {
@@ -852,6 +854,7 @@ void gpc_add_contour(gpc_polygon *p, gpc_vertex_list *new_contour, int hole) {
   /* Create an extended hole array */
   gpc_malloc<int>(extended_hole, (p->num_contours + 1) * sizeof(int),
                   const_cast<char *>("contour hole addition"));
+  PADDLE_ENFORCE_NOT_NULL(extended_hole);
 
   /* Create an extended contour array */
   gpc_malloc<gpc_vertex_list>(extended_contour,
@@ -969,6 +972,7 @@ void gpc_polygon_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
   /* Build scanbeam table from scanbeam tree */
   gpc_malloc<double>(sbt, sbt_entries * sizeof(double),
                      const_cast<char *>("sbt creation"));
+  PADDLE_ENFORCE_NOT_NULL(sbt);
   build_sbt(&scanbeam, sbt, sbtree);
   scanbeam = 0;
   free_sbtree(&sbtree);
@@ -1604,6 +1608,7 @@ void gpc_tristrip_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
   /* Build scanbeam table from scanbeam tree */
   gpc_malloc<double>(sbt, sbt_entries * sizeof(double),
                      const_cast<char *>("sbt creation"));
+  PADDLE_ENFORCE_NOT_NULL(sbt);
   build_sbt(&scanbeam, sbt, sbtree);
   scanbeam = 0;
   free_sbtree(&sbtree);
diff --git a/paddle/fluid/operators/math/context_project.h b/paddle/fluid/operators/math/context_project.h
index bc0df3f35..d6a4793a8 100644
--- a/paddle/fluid/operators/math/context_project.h
+++ b/paddle/fluid/operators/math/context_project.h
@@ -87,7 +87,7 @@ template <typename DeviceContext, typename T>
 class ContextProjectFunctor {
  public:
   void operator()(const DeviceContext& context, const LoDTensor& in,
-                  const Tensor& padding_data, bool padding_trainable,
+                  const Tensor* padding_data, bool padding_trainable,
                   const int context_start, const int context_length,
                   const int context_stride, const int up_pad,
                   const int down_pad, Tensor* col) {
@@ -132,6 +132,7 @@ class ContextProjectFunctor {
       }
     }
     if (padding_trainable) {
+      PADDLE_ENFORCE_NOT_NULL(padding_data);
      for (int i = 0; i < static_cast<int>(lod_level_0.size()) - 1; ++i) {
        Tensor out_t = col->Slice(static_cast<int>(lod_level_0[i]),
                                  static_cast<int>(lod_level_0[i + 1]));
@@ -150,7 +151,7 @@ class ContextProjectFunctor {
                k + context_length < up_pad ? context_length : up_pad - k;
            Tensor out_t_sub = out_t.Slice(k * context_length,
                                           k * context_length + padding_size);
-           Tensor w_sub = padding_data.Slice(k, k + padding_size);
+           Tensor w_sub = padding_data->Slice(k, k + padding_size);
            framework::TensorCopy(w_sub, context.GetPlace(), context,
                                  &out_t_sub);
          }
@@ -180,7 +181,7 @@ class ContextProjectFunctor {
          Tensor out_t_sub = out_t.Slice(
              (down_pad_begin_row + t) * context_length - padding_size,
              (down_pad_begin_row + t) * context_length);
-         Tensor w_sub = padding_data.Slice(
+         Tensor w_sub = padding_data->Slice(
              up_pad + padding_idx, up_pad + padding_idx + padding_size);
          framework::TensorCopy(w_sub, context.GetPlace(), context,
                                &out_t_sub);
diff --git a/paddle/fluid/operators/sequence_ops/sequence_conv_op.h b/paddle/fluid/operators/sequence_ops/sequence_conv_op.h
index ee70281d5..3a2c9e3f7 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_conv_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_conv_op.h
@@ -49,7 +49,7 @@ class SequenceConvKernel : public framework::OpKernel<T> {
 
     int up_pad = std::max(0, -context_start);
     int down_pad = std::max(0, context_start + context_length - 1);
-    int sequence_width = static_cast<int>(in->dims()[1]);
+    auto sequence_width = static_cast<int64_t>(in->dims()[1]);
 
     framework::DDim col_shape = {in->dims()[0],
                                  context_length * sequence_width};
@@ -62,7 +62,7 @@ class SequenceConvKernel : public framework::OpKernel<T> {
     set_zero(dev_ctx, &col, static_cast<T>(0));
     math::ContextProjectFunctor<DeviceContext, T> seq_project_functor;
 
-    seq_project_functor(dev_ctx, *in, *padding_data, padding_trainable,
+    seq_project_functor(dev_ctx, *in, padding_data, padding_trainable,
                         context_start, context_length, context_stride, up_pad,
                         down_pad, &col);
 
@@ -93,7 +93,7 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {
 
     int up_pad = std::max(0, -context_start);
     int down_pad = std::max(0, context_start + context_length - 1);
-    int sequence_width = static_cast<int>(in->dims()[1]);
+    auto sequence_width = static_cast<int64_t>(in->dims()[1]);
 
     math::SetConstant<DeviceContext, T> set_zero;
     auto& dev_ctx = context.template device_context<DeviceContext>();
@@ -144,7 +144,7 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {
       padding_data = context.Input<Tensor>("PaddingData");
     }
 
-    seq_project_functor(dev_ctx, *in, *padding_data, padding_trainable,
+    seq_project_functor(dev_ctx, *in, padding_data, padding_trainable,
                         context_start, context_length, context_stride, up_pad,
                         down_pad, &col);
diff --git a/paddle/fluid/operators/squared_l2_distance_op.h b/paddle/fluid/operators/squared_l2_distance_op.h
index e0133d33e..12a8f05b5 100644
--- a/paddle/fluid/operators/squared_l2_distance_op.h
+++ b/paddle/fluid/operators/squared_l2_distance_op.h
@@ -77,6 +77,9 @@ class SquaredL2DistanceGradKernel : public framework::OpKernel<T> {
     auto* x_g = context.Output<Tensor>(framework::GradVarName("X"));
     auto* y_g = context.Output<Tensor>(framework::GradVarName("Y"));
 
+    PADDLE_ENFORCE_NOT_NULL(x_g);
+    PADDLE_ENFORCE_NOT_NULL(y_g);
+
     auto sub_result = EigenMatrix<T>::From(*in0);
     auto out_grad = EigenMatrix<T>::From(*in1);
 
@@ -92,31 +95,28 @@ class SquaredL2DistanceGradKernel : public framework::OpKernel<T> {
     // propagate back to input
     auto& eigen_place =
         *context.template device_context<DeviceContext>().eigen_device();
-    if (x_g) {
-      x_g->mutable_data<T>(context.GetPlace());
-      // eigen matrix
-      auto x_grad =
-          EigenMatrix<T>::From(*x_g, framework::make_ddim({x_dims[0], cols}));
-      // dimensions are same with subResult
-      x_grad.device(eigen_place) = grad_mat;
-    }
-    if (y_g) {
-      y_g->mutable_data<T>(context.GetPlace());
-
-      PADDLE_ENFORCE_GE(sub_result.dimensions()[0], y_dims[0],
-                        "First dimension of gradient must be greater or "
-                        "equal than first dimension of target.");
-
-      if (sub_result.dimensions()[0] == y_dims[0]) {
-        auto y_grad =
-            EigenMatrix<T>::From(*y_g, framework::make_ddim({y_dims[0], cols}));
-        y_grad.device(eigen_place) = -1 * grad_mat;
-      } else {
-        auto col_sum_res = -1 * (grad_mat.sum(Eigen::array<int, 1>({{0}})));
-        auto y_grad = EigenVector<T>::Flatten(*y_g);
-        y_grad.device(eigen_place) = col_sum_res;
-      }
+    x_g->mutable_data<T>(context.GetPlace());
+    // eigen matrix
+    auto x_grad =
+        EigenMatrix<T>::From(*x_g, framework::make_ddim({x_dims[0], cols}));
+    // dimensions are same with subResult
+    x_grad.device(eigen_place) = grad_mat;
+
+    y_g->mutable_data<T>(context.GetPlace());
+
+    PADDLE_ENFORCE_GE(sub_result.dimensions()[0], y_dims[0],
+                      "First dimension of gradient must be greater or "
+                      "equal than first dimension of target.");
+
+    if (sub_result.dimensions()[0] == y_dims[0]) {
+      auto y_grad =
+          EigenMatrix<T>::From(*y_g, framework::make_ddim({y_dims[0], cols}));
+      y_grad.device(eigen_place) = -1 * grad_mat;
+    } else {
+      auto col_sum_res = -1 * (grad_mat.sum(Eigen::array<int, 1>({{0}})));
+      auto y_grad = EigenVector<T>::Flatten(*y_g);
+      y_grad.device(eigen_place) = col_sum_res;
     }
   }
 };
-- 
GitLab
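
Standalone sketch (not part of the patch above) of the int32-overflow pattern that the
affine_grid_op.h and sequence_conv_op.h hunks guard against: when two int dimensions are
multiplied, the product is computed in 32-bit arithmetic and overflows before it is widened
to its 64-bit destination, so each operand is cast to int64_t first. The dimension values
below are made up for the demonstration; only the cast-before-multiply idiom mirrors the patch.

// overflow_sketch.cc - illustrative only; not part of the PaddlePaddle sources.
#include <cstdint>
#include <iostream>

int main() {
  int h = 70000;  // hypothetical large height
  int w = 70000;  // hypothetical large width

  // Buggy form: h * w is evaluated as int and overflows (undefined behaviour,
  // typically observed as a wrapped negative value) before being widened.
  int64_t overflowed = h * w;

  // Fixed form, as in the patch: widen each operand first so the
  // multiplication itself is carried out in 64-bit arithmetic.
  int64_t safe = static_cast<int64_t>(h) * static_cast<int64_t>(w);

  std::cout << "int multiply:     " << overflowed << "\n"
            << "int64_t multiply: " << safe << "\n";  // prints 4900000000
  return 0;
}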