Unverified commit 21ea5425, authored by liuwei1031, committed by GitHub

test=Release/1.4, merge security issue fix (#16942)

* Security issue (#16774)

* disable memory_optimize and inplace strategy by default, test=develop

* fix security issue
http://newicafe.baidu.com:80/issue/PaddleSec-3/show?from=page
http://newicafe.baidu.com:80/issue/PaddleSec-8/show?from=page
http://newicafe.baidu.com:80/issue/PaddleSec-12/show?from=page
http://newicafe.baidu.com:80/issue/PaddleSec-32/show?from=page
http://newicafe.baidu.com:80/issue/PaddleSec-35/show?from=page
http://newicafe.baidu.com:80/issue/PaddleSec-37/show?from=page
http://newicafe.baidu.com:80/issue/PaddleSec-40/show?from=page
http://newicafe.baidu.com:80/issue/PaddleSec-43/show?from=page
http://newicafe.baidu.com:80/issue/PaddleSec-44/show?from=page
http://newicafe.baidu.com:80/issue/PaddleSec-45/show?from=page

test=develop

* revert piece.cc, test=develop

* adjust api.cc,test=develop

* fix overflow by int32 mul test=develop (#16794)

* fix overflow by int32 mul test=develop

* fix reference nullptr

* fix codestyle test=develop

* modify to point in ContextProjectFunctor test=develop

* modify to point in ContextProjectFunctor test=develop

* modify . to -> test=develop

* test=release/1.4 cherry-pick (#16783) (#16794) (#16774)
fix security issue
Parent 07462de8
@@ -241,6 +241,7 @@ OpDesc::OpDesc(const std::string &type, const VariableNameMap &inputs,
   outputs_ = outputs;
   attrs_ = attrs;
   need_update_ = true;
+  block_ = nullptr;
 }
 
 OpDesc::OpDesc(const OpDesc &other, BlockDesc *block) {
......
@@ -259,6 +259,9 @@ bool AnalysisPredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
       return false;
     }
 
+    PADDLE_ENFORCE_NOT_NULL(input_ptr);
+    PADDLE_ENFORCE_NOT_NULL(inputs[i].data.data());
+
     if (platform::is_cpu_place(place_)) {
       // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
       std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
......
@@ -54,6 +54,7 @@ PaddleBuf &PaddleBuf::operator=(const PaddleBuf &other) {
     memory_owned_ = other.memory_owned_;
   } else {
     Resize(other.length());
+    PADDLE_ENFORCE(!(other.length() > 0 && other.data() == nullptr));
     memcpy(data_, other.data(), other.length());
     length_ = other.length();
     memory_owned_ = true;
......
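The check above guards an invariant worth spelling out: a source buffer that reports a positive length must also expose a non-null data pointer before it is copied. Below is a minimal standalone sketch of that invariant; `SimpleBuf` is a hypothetical stand-in for illustration only, not Paddle's `PaddleBuf`.

```cpp
#include <cstdlib>
#include <cstring>
#include <new>
#include <stdexcept>

class SimpleBuf {
 public:
  SimpleBuf() = default;
  SimpleBuf(const SimpleBuf&) = delete;  // keep the sketch to assignment only
  ~SimpleBuf() { std::free(data_); }

  SimpleBuf& operator=(const SimpleBuf& other) {
    if (this == &other) return *this;
    Resize(other.length_);
    // Same guard as the diff: a non-empty source must carry valid data.
    if (other.length_ > 0 && other.data_ == nullptr) {
      throw std::runtime_error("source buffer reports data it does not own");
    }
    if (other.length_ > 0) std::memcpy(data_, other.data_, other.length_);
    length_ = other.length_;
    return *this;
  }

  void Resize(std::size_t length) {
    data_ = std::realloc(data_, length);
    if (length > 0 && data_ == nullptr) throw std::bad_alloc();
    length_ = length;
  }

 private:
  void* data_ = nullptr;
  std::size_t length_ = 0;
};
```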
@@ -169,6 +169,7 @@ std::unique_ptr<PaddlePredictor> NativePaddlePredictor::Clone() {
   std::unique_ptr<PaddlePredictor> cls(new NativePaddlePredictor(config_));
   // Hot fix the bug that result diff in multi-thread.
   // TODO(Superjomn) re-implement a real clone here.
+  PADDLE_ENFORCE_NOT_NULL(dynamic_cast<NativePaddlePredictor *>(cls.get()));
   if (!dynamic_cast<NativePaddlePredictor *>(cls.get())->Init(nullptr)) {
     LOG(ERROR) << "fail to call Init";
     return nullptr;
@@ -210,6 +211,8 @@ bool NativePaddlePredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
       return false;
     }
 
+    PADDLE_ENFORCE_NOT_NULL(input_ptr);
+    PADDLE_ENFORCE_NOT_NULL(inputs[i].data.data());
     if (platform::is_cpu_place(place_)) {
       // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
       std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
@@ -316,6 +319,8 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
   }
 
   std::unique_ptr<PaddlePredictor> predictor(new NativePaddlePredictor(config));
+  PADDLE_ENFORCE_NOT_NULL(
+      dynamic_cast<NativePaddlePredictor *>(predictor.get()));
   if (!dynamic_cast<NativePaddlePredictor *>(predictor.get())->Init(nullptr)) {
     return nullptr;
   }
......
@@ -47,6 +47,7 @@ struct DataRecord {
       num_lines++;
       std::vector<std::string> data;
       split(line, '\t', &data);
+      PADDLE_ENFORCE(data.size() >= 4);
       // load title1 data
       std::vector<int64_t> title1_data;
       split_to_int64(data[0], ' ', &title1_data);
......
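A hedged, generic sketch of the same defensive pattern as the added PADDLE_ENFORCE: split the record first, then refuse to index past the fields that are actually present. The helper names here are made up for illustration and are not the test's `split()`/`split_to_int64()`.

```cpp
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>

// Hypothetical splitter: breaks a line into separator-delimited fields.
static std::vector<std::string> SplitFields(const std::string& line, char sep) {
  std::vector<std::string> fields;
  std::stringstream ss(line);
  for (std::string piece; std::getline(ss, piece, sep);) fields.push_back(piece);
  return fields;
}

std::string Title1Field(const std::string& line) {
  std::vector<std::string> data = SplitFields(line, '\t');
  // Guard before indexing: the record is later read at data[0]..data[3].
  if (data.size() < 4) {
    throw std::runtime_error("malformed record: expected at least 4 fields");
  }
  return data[0];
}
```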
@@ -121,9 +121,11 @@ class AffineGridOpKernel : public framework::OpKernel<T> {
     // TODO(wanghaoshuang): Refine batched matrix multiply
     auto blas = math::GetBlas<DeviceContext, T>(ctx);
     for (int i = 0; i < n; ++i) {
-      Tensor sliced_grid = grid.Slice(i, i + 1).Resize({h * w, 3});
+      Tensor sliced_grid = grid.Slice(i, i + 1).Resize(
+          {static_cast<int64_t>(h) * static_cast<int64_t>(w), 3});
       Tensor sliced_theta = theta->Slice(i, i + 1).Resize({2, 3});
-      Tensor sliced_out = output->Slice(i, i + 1).Resize({h * w, 2});
+      Tensor sliced_out = output->Slice(i, i + 1).Resize(
+          {static_cast<int64_t>(h) * static_cast<int64_t>(w), 2});
       blas.MatMul(sliced_grid, false, sliced_theta, true, T(1), &sliced_out,
                   T(0));
     }
@@ -161,8 +163,10 @@ class AffineGridGradOpKernel : public framework::OpKernel<T> {
     // TODO(wanghaoshuang): Refine batched matrix multiply
     auto blas = math::GetBlas<DeviceContext, T>(ctx);
     for (int i = 0; i < n; ++i) {
-      Tensor sliced_grid = grid.Slice(i, i + 1).Resize({h * w, 3});
-      Tensor sliced_out_grad = output_grad->Slice(i, i + 1).Resize({h * w, 2});
+      Tensor sliced_grid = grid.Slice(i, i + 1).Resize(
+          {static_cast<int64_t>(h) * static_cast<int64_t>(w), 3});
+      Tensor sliced_out_grad = output_grad->Slice(i, i + 1).Resize(
+          {static_cast<int64_t>(h) * static_cast<int64_t>(w), 2});
       Tensor sliced_theta_grad = theta_grad->Slice(i, i + 1).Resize({2, 3});
       blas.MatMul(sliced_out_grad, true, sliced_grid, false, T(1),
                   &sliced_theta_grad, T(0));
......
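For context on the cast in the affine_grid hunk above: with 32-bit `int` operands, `h * w` is evaluated in `int` before the result is ever widened to the 64-bit dimension type, so large spatial sizes can overflow; widening one operand first keeps the whole product in 64-bit arithmetic. A small standalone illustration, with made-up values:

```cpp
#include <cstdint>
#include <iostream>

int main() {
  int h = 70000, w = 70000;  // hypothetical large spatial dimensions
  // h * w would be a 32-bit multiply: it overflows (undefined behavior)
  // before the result could be widened to int64_t, e.g.:
  //   int64_t wrong = h * w;
  // Casting one operand first performs the multiply in 64-bit arithmetic.
  int64_t right = static_cast<int64_t>(h) * w;
  std::cout << right << "\n";  // prints 4900000000
  return 0;
}
```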
@@ -24,6 +24,7 @@
 **/
 
 #include "paddle/fluid/operators/detection/gpc.h"
+#include "paddle/fluid/platform/enforce.h"
 
 namespace gpc {
@@ -689,6 +690,7 @@ static bbox *create_contour_bboxes(gpc_polygon *p) {
   gpc_malloc<bbox>(box, p->num_contours * sizeof(bbox),
                    const_cast<char *>("Bounding box creation"));
+  PADDLE_ENFORCE_NOT_NULL(box);
 
   /* Construct contour bounding boxes */
   for (c = 0; c < p->num_contours; c++) {
@@ -852,6 +854,7 @@ void gpc_add_contour(gpc_polygon *p, gpc_vertex_list *new_contour, int hole) {
   /* Create an extended hole array */
   gpc_malloc<int>(extended_hole, (p->num_contours + 1) * sizeof(int),
                   const_cast<char *>("contour hole addition"));
+  PADDLE_ENFORCE_NOT_NULL(extended_hole);
 
   /* Create an extended contour array */
   gpc_malloc<gpc_vertex_list>(extended_contour,
@@ -969,6 +972,7 @@ void gpc_polygon_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
   /* Build scanbeam table from scanbeam tree */
   gpc_malloc<double>(sbt, sbt_entries * sizeof(double),
                      const_cast<char *>("sbt creation"));
+  PADDLE_ENFORCE_NOT_NULL(sbt);
   build_sbt(&scanbeam, sbt, sbtree);
   scanbeam = 0;
   free_sbtree(&sbtree);
@@ -1604,6 +1608,7 @@ void gpc_tristrip_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
   /* Build scanbeam table from scanbeam tree */
   gpc_malloc<double>(sbt, sbt_entries * sizeof(double),
                      const_cast<char *>("sbt creation"));
+  PADDLE_ENFORCE_NOT_NULL(sbt);
   build_sbt(&scanbeam, sbt, sbtree);
   scanbeam = 0;
   free_sbtree(&sbtree);
......
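The gpc changes all follow one pattern: an allocation routine that can hand back a null pointer is now checked before the buffer is written. A plain-C++ sketch of the same idea, using standard `malloc` rather than `gpc_malloc`/`PADDLE_ENFORCE_NOT_NULL`:

```cpp
#include <cstdio>
#include <cstdlib>

int main() {
  const std::size_t entries = 1024;
  double* sbt = static_cast<double*>(std::malloc(entries * sizeof(double)));
  // Fail fast instead of dereferencing a null pointer on allocation failure.
  if (sbt == nullptr) {
    std::fprintf(stderr, "scanbeam table allocation failed\n");
    return 1;
  }
  for (std::size_t i = 0; i < entries; ++i) sbt[i] = 0.0;
  std::free(sbt);
  return 0;
}
```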
@@ -87,7 +87,7 @@ template <typename DeviceContext, typename T>
 class ContextProjectFunctor {
  public:
   void operator()(const DeviceContext& context, const LoDTensor& in,
-                  const Tensor& padding_data, bool padding_trainable,
+                  const Tensor* padding_data, bool padding_trainable,
                   const int context_start, const int context_length,
                   const int context_stride, const int up_pad,
                   const int down_pad, Tensor* col) {
@@ -132,6 +132,7 @@ class ContextProjectFunctor {
       }
     }
     if (padding_trainable) {
+      PADDLE_ENFORCE_NOT_NULL(padding_data);
       for (int i = 0; i < static_cast<int>(lod_level_0.size()) - 1; ++i) {
         Tensor out_t = col->Slice(static_cast<int>(lod_level_0[i]),
                                   static_cast<int>(lod_level_0[i + 1]));
@@ -150,7 +151,7 @@ class ContextProjectFunctor {
                 k + context_length < up_pad ? context_length : up_pad - k;
             Tensor out_t_sub = out_t.Slice(k * context_length,
                                            k * context_length + padding_size);
-            Tensor w_sub = padding_data.Slice(k, k + padding_size);
+            Tensor w_sub = padding_data->Slice(k, k + padding_size);
             framework::TensorCopy(w_sub, context.GetPlace(), context,
                                   &out_t_sub);
           }
@@ -180,7 +181,7 @@ class ContextProjectFunctor {
             Tensor out_t_sub = out_t.Slice(
                 (down_pad_begin_row + t) * context_length - padding_size,
                 (down_pad_begin_row + t) * context_length);
-            Tensor w_sub = padding_data.Slice(
+            Tensor w_sub = padding_data->Slice(
                 up_pad + padding_idx, up_pad + padding_idx + padding_size);
             framework::TensorCopy(w_sub, context.GetPlace(), context,
                                   &out_t_sub);
......
@@ -49,7 +49,7 @@ class SequenceConvKernel : public framework::OpKernel<T> {
     int up_pad = std::max(0, -context_start);
     int down_pad = std::max(0, context_start + context_length - 1);
-    int sequence_width = static_cast<int>(in->dims()[1]);
+    auto sequence_width = static_cast<int64_t>(in->dims()[1]);
 
     framework::DDim col_shape = {in->dims()[0],
                                  context_length * sequence_width};
@@ -62,7 +62,7 @@ class SequenceConvKernel : public framework::OpKernel<T> {
     set_zero(dev_ctx, &col, static_cast<T>(0));
 
     math::ContextProjectFunctor<DeviceContext, T> seq_project_functor;
-    seq_project_functor(dev_ctx, *in, *padding_data, padding_trainable,
+    seq_project_functor(dev_ctx, *in, padding_data, padding_trainable,
                         context_start, context_length, context_stride, up_pad,
                         down_pad, &col);
@@ -93,7 +93,7 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {
     int up_pad = std::max(0, -context_start);
     int down_pad = std::max(0, context_start + context_length - 1);
-    int sequence_width = static_cast<int>(in->dims()[1]);
+    auto sequence_width = static_cast<int64_t>(in->dims()[1]);
 
     math::SetConstant<DeviceContext, T> set_zero;
     auto& dev_ctx = context.template device_context<DeviceContext>();
@@ -144,7 +144,7 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {
       padding_data = context.Input<Tensor>("PaddingData");
     }
 
-    seq_project_functor(dev_ctx, *in, *padding_data, padding_trainable,
+    seq_project_functor(dev_ctx, *in, padding_data, padding_trainable,
                         context_start, context_length, context_stride, up_pad,
                         down_pad, &col);
......
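The `Tensor&` to `Tensor*` signature change above lets the sequence_conv kernels forward PaddingData without dereferencing it when it may be absent; the functor then enforces non-null only on the branch that actually reads it. A simplified sketch of that calling convention, with hypothetical names (`Project`, and `std::vector` standing in for the real Tensor type):

```cpp
#include <cassert>
#include <vector>

using Tensor = std::vector<float>;  // stand-in for the real Tensor type

// Optional input passed by pointer; it may legitimately be null when
// padding is not trainable.
void Project(const Tensor* padding_data, bool padding_trainable, Tensor* col) {
  if (padding_trainable) {
    assert(padding_data != nullptr);  // mirrors PADDLE_ENFORCE_NOT_NULL
    col->insert(col->end(), padding_data->begin(), padding_data->end());
  }
  // ... context-window work elided ...
}

int main() {
  Tensor col;
  Project(nullptr, /*padding_trainable=*/false, &col);  // safe: never dereferenced
  Tensor padding = {1.0f, 2.0f};
  Project(&padding, /*padding_trainable=*/true, &col);
  return 0;
}
```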
@@ -77,6 +77,9 @@ class SquaredL2DistanceGradKernel : public framework::OpKernel<T> {
     auto* x_g = context.Output<Tensor>(framework::GradVarName("X"));
     auto* y_g = context.Output<Tensor>(framework::GradVarName("Y"));
 
+    PADDLE_ENFORCE_NOT_NULL(x_g);
+    PADDLE_ENFORCE_NOT_NULL(y_g);
+
     auto sub_result = EigenMatrix<T>::From(*in0);
     auto out_grad = EigenMatrix<T>::From(*in1);
@@ -92,31 +95,28 @@ class SquaredL2DistanceGradKernel : public framework::OpKernel<T> {
     // propagate back to input
     auto& eigen_place =
         *context.template device_context<DeviceContext>().eigen_device();
-    if (x_g) {
-      x_g->mutable_data<T>(context.GetPlace());
-      // eigen matrix
-      auto x_grad =
-          EigenMatrix<T>::From(*x_g, framework::make_ddim({x_dims[0], cols}));
-      // dimensions are same with subResult
-      x_grad.device(eigen_place) = grad_mat;
-    }
-
-    if (y_g) {
-      y_g->mutable_data<T>(context.GetPlace());
-
-      PADDLE_ENFORCE_GE(sub_result.dimensions()[0], y_dims[0],
-                        "First dimension of gradient must be greater or "
-                        "equal than first dimension of target.");
-
-      if (sub_result.dimensions()[0] == y_dims[0]) {
-        auto y_grad =
-            EigenMatrix<T>::From(*y_g, framework::make_ddim({y_dims[0], cols}));
-        y_grad.device(eigen_place) = -1 * grad_mat;
-      } else {
-        auto col_sum_res = -1 * (grad_mat.sum(Eigen::array<int, 1>({{0}})));
-        auto y_grad = EigenVector<T>::Flatten(*y_g);
-        y_grad.device(eigen_place) = col_sum_res;
-      }
+
+    x_g->mutable_data<T>(context.GetPlace());
+    // eigen matrix
+    auto x_grad =
+        EigenMatrix<T>::From(*x_g, framework::make_ddim({x_dims[0], cols}));
+    // dimensions are same with subResult
+    x_grad.device(eigen_place) = grad_mat;
+
+    y_g->mutable_data<T>(context.GetPlace());
+
+    PADDLE_ENFORCE_GE(sub_result.dimensions()[0], y_dims[0],
+                      "First dimension of gradient must be greater or "
+                      "equal than first dimension of target.");
+
+    if (sub_result.dimensions()[0] == y_dims[0]) {
+      auto y_grad =
+          EigenMatrix<T>::From(*y_g, framework::make_ddim({y_dims[0], cols}));
+      y_grad.device(eigen_place) = -1 * grad_mat;
+    } else {
+      auto col_sum_res = -1 * (grad_mat.sum(Eigen::array<int, 1>({{0}})));
+      auto y_grad = EigenVector<T>::Flatten(*y_g);
+      y_grad.device(eigen_place) = col_sum_res;
     }
   }
 };
......