提交 743dfd82 编写于 作者: W wanghaoshuang

Add nullptr check: guard the X@GRAD output tensor in ClipOpGrad::InferShape and in the CPU/CUDA clip gradient kernels, since the gradient output may be absent

上级 14fb15b6
......@@ -68,8 +68,9 @@ class ClipOpGrad : public framework::OperatorWithKernel {
"Input(Out@GRAD) should not be null");
auto x_dims = ctx.Input<LoDTensor>("X")->dims();
auto *x_grad = ctx.Output<LoDTensor>(framework::GradVarName("X"));
x_grad->Resize(x_dims);
if (x_grad != nullptr) {
x_grad->Resize(x_dims);
}
}
};
......
......@@ -43,22 +43,24 @@ class ClipGradientOpCUDAKernel : public framework::OpKernel {
auto min = context.Attr<float>("min");
auto* d_out = context.Input<LoDTensor>(framework::GradVarName("Out"));
auto* d_x = context.Output<LoDTensor>(framework::GradVarName("X"));
auto* x = context.Input<LoDTensor>("X");
auto dims = d_x->dims();
int64_t count = d_out->numel();
auto d_x_data = d_x->mutable_data<T>(context.GetPlace());
auto d_out_data = d_out->data<T>();
auto x_data = x->data<T>();
if (d_x != nullptr) {
auto* x = context.Input<LoDTensor>("X");
auto dims = d_x->dims();
int64_t count = d_out->numel();
auto d_x_data = d_x->mutable_data<T>(context.GetPlace());
auto d_out_data = d_out->data<T>();
auto x_data = x->data<T>();
int N = d_x->dims()[0];
int D = d_x->dims()[1];
int block = 512;
int grid = (N * D + block - 1) / block;
ClipGradientKernel<T><<<
grid, block, 0, reinterpret_cast<const platform::CUDADeviceContext&>(
context.device_context())
.stream()>>>(count, min, max, x_data, d_out_data,
d_x_data);
int N = d_x->dims()[0];
int D = d_x->dims()[1];
int block = 512;
int grid = (N * D + block - 1) / block;
ClipGradientKernel<T><<<
grid, block, 0, reinterpret_cast<const platform::CUDADeviceContext&>(
context.device_context())
.stream()>>>(count, min, max, x_data, d_out_data,
d_x_data);
}
}
};
......
......@@ -78,17 +78,19 @@ class ClipGradKernel : public framework::OpKernel {
auto min = context.op().Attr<float>("min");
auto* d_out = context.Input<LoDTensor>(framework::GradVarName("Out"));
auto* d_x = context.Output<LoDTensor>(framework::GradVarName("X"));
auto* x = context.Input<LoDTensor>("X");
auto dims = d_x->dims();
int64_t count = d_out->numel();
auto d_x_data = d_x->mutable_data<T>(context.GetPlace());
auto d_out_data = d_out->data<T>();
auto x_data = x->data<T>();
for (int i = 0; i < count; ++i) {
if (x_data[i] > min && x_data[i] < max) {
d_x_data[i] = d_out_data[i];
} else {
d_x_data[i] = 0;
if (d_x != nullptr) {
auto* x = context.Input<LoDTensor>("X");
auto dims = d_x->dims();
int64_t count = d_out->numel();
auto d_x_data = d_x->mutable_data<T>(context.GetPlace());
auto d_out_data = d_out->data<T>();
auto x_data = x->data<T>();
for (int i = 0; i < count; ++i) {
if (x_data[i] > min && x_data[i] < max) {
d_x_data[i] = d_out_data[i];
} else {
d_x_data[i] = 0;
}
}
}
}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册