Commit ba168bd2 authored by S sneaxiy

modify API.spec

Parent c73c5ed5
@@ -162,6 +162,7 @@ paddle.fluid.layers.crop ArgSpec(args=['x', 'shape', 'offsets', 'name'], varargs
 paddle.fluid.layers.rank_loss ArgSpec(args=['label', 'left', 'right', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.prelu ArgSpec(args=['x', 'mode', 'param_attr', 'name'], varargs=None, keywords=None, defaults=(None, None))
 paddle.fluid.layers.flatten ArgSpec(args=['x', 'axis', 'name'], varargs=None, keywords=None, defaults=(1, None))
+paddle.fluid.layers.stack ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,))
 paddle.fluid.layers.data ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True))
 paddle.fluid.layers.open_recordio_file ArgSpec(args=['filename', 'shapes', 'lod_levels', 'dtypes', 'pass_num', 'for_parallel'], varargs=None, keywords=None, defaults=(1, True))
 paddle.fluid.layers.open_files ArgSpec(args=['filenames', 'shapes', 'lod_levels', 'dtypes', 'thread_num', 'buffer_size', 'pass_num', 'is_test'], varargs=None, keywords=None, defaults=(None, None, 1, None))
......
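The new entry exposes `paddle.fluid.layers.stack(x, axis=0)`, which packs a list of same-shaped tensors into one tensor with an extra dimension inserted at `axis`; the kernel patched below implements it. As a rough illustration only (a host-only C++ sketch, not PaddlePaddle source: the function name `StackCPU` and the exact index mapping are assumptions based on the `n`, `post` and `total_num` parameters visible in the diff), the operation can be pictured like this:

```cpp
// Host-only sketch of a stack operation: n inputs of shape [pre, post] are
// interleaved into one output of shape [pre, n, post]. Parameter names mirror
// those in the kernel diff below; the mapping itself is an illustration.
#include <cstdio>
#include <vector>

void StackCPU(const std::vector<const float*>& x, float* y,
              int pre, int n, int post) {
  int total_num = pre * n * post;         // number of output elements
  for (int idx = 0; idx < total_num; ++idx) {
    int i = idx / (n * post);             // position along the leading dims
    int which_x = (idx / post) % n;       // which input tensor to read from
    int x_index = i * post + idx % post;  // flat offset inside that input
    y[idx] = x[which_x][x_index];
  }
}

int main() {
  // Two inputs of shape [2, 3] stacked along axis 1 -> output shape [2, 2, 3].
  float a[6] = {0, 1, 2, 3, 4, 5};
  float b[6] = {10, 11, 12, 13, 14, 15};
  float y[12];
  StackCPU({a, b}, y, /*pre=*/2, /*n=*/2, /*post=*/3);
  for (int i = 0; i < 12; ++i) printf("%g ", y[i]);
  printf("\n");  // prints: 0 1 2 10 11 12 3 4 5 13 14 15
  return 0;
}
```

For `axis=0` (the ArgSpec default), `pre` is simply 1 and the inputs are laid out one after another.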
@@ -154,17 +154,22 @@ class StackKernel : public framework::OpKernel<T> {
     if (std::is_same<DeviceContext, platform::CPUDeviceContext>::value ||
         n > kMaxThreshold) {
 #ifdef __NVCC__
+      VLOG(10) << "Stack more than " << kMaxThreshold
+               << " tensors on GPU may be slow.";
       thrust::device_vector<const T *> device_x_vec(x_datas);
       auto x_data_arr = device_x_vec.data().get();
 #else
       auto x_data_arr = x_datas.data();
 #endif
       StackFunctorForRange(dev_ctx, x_data_arr, y_data, total_num, n, post);
+#ifdef __NVCC__
+      // Wait() must be called because device_x_vec may be destructed before
+      // kernel ends
+      dev_ctx.Wait();
+#endif
     }
 #ifdef __NVCC__
     else {  // NOLINT
-      VLOG(10) << "Stack more than " << kMaxThreshold
-               << " tensors on GPU may be slow.";
       framework::Array<const T *, kMaxThreshold> x_data_arr;
       for (int i = 0; i < n; ++i) x_data_arr[i] = x_datas[i];
       StackFunctorForRange(dev_ctx, x_data_arr, y_data, total_num, n, post);
@@ -243,6 +248,8 @@ class StackGradKernel : public framework::OpKernel<T> {
     if (std::is_same<DeviceContext, platform::CPUDeviceContext>::value ||
         n > kMaxThreshold) {
 #ifdef __NVCC__
+      VLOG(10) << "Stack more than " << kMaxThreshold
+               << " tensors on GPU may be slow.";
       thrust::device_vector<T *> device_dx_vec(dx_datas);
       auto dx_data_arr = device_dx_vec.data().get();
 #else
@@ -250,11 +257,14 @@ class StackGradKernel : public framework::OpKernel<T> {
 #endif
       StackGradFunctorForRange(dev_ctx, dx_data_arr, dy_data, total_num, n,
                                post);
+#ifdef __NVCC__
+      // Wait() must be called because device_dx_vec may be destructed before
+      // kernel ends
+      dev_ctx.Wait();
+#endif
     }
 #ifdef __NVCC__
     else {  // NOLINT
-      VLOG(10) << "Stack more than " << kMaxThreshold
-               << " tensors on GPU may be slow.";
       framework::Array<T *, kMaxThreshold> dx_data_arr;
       for (int i = 0; i < n; ++i) dx_data_arr[i] = dx_datas[i];
       StackGradFunctorForRange(dev_ctx, dx_data_arr, dy_data, total_num, n,
......
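In both kernels the GPU work launched by `StackFunctorForRange` / `StackGradFunctorForRange` is asynchronous, while `device_x_vec` / `device_dx_vec` are local `thrust::device_vector`s whose destructors free their device memory at the end of the enclosing block. The added `dev_ctx.Wait()` makes the host block until the kernel has finished reading the pointer table held in that vector. Below is a standalone CUDA sketch of the same hazard, not PaddlePaddle code: `cudaDeviceSynchronize()` stands in for `dev_ctx.Wait()`, and the kernel and variable names are invented for the example.

```cuda
// Standalone CUDA sketch (not PaddlePaddle code) of the hazard the added
// dev_ctx.Wait() guards against: a thrust::device_vector frees its device
// memory in its destructor, so an asynchronously launched kernel that reads
// the vector's storage must finish before the vector goes out of scope.
#include <cstdio>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>

__global__ void ReadPointers(const float* const* ptrs, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    // Dereference the device-side pointer table, as the stack kernel does.
    printf("ptrs[%d][0] = %f\n", i, ptrs[i][0]);
  }
}

int main() {
  thrust::device_vector<float> a(4, 1.0f), b(4, 2.0f);
  const float* host_ptrs[] = {a.data().get(), b.data().get()};
  {
    // Device-side pointer table, analogous to device_x_vec in the diff above.
    thrust::device_vector<const float*> ptr_table(host_ptrs, host_ptrs + 2);
    ReadPointers<<<1, 2>>>(ptr_table.data().get(), 2);  // asynchronous launch
    // Without this synchronization, ptr_table could be destroyed below (and
    // its device memory freed) while the kernel is still reading it, which is
    // the same reason the commit adds dev_ctx.Wait() after the launch.
    cudaDeviceSynchronize();
  }  // ptr_table's destructor runs here.
  cudaDeviceSynchronize();
  return 0;
}
```

The `framework::Array<..., kMaxThreshold>` branch needs no such wait, presumably because the small fixed-size pointer table travels by value with the kernel arguments instead of living in separately allocated device memory.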