Commit 5f217099 authored by dangqingqing

Fix bug: make SetConstant evaluate on the Eigen device selected by its Place template parameter (instead of always the CPU device), add the missing math_function dependency to context_project, and remove leftover debug logging.

Parent 524ccba4
@@ -10,7 +10,7 @@ if(WITH_GPU)
     nv_library(pooling SRCS pooling.cc pooling.cu DEPS device_context)
     nv_library(sequence_pooling SRCS sequence_pooling.cc sequence_pooling.cu DEPS device_context math_function)
     nv_library(vol2col SRCS vol2col.cc vol2col.cu DEPS device_context)
-    nv_library(context_project SRCS context_project.cc context_project.cu DEPS device_context)
+    nv_library(context_project SRCS context_project.cc context_project.cu DEPS device_context math_function)
     nv_library(sequence2batch SRCS sequence2batch.cc sequence2batch.cu DEPS device_context)
     nv_library(lstm_compute SRCS lstm_compute.cc lstm_compute.cu DEPS device_context activation_functions)
     nv_library(gru_compute SRCS gru_compute.cc gru_compute.cu DEPS device_context activation_functions math_function)
@@ -22,7 +22,7 @@ else()
     cc_library(pooling SRCS pooling.cc DEPS device_context)
     cc_library(sequence_pooling SRCS sequence_pooling.cc DEPS device_context math_function)
     cc_library(vol2col SRCS vol2col.cc DEPS device_context)
-    cc_library(context_project SRCS context_project.cc DEPS device_context)
+    cc_library(context_project SRCS context_project.cc DEPS device_context math_function)
     cc_library(sequence2batch SRCS sequence2batch.cc DEPS device_context)
     cc_library(lstm_compute SRCS lstm_compute.cc DEPS device_context activation_functions)
     cc_library(gru_compute SRCS gru_compute.cc DEPS device_context activation_functions math_function)
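The only functional change in both branches is the extra math_function entry in the DEPS list of context_project, so the linker can resolve math_function symbols that context_project now references (plausibly the SetConstant functor fixed further below, though the diff does not show the call site). A hypothetical sketch of the kind of use that creates this dependency; the header path, the ZeroPadding name, and the padding_data parameter are invented for illustration:

#include "paddle/operators/math/math_function.h"  // assumed header location

// Hypothetical illustration only: any reference to a math_function symbol
// inside context_project.cc / context_project.cu requires the new DEPS entry.
template <typename Place, typename T>
void ZeroPadding(const paddle::platform::DeviceContext& context,
                 paddle::framework::Tensor* padding_data) {
  paddle::operators::math::SetConstant<Place, T> set_zero;
  set_zero(context, padding_data, static_cast<T>(0));  // fill with zeros
}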
@@ -91,9 +91,6 @@ class ContextProjectFunctor {
     auto lod_level_0 = in.lod()[0];
     math::Im2ColFunctor<math::ColFormat::kOCF, Place, float> im2col_ocf;
-    if (platform::is_gpu_place(context.GetPlace())) {
-      LOG(INFO) << "========= gpu ==========";
-    }
     int input_row_begin, input_row_end;
     int sequence_height, sequence_width;
@@ -23,8 +23,7 @@ template <typename Place, typename T>
 void SetConstant<Place, T>::operator()(const platform::DeviceContext& context,
                                        framework::Tensor* tensor, T num) {
   auto t = framework::EigenVector<T>::Flatten(*tensor);
-  t.device(*context.GetEigenDevice<platform::CPUPlace>()) =
-      t.constant(static_cast<T>(num));
+  t.device(*context.GetEigenDevice<Place>()) = t.constant(static_cast<T>(num));
 }

 template <typename Place, typename T, int Rank>
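This hunk is the actual bug fix: the old code always fetched the Eigen device for platform::CPUPlace, so the constant-fill expression ignored the functor's Place template parameter; the new line dispatches on Place. The following standalone sketch (plain Eigen, not PaddlePaddle code; every name in it is a stand-in) shows the pattern the fix restores, namely evaluating the expression on the device that matches the place tag:

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

// Stand-in place tag mirroring the framework's CPUPlace; a GPUPlace would map
// to Eigen::GpuDevice in the real code.
struct CPUPlace {};

// Map a place tag to the Eigen device type used to evaluate expressions.
template <typename Place>
struct EigenDeviceOf;

template <>
struct EigenDeviceOf<CPUPlace> {
  using type = Eigen::DefaultDevice;
};

template <typename Place, typename T>
struct SetConstantSketch {
  void operator()(typename EigenDeviceOf<Place>::type& dev,
                  Eigen::Tensor<T, 1>& t, T num) {
    // The bug was equivalent to hard-coding the CPU device here; the fix
    // evaluates on the device that corresponds to Place.
    t.device(dev) = t.constant(num);
  }
};

int main() {
  Eigen::DefaultDevice cpu;
  Eigen::Tensor<float, 1> t(4);
  SetConstantSketch<CPUPlace, float>()(cpu, t, 1.5f);
  std::cout << t << std::endl;  // prints four values of 1.5
  return 0;
}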
@@ -27,9 +27,6 @@ class SoftmaxKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& context) const override {
     auto* X = context.Input<Tensor>("X");
     auto* Y = context.Output<Tensor>("Y");
-    if (platform::is_gpu_place(context.GetPlace())) {
-      LOG(INFO) << "==========gpu=========";
-    }

     // allocate memory on device.
     Y->mutable_data<T>(context.GetPlace());