diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h
index 46ea3b881d4fd0e260714156b556b80a0ee581a5..6c6f298edc187a87677089e54c4c9046821282df 100644
--- a/paddle/framework/tensor_impl.h
+++ b/paddle/framework/tensor_impl.h
@@ -134,14 +134,6 @@ inline void* Tensor::mutable_data(platform::Place place, std::type_index type) {
 #endif
     offset_ = 0;
   }
-
-  if (typeid(float).hash_code() == type.hash_code()) {
-    auto buf = reinterpret_cast<float*>(
-        reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
-    for (int64_t i = 0; i < this->numel(); ++i) {
-      buf[i] = NAN;
-    }
-  }
   return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
                                  offset_);
 }
diff --git a/paddle/operators/sum_op.h b/paddle/operators/sum_op.h
index cbde9976dc392d748f60165ba55d7888408a5d63..eaa36aa1aea53e0b37ef6c578d8bb1cda230ded0 100644
--- a/paddle/operators/sum_op.h
+++ b/paddle/operators/sum_op.h
@@ -107,12 +107,10 @@ class SumKernel : public framework::OpKernel<T> {
           out_array.resize(i + 1);
         }
         if (out_array[i].numel() == 0) {
-          VLOG(10) << context.op().Output("Out") << " just copy";
           framework::CopyFrom(in_array[i], in_array[i].place(),
                               context.device_context(), &out_array[i]);
           out_array[i].set_lod(in_array[i].lod());
         } else {
-          VLOG(10) << context.op().Output("Out") << " merged";
           PADDLE_ENFORCE(out_array[i].lod() == in_array[i].lod());
           auto in = EigenVector<T>::Flatten(in_array[i]);
           auto result = EigenVector<T>::Flatten(out_array[i]);
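
For context: the block deleted from `Tensor::mutable_data` poisoned freshly allocated float buffers with NaN so that any read of uninitialized tensor memory surfaces immediately as NaN in downstream results, and the deleted `VLOG(10)` lines traced which branch `SumKernel` took for each `LoDTensorArray` element. Both are debugging aids rather than functional code. Below is a minimal sketch of how such NaN poisoning can instead be kept behind a compile-time guard; `PADDLE_WITH_NAN_POISON`, `mutable_data_sketch`, and its parameters are hypothetical names for illustration, not existing Paddle APIs.

// Minimal sketch (assumed names, not real Paddle APIs): keep the
// NaN-poisoning debug aid behind a compile-time flag instead of
// compiling it in unconditionally.
#include <cmath>      // NAN
#include <cstddef>    // size_t
#include <cstdint>    // uintptr_t, int64_t
#include <typeindex>  // std::type_index
#include <typeinfo>   // typeid

// PADDLE_WITH_NAN_POISON is a hypothetical macro used only for illustration.
inline void* mutable_data_sketch(void* base, size_t offset,
                                 std::type_index type, int64_t numel) {
  void* ptr =
      reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(base) + offset);
#ifdef PADDLE_WITH_NAN_POISON
  // Poison float buffers so reads of uninitialized data show up as NaN.
  if (type == std::type_index(typeid(float))) {
    auto* buf = static_cast<float*>(ptr);
    for (int64_t i = 0; i < numel; ++i) buf[i] = NAN;
  }
#endif
  return ptr;
}

Gating the aid this way keeps the poisoning available for debug builds while release builds pay no per-allocation cost; removing it outright, as this patch does, trades that convenience for a simpler hot path.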