Commit 0fd4a04a authored by Yang Yu

Remove debug codes

Parent dd0a4c35
@@ -134,17 +134,8 @@ inline void* Tensor::mutable_data(platform::Place place, std::type_index type) {
 #endif
     offset_ = 0;
   }
-  void* buf = reinterpret_cast<void*>(
-      reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
-  if (type.hash_code() == typeid(float).hash_code() ||
-      type.hash_code() == typeid(double).hash_code()) {
-    float* tmp = (float*)(buf);
-    for (int64_t i = 0; i < numel(); ++i) {
-      tmp[i] = NAN;
-    }
-  }
-  return buf;
+  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
+                                 offset_);
 }
 inline void* Tensor::mutable_data(platform::Place place) {
......
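Note: the hunk above deletes a NaN-poisoning debug trick: every freshly allocated float/double buffer was filled with NaN so that any read-before-write would surface as NaN downstream instead of silently reusing stale memory. A minimal standalone sketch of that technique follows; the names (PoisonWithNaN) are illustrative and not part of the PaddlePaddle codebase.

    // Sketch: poison newly allocated floating-point storage with NaN so
    // uninitialized reads propagate visibly rather than yielding stale data.
    #include <cmath>
    #include <cstdint>
    #include <vector>

    void PoisonWithNaN(float* buf, int64_t numel) {
      for (int64_t i = 0; i < numel; ++i) {
        buf[i] = NAN;  // any consumer that reads before writing sees NaN
      }
    }

    int main() {
      std::vector<float> fresh(8);
      PoisonWithNaN(fresh.data(), static_cast<int64_t>(fresh.size()));
      return std::isnan(fresh[0]) ? 0 : 1;  // 0: buffer was poisoned as expected
    }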
@@ -51,7 +51,6 @@ class FillConstantOp : public framework::OperatorBase {
     platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
     auto &dev_ctx = *pool.Get(dev_place);
-    VLOG(10) << "FillConstant to " << &out;
     math::set_constant(dev_ctx, &out, value);
   }
 };
......
@@ -116,7 +116,6 @@ class ShrinkRNNMemoryGradOp : public ArrayOp {
       auto height = dout_tensor.dims()[0];
       auto slice = dx_tensor.Slice(0, static_cast<int>(height));
       framework::CopyFrom(dout_tensor, dout_tensor.place(), dev_ctx, &slice);
-      VLOG(10) << dx_tensor.dims()[0] << ", " << height;
       if (dx_tensor.dims()[0] > height) {
         auto rest_tensor = dx_tensor.Slice(
             static_cast<int>(height), static_cast<int>(dx_tensor.dims()[0]));
......
@@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include <cmath>
 #include <vector>
 #include "paddle/framework/executor.h"
 #include "paddle/framework/lod_tensor_array.h"
@@ -195,36 +194,14 @@ class WhileGradOp : public framework::OperatorBase {
          }
        }
-        auto check_var_no_nan = [](const framework::Scope &scope,
-                                   const std::string &var_name) {
-          auto *var = scope.FindVar(var_name);
-          if (var->IsType<LoDTensor>()) {
-            VLOG(10) << "Checking " << var_name;
-            PADDLE_ENFORCE(!framework::HasNAN(var->Get<framework::LoDTensor>()),
-                           "%s has NAN", var_name);
-            if (var->Get<framework::LoDTensor>().type() ==
-                typeid(float)) {  // NOLINT
-              auto &tensor = var->Get<framework::LoDTensor>();
-              auto *buf = tensor.data<float>();
-              for (int64_t i = 0; i < tensor.numel(); ++i) {
-                PADDLE_ENFORCE(!std::isnan(buf[i]));
-              }
-              VLOG(10) << buf[0];
-            }
-          }
-        };
-        check_var_no_nan(cur_scope, inside_grad_name);
         auto new_inside_name = cur_scope.Rename(inside_grad_name);
-        check_var_no_nan(cur_scope, new_inside_name);
         auto sum_op = framework::OpRegistry::CreateOp(
             "sum", {{"X", {pg_names[param_id], new_inside_name}}},
             {{"Out", {pg_names[param_id]}}}, framework::AttributeMap{});
         sum_op->Run(cur_scope, dev_place);
-        check_var_no_nan(scope, pg_names[param_id]);
         cur_scope.Rename(new_inside_name, inside_grad_name);
       }
     }
-    VLOG(1) << "Complete WhileOpGrad";
   }
 };
......
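Note: the hunk above deletes check_var_no_nan, which scanned gradient tensors element by element and failed fast on any NaN before and after the sum accumulation. A minimal standalone sketch of that scan, using plain assert in place of PADDLE_ENFORCE; all names here are illustrative, not the removed code itself.

    // Sketch: assert a float buffer is NaN-free, mirroring the element-wise
    // loop inside the removed check_var_no_nan lambda.
    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <vector>

    void CheckNoNaN(const float* buf, int64_t numel) {
      for (int64_t i = 0; i < numel; ++i) {
        assert(!std::isnan(buf[i]) && "tensor contains NaN");
      }
    }

    int main() {
      std::vector<float> grad = {0.5f, 1.0f, -2.0f};
      CheckNoNaN(grad.data(), static_cast<int64_t>(grad.size()));
      return 0;  // reaching here means no NaN was found
    }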