Unverified Commit cb246fc8, authored by J juncaipeng, committed by GitHub

fix mask rcnn error when run twice, test=develop (#2675) (#2709)

add clear for tensor
Parent d56f25df
@@ -120,6 +120,7 @@ class Buffer {
     if (space_ > 0) {
       TargetFree(target_, data_);
     }
+    data_ = nullptr;
     target_ = TargetType::kHost;
     space_ = 0;
   }
...
@@ -176,6 +176,10 @@ class TensorLite {
                                  (static_cast<char *>(buffer_->data()) + offset_));
   }

+  void clear() {
+    buffer_->Free();
+    offset_ = 0;
+  }
   size_t data_size() const { return this->dims().production(); }

   size_t memory_size() const { return memory_size_; }
...
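Taken together, the two hunks above make tensor cleanup idempotent: Buffer::Free() now nulls data_ after releasing it, and the new TensorLite::clear() frees the buffer and rewinds offset_. Below is a minimal, self-contained sketch of that pattern; the ToyBuffer/ToyTensor names and members are illustrative stand-ins, not the actual Paddle-Lite classes.

#include <cstddef>
#include <cstdlib>
#include <iostream>

// Simplified stand-in for the Buffer class touched above: Free() is safe to
// call repeatedly because it resets both the raw pointer and the size
// bookkeeping before returning.
class ToyBuffer {
 public:
  void Resize(std::size_t bytes) {
    if (bytes > space_) {
      Free();
      data_ = std::malloc(bytes);
      space_ = bytes;
    }
  }
  void Free() {
    if (space_ > 0) {
      std::free(data_);
    }
    data_ = nullptr;  // mirrors the added `data_ = nullptr;` line: no dangling pointer survives
    space_ = 0;
  }
  void* data() const { return data_; }
  ~ToyBuffer() { Free(); }

 private:
  void* data_ = nullptr;
  std::size_t space_ = 0;
};

// Simplified stand-in for TensorLite::clear(): release the allocation and
// rewind the view offset so the tensor behaves like a freshly created one.
class ToyTensor {
 public:
  void Resize(std::size_t bytes) { buffer_.Resize(bytes); }
  void clear() {
    buffer_.Free();
    offset_ = 0;
  }

 private:
  ToyBuffer buffer_;
  std::size_t offset_ = 0;
};

int main() {
  ToyTensor out;
  out.Resize(64);
  // A kernel that clears its outputs at the top of every Run() can now be
  // executed twice in a row: the second clear() is a harmless no-op.
  out.clear();
  out.clear();
  std::cout << "cleared twice without double-free\n";
  return 0;
}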
@@ -34,6 +34,9 @@ void ConditionalBlockCompute::PrepareForRun() {
 }
 void ConditionalBlockCompute::Run() {
   auto& param = Param<operators::ConditionalBlockParam>();
+  for (auto& out : param.outs) {
+    out->clear();
+  }
   bool need_run = true;
   if (param.is_scalar_condition) {
     auto* cond = param.cond;
...
@@ -82,6 +82,10 @@ void SplitLodTensorCompute::Run() {
         ranges.begin(), ranges.end(), 0UL, [](size_t a, const CopyRange &b) {
           return a + b.end - b.begin;
         });
+    if (height == 0) {
+      out->clear();
+      continue;
+    }
     auto x_dim = x->dims();
     x_dim[0] = static_cast<int64_t>(height);
     out->Resize(x_dim);
...
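The kernel-side hunks apply the new clear() at the call sites: conditional_block clears every output before deciding whether to run its sub-block, and split_lod_tensor clears an output whose slice is empty. A hedged sketch of the run-twice scenario the commit message refers to, using a hypothetical RunConditionalBlock helper and toy types rather than the real kernels:

#include <iostream>
#include <vector>

// Minimal stand-in for an output tensor; clear() drops anything left over
// from a previous execution.
struct ToyOut {
  std::vector<float> data;
  void clear() { data.clear(); }
};

// Sketch of the conditional_block pattern after this commit: outputs are
// cleared up front, so a run in which the condition is false leaves empty
// outputs rather than whatever the previous run produced.
void RunConditionalBlock(bool cond, ToyOut* out) {
  out->clear();  // analogous to the loop over param.outs added above
  if (cond) {
    out->data = {1.f, 2.f, 3.f};  // pretend the sub-block wrote a result
  }
}

int main() {
  ToyOut out;
  RunConditionalBlock(true, &out);   // first run: condition holds
  RunConditionalBlock(false, &out);  // second run: condition does not hold
  // Without the up-front clear() this toy would still report size 3 here,
  // i.e. the stale result of the first run.
  std::cout << "output size after second run: " << out.data.size() << "\n";
  return 0;
}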