Commit 74cf7adb authored by 李寅

Merge branch 'rm-dynamic-cast' into 'master'

BUG: Remove RTTI-related code (dynamic_cast, typeid).

See merge request !934
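The pattern behind this change, shown as a minimal self-contained sketch (not MACE's real classes; the HasOpenCLImage helper and the example shape are made up for illustration): a virtual buffer_type() tag plus a base-class shape() let callers branch on the buffer kind and read image dimensions without typeid or dynamic_cast, so the code also builds with RTTI disabled (e.g. -fno-rtti).

// Sketch of the RTTI-free type-tag pattern this commit applies.
// The real MACE buffers carry allocators, mapping logic, etc.
#include <cstddef>
#include <iostream>
#include <memory>
#include <utility>
#include <vector>

namespace core {
enum BufferType {
  BT_BUFFER,
  BT_IMAGE,
};
}  // namespace core

class BufferBase {
 public:
  virtual ~BufferBase() {}
  // Each concrete buffer reports its own tag, so callers branch on the tag
  // instead of querying RTTI with typeid(*buffer) == typeid(Image).
  virtual core::BufferType buffer_type() const = 0;
  // Shape lives in the base interface, so callers no longer need
  // dynamic_cast<Image *> just to reach the image shape.
  virtual const std::vector<size_t> shape() const = 0;
};

class Buffer : public BufferBase {
 public:
  core::BufferType buffer_type() const override { return core::BT_BUFFER; }
  const std::vector<size_t> shape() const override { return {}; }
};

class Image : public BufferBase {
 public:
  explicit Image(std::vector<size_t> shape) : shape_(std::move(shape)) {}
  core::BufferType buffer_type() const override { return core::BT_IMAGE; }
  const std::vector<size_t> shape() const override { return shape_; }

 private:
  std::vector<size_t> shape_;
};

// Illustrative helper: works even when compiled with -fno-rtti.
bool HasOpenCLImage(const BufferBase *buffer) {
  return buffer != nullptr && buffer->buffer_type() == core::BT_IMAGE;
}

int main() {
  std::unique_ptr<BufferBase> image(new Image({64, 32}));
  std::cout << HasOpenCLImage(image.get()) << " "
            << image->shape()[0] << "x" << image->shape()[1] << std::endl;
  return 0;
}

The trade-off of the tag enum is that every new buffer kind has to be added to BufferType, but the check is a cheap virtual call and needs no RTTI support from the toolchain.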
@@ -25,6 +25,12 @@
 #include "mace/core/types.h"
 namespace mace {
+namespace core {
+enum BufferType {
+  BT_BUFFER,
+  BT_IMAGE,
+};
+}  // namespace core
 class BufferBase {
  public:
@@ -32,6 +38,8 @@ class BufferBase {
   explicit BufferBase(index_t size) : size_(size) {}
   virtual ~BufferBase() {}
+  virtual core::BufferType buffer_type() const = 0;
   virtual void *buffer() = 0;
   virtual const void *raw_data() const = 0;
@@ -63,6 +71,8 @@ class BufferBase {
   virtual void Clear(index_t size) = 0;
+  virtual const std::vector<size_t> shape() const = 0;
   virtual index_t offset() const { return 0; }
   template <typename T>
@@ -106,6 +116,10 @@ class Buffer : public BufferBase {
     }
   }
+  core::BufferType buffer_type() const {
+    return core::BufferType::BT_BUFFER;
+  }
   void *buffer() {
     MACE_CHECK_NOTNULL(buf_);
     return buf_;
@@ -207,6 +221,11 @@ class Buffer : public BufferBase {
     memset(reinterpret_cast<char*>(raw_mutable_data()), 0, size);
   }
+  const std::vector<size_t> shape() const {
+    MACE_NOT_IMPLEMENTED;
+    return {};
+  }
  protected:
   Allocator *allocator_;
   void *buf_;
@@ -238,6 +257,10 @@ class Image : public BufferBase {
     return data_type_;
   }
+  core::BufferType buffer_type() const {
+    return core::BufferType::BT_IMAGE;
+  }
   void *buffer() {
     MACE_CHECK_NOTNULL(buf_);
     return buf_;
@@ -253,8 +276,6 @@ class Image : public BufferBase {
     return mapped_buf_;
   }
-  std::vector<size_t> image_shape() const { return shape_; }
   MaceStatus Allocate(index_t nbytes) {
     MACE_UNUSED(nbytes);
     LOG(FATAL) << "Image should not call this allocate function";
@@ -328,6 +349,10 @@ class Image : public BufferBase {
     MACE_NOT_IMPLEMENTED;
   }
+  const std::vector<size_t> shape() const {
+    return shape_;
+  }
  private:
   Allocator *allocator_;
   std::vector<size_t> shape_;
@@ -365,6 +390,10 @@ class BufferSlice : public BufferBase {
     }
   }
+  core::BufferType buffer_type() const {
+    return core::BufferType::BT_BUFFER;
+  }
   void *buffer() {
     MACE_CHECK_NOTNULL(buffer_);
     return buffer_->buffer();
@@ -454,6 +483,11 @@ class BufferSlice : public BufferBase {
     memset(raw_mutable_data(), 0, size);
   }
+  const std::vector<size_t> shape() const {
+    MACE_NOT_IMPLEMENTED;
+    return {};
+  }
  private:
   BufferBase *buffer_;
   void *mapped_buf_;
...
@@ -33,7 +33,7 @@ Image *ScratchImageManager::Spawn(
   for (int i = 0; i < image_count; ++i) {
     int count = reference_count_[i];
     if (count == 0 && images_.at(count)->dtype() == dt) {
-      auto image_shape = images_.at(count)->image_shape();
+      auto image_shape = images_.at(count)->shape();
       if (image_shape[0] >= shape[0] && image_shape[1] >= shape[1]) {
         found_image_idx = i;
         break;
...
@@ -215,7 +215,7 @@ class Tensor {
   inline bool has_opencl_image() const {
     return buffer_ != nullptr && !buffer_->OnHost() &&
-           typeid(*buffer_) == typeid(Image);
+           buffer_->buffer_type() == core::BufferType::BT_IMAGE;
   }
   inline bool has_opencl_buffer() const {
@@ -226,7 +226,7 @@ class Tensor {
     MACE_CHECK(buffer_ != nullptr, "Tensor ", name_, " is empty");
     if (buffer_->OnHost()) {
       return MemoryType::CPU_BUFFER;
-    } else if (typeid(*buffer_) == typeid(Image)) {
+    } else if (buffer_->buffer_type() == core::BufferType::BT_IMAGE) {
       return MemoryType::GPU_IMAGE;
     } else {
       return MemoryType::GPU_BUFFER;
@@ -343,12 +343,11 @@ class Tensor {
     } else {
       MACE_CHECK(has_opencl_image(),
                  name_, ": Cannot ResizeImage buffer, use Resize.");
-      Image *image = dynamic_cast<Image *>(buffer_);
-      MACE_CHECK(image_shape[0] <= image->image_shape()[0] &&
-                 image_shape[1] <= image->image_shape()[1],
+      MACE_CHECK(image_shape[0] <= buffer_->shape()[0] &&
+                     image_shape[1] <= buffer_->shape()[1],
                  "tensor (source op ", name_,
-                 "): current physical image shape: ", image->image_shape()[0],
-                 ", ", image->image_shape()[1], " < logical image shape: ",
+                 "): current physical image shape: ", buffer_->shape()[0],
+                 ", ", buffer_->shape()[1], " < logical image shape: ",
                  image_shape[0], ", ", image_shape[1]);
       return MaceStatus::MACE_SUCCESS;
     }
...
@@ -272,11 +272,9 @@ MaceStatus Workspace::PreallocateOutputTensor(
             << " Mem: " << tensor_mem.second.first
             << " Data type: " << tensor->dtype()
             << " Image shape: "
-            << dynamic_cast<Image *>(tensor->UnderlyingBuffer())
-                   ->image_shape()[0]
+            << tensor->UnderlyingBuffer()->shape()[0]
             << ", "
-            << dynamic_cast<Image *>(tensor->UnderlyingBuffer())
-                   ->image_shape()[1];
+            << tensor->UnderlyingBuffer()->shape()[1];
     tensor->set_data_format(DataFormat::NHWC);
   } else {
     VLOG(1) << "Tensor: " << tensor_mem.first
...