提交 a04341f6 编写于 作者: L Liangliang He

Update macros and add empty Conv2D implementation

上级 aef11884
......@@ -21,7 +21,7 @@ Allocator* GetDeviceAllocator(DeviceType type) {
case DeviceType::NEON:
return cpu_allocator();
default:
REQUIRE(false, "device type ", type, " is not supported.");
MACE_CHECK(false, "device type ", type, " is not supported.");
}
return nullptr;
}
......
......@@ -47,9 +47,9 @@ class CPUAllocator: public Allocator {
#ifdef __ANDROID__
data = memalign(kMaceAlignment, nbytes);
#else
CHECK(posix_memalign(&data, kMaceAlignment, nbytes) == 0);
MACE_CHECK(posix_memalign(&data, kMaceAlignment, nbytes) == 0);
#endif
CHECK_NOTNULL(data);
MACE_CHECK_NOTNULL(data);
// TODO(heliangliang) This should be avoided sometimes
memset(data, 0, nbytes);
return data;
......
......@@ -31,7 +31,7 @@ private: \
classname& operator=(const classname&) = delete
#endif
#define MACE_NOT_IMPLEMENTED REQUIRE(false, "not implemented")
#define MACE_NOT_IMPLEMENTED MACE_CHECK(false, "not implemented")
#define kCostPerGroup 8192
......
......@@ -108,17 +108,15 @@ class LogMessageFatal : public LogMessage {
if (VLOG_IS_ON(lvl)) \
::mace::internal::LogMessage(__FILE__, __LINE__, mace::INFO)
// CHECK dies with a fatal error if condition is not true. It is *not*
// MACE_CHECK dies with a fatal error if condition is not true. It is *not*
// controlled by NDEBUG, so the check will be executed regardless of
// compilation mode. Therefore, it is safe to do things like:
// CHECK(fp->Write(x) == 4)
#define CHECK(condition) \
// MACE_CHECK(fp->Write(x) == 4)
// MACE_CHECK(fp->Write(x) == 4, "Write failed")
#define MACE_CHECK(condition, ...) \
if (!(condition)) \
LOG(FATAL) << "Check failed: " #condition " "
#define REQUIRE(condition, ...) \
if (!(condition)) \
LOG(FATAL) << "Check failed: " #condition " " << ::mace::internal::MakeString(__VA_ARGS__)
LOG(FATAL) << "Check failed: " #condition " " \
<< ::mace::internal::MakeString(__VA_ARGS__)
template <typename T>
T&& CheckNotNull(const char* file, int line, const char* exprtext, T&& t) {
......@@ -128,7 +126,7 @@ T&& CheckNotNull(const char* file, int line, const char* exprtext, T&& t) {
return std::forward<T>(t);
}
#define CHECK_NOTNULL(val) \
#define MACE_CHECK_NOTNULL(val) \
::mace::internal::CheckNotNull(__FILE__, __LINE__, \
"'" #val "' Must be non NULL", (val))
......
......@@ -20,18 +20,18 @@ class OperatorBase {
virtual ~OperatorBase() noexcept {}
inline bool HasArgument(const string &name) const {
REQUIRE(operator_def_, "operator_def was null!");
MACE_CHECK(operator_def_, "operator_def was null!");
return ArgumentHelper::HasArgument(*operator_def_, name);
}
template<typename T>
inline T GetSingleArgument(const string &name, const T &default_value) const {
REQUIRE(operator_def_, "operator_def was null!");
MACE_CHECK(operator_def_, "operator_def was null!");
return ArgumentHelper::GetSingleArgument<OperatorDef, T>(
*operator_def_, name, default_value);
}
template<typename T>
inline bool HasSingleArgumentOfType(const string &name) const {
REQUIRE(operator_def_, "operator_def was null!");
MACE_CHECK(operator_def_, "operator_def was null!");
return ArgumentHelper::HasSingleArgumentOfType<OperatorDef, T>(
*operator_def_, name);
}
......@@ -39,13 +39,13 @@ class OperatorBase {
inline vector<T> GetRepeatedArgument(
const string &name,
const vector<T> &default_value = {}) const {
REQUIRE(operator_def_, "operator_def was null!");
MACE_CHECK(operator_def_, "operator_def was null!");
return ArgumentHelper::GetRepeatedArgument<OperatorDef, T>(
*operator_def_, name, default_value);
}
inline const Tensor *Input(int idx) {
CHECK(idx < inputs_.size());
MACE_CHECK(idx < inputs_.size());
return inputs_[idx];
}
......@@ -61,7 +61,7 @@ class OperatorBase {
virtual bool Run() = 0;
inline const OperatorDef &debug_def() const {
REQUIRE(has_debug_def(), "operator_def was null!");
MACE_CHECK(has_debug_def(), "operator_def was null!");
return *operator_def_;
}
......@@ -90,7 +90,7 @@ class Operator : public OperatorBase {
: OperatorBase(operator_def, ws) {
for (const string &input_str : operator_def.input()) {
const Tensor *tensor = ws->GetTensor(input_str);
REQUIRE(
MACE_CHECK(
tensor != nullptr,
"op ",
operator_def.type(),
......@@ -100,7 +100,7 @@ class Operator : public OperatorBase {
}
for (const string &output_str : operator_def.output()) {
outputs_.push_back(CHECK_NOTNULL(ws->CreateTensor(output_str,
outputs_.push_back(MACE_CHECK_NOTNULL(ws->CreateTensor(output_str,
DeviceContext<D>::allocator(),
DataTypeToEnum<T>::v())));
}
......@@ -109,6 +109,21 @@ class Operator : public OperatorBase {
~Operator() noexcept override {}
};
// OP_INPUT_TAGS and OP_OUTPUT_TAGS are optional features to name the indices of the
// operator's inputs and outputs, in order to avoid confusion. For example, for
// a fully convolution layer that has input, weight and bias, you can define its
// input tags as:
// OP_INPUT_TAGS(INPUT, WEIGHT, BIAS);
// And in the code, instead of doing
// auto& weight = Input(1);
// you can now do
// auto& weight = Input(WEIGHT);
// to make it more clear.
#define OP_INPUT_TAGS(first_input, ...) \
enum _InputTags { first_input = 0, __VA_ARGS__ }
#define OP_OUTPUT_TAGS(first_input, ...) \
enum _OutputTags { first_input = 0, __VA_ARGS__ }
typedef Registry<std::string, OperatorBase, const OperatorDef &, Workspace *>
OperatorRegistry;
typedef Registry<std::string, OperatorBase, const OperatorDef &, Workspace *> *(
......
......@@ -101,7 +101,7 @@ using ::google::protobuf::io::CodedOutputStream;
bool ReadProtoFromTextFile(const char* filename, Message* proto) {
int fd = open(filename, O_RDONLY);
REQUIRE(fd != -1, "File not found: ", filename);
MACE_CHECK(fd != -1, "File not found: ", filename);
FileInputStream* input = new FileInputStream(fd);
bool success = google::protobuf::TextFormat::Parse(input, proto);
delete input;
......@@ -112,7 +112,7 @@ bool ReadProtoFromTextFile(const char* filename, Message* proto) {
void WriteProtoToTextFile(const Message& proto, const char* filename) {
int fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0644);
FileOutputStream* output = new FileOutputStream(fd);
CHECK(google::protobuf::TextFormat::Print(proto, output));
MACE_CHECK(google::protobuf::TextFormat::Print(proto, output));
delete output;
close(fd);
}
......@@ -123,7 +123,7 @@ bool ReadProtoFromBinaryFile(const char* filename, MessageLite* proto) {
#else
int fd = open(filename, O_RDONLY);
#endif
REQUIRE(fd != -1, "File not found: ", filename);
MACE_CHECK(fd != -1, "File not found: ", filename);
std::unique_ptr<ZeroCopyInputStream> raw_input(new FileInputStream(fd));
std::unique_ptr<CodedInputStream> coded_input(
new CodedInputStream(raw_input.get()));
......@@ -138,12 +138,12 @@ bool ReadProtoFromBinaryFile(const char* filename, MessageLite* proto) {
void WriteProtoToBinaryFile(const MessageLite& proto, const char* filename) {
int fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0644);
REQUIRE(
MACE_CHECK(
fd != -1, "File cannot be created: ", filename, " error number: ", errno);
std::unique_ptr<ZeroCopyOutputStream> raw_output(new FileOutputStream(fd));
std::unique_ptr<CodedOutputStream> coded_output(
new CodedOutputStream(raw_output.get()));
CHECK(proto.SerializeToCodedStream(coded_output.get()));
MACE_CHECK(proto.SerializeToCodedStream(coded_output.get()));
coded_output.reset();
raw_output.reset();
close(fd);
......@@ -154,7 +154,7 @@ void WriteProtoToBinaryFile(const MessageLite& proto, const char* filename) {
ArgumentHelper::ArgumentHelper(const OperatorDef &def) {
for (auto &arg : def.arg()) {
if (arg_map_.find(arg.name()) != arg_map_.end()) {
REQUIRE(
MACE_CHECK(
arg.SerializeAsString() == arg_map_[arg.name()].SerializeAsString(),
"Found argument of the same name '",
arg.name(),
......@@ -171,7 +171,7 @@ ArgumentHelper::ArgumentHelper(const OperatorDef &def) {
ArgumentHelper::ArgumentHelper(const NetDef& netdef) {
for (auto& arg : netdef.arg()) {
REQUIRE(
MACE_CHECK(
arg_map_.count(arg.name()) == 0,
"Duplicated argument name found in net def: ",
ProtoDebugString(netdef));
......@@ -202,7 +202,7 @@ bool SupportsLosslessConversion(const InputType& value) {
<< " for parameter " << name; \
return default_value; \
} \
REQUIRE( \
MACE_CHECK( \
arg_map_.at(name).has_##fieldname(), \
"Argument ", \
name, \
......@@ -211,7 +211,7 @@ bool SupportsLosslessConversion(const InputType& value) {
if (enforce_lossless_conversion) { \
auto supportsConversion = \
SupportsLosslessConversion<decltype(value), T>(value); \
REQUIRE( \
MACE_CHECK( \
supportsConversion, \
"Value", \
value, \
......@@ -255,7 +255,7 @@ INSTANTIATE_GET_SINGLE_ARGUMENT(string, s, false)
if (enforce_lossless_conversion) { \
auto supportsConversion = \
SupportsLosslessConversion<decltype(v), T>(v); \
REQUIRE( \
MACE_CHECK( \
supportsConversion, \
"Value", \
v, \
......@@ -328,7 +328,7 @@ const Argument& GetArgument(const OperatorDef& def, const string& name) {
return arg;
}
}
REQUIRE(false,
MACE_CHECK(false,
"Argument named ",
name,
"does not exist in operator ",
......@@ -341,7 +341,7 @@ bool GetFlagArgument(
bool def_value) {
for (const Argument& arg : def.arg()) {
if (arg.name() == name) {
REQUIRE(
MACE_CHECK(
arg.has_i(), "Can't parse argument as bool: ", ProtoDebugString(arg));
return arg.i();
}
......
......@@ -213,10 +213,10 @@ class ArgumentHelper {
template <typename MessageType>
MessageType GetMessageArgument(const string& name) const {
REQUIRE(arg_map_.count(name), "Cannot find parameter named " + name);
MACE_CHECK(arg_map_.count(name), "Cannot find parameter named " + name);
MessageType message;
if (arg_map_.at(name).has_s()) {
REQUIRE(
MACE_CHECK(
message.ParseFromString(arg_map_.at(name).s()),
"Faild to parse content from the string");
} else {
......@@ -227,10 +227,10 @@ class ArgumentHelper {
template <typename MessageType>
vector<MessageType> GetRepeatedMessageArgument(const string& name) const {
REQUIRE(arg_map_.count(name), "Cannot find parameter named " + name);
MACE_CHECK(arg_map_.count(name), "Cannot find parameter named " + name);
vector<MessageType> messages(arg_map_.at(name).strings_size());
for (int i = 0; i < messages.size(); ++i) {
REQUIRE(
MACE_CHECK(
messages[i].ParseFromString(arg_map_.at(name).strings(i)),
"Faild to parse content from the string");
}
......
......@@ -18,7 +18,7 @@ class Registry {
void Register(const SrcType& key, Creator creator) {
std::lock_guard<std::mutex> lock(register_mutex_);
REQUIRE(registry_.count(key) == 0, "Key already registered.");
MACE_CHECK(registry_.count(key) == 0, "Key already registered.");
registry_[key] = creator;
}
......
......@@ -71,13 +71,13 @@ class Tensor {
inline TIndex size() const { return size_; }
inline const void* raw_data() const {
CHECK(data_.get() || size_ == 0);
MACE_CHECK(data_.get() || size_ == 0);
return data_.get();
}
template <typename T>
inline const T* data() const {
REQUIRE(
MACE_CHECK(
data_.get() || size_ == 0,
"The tensor is of non-zero shape, but its data is not allocated yet. ");
return static_cast<T*>(data_.get());
......@@ -121,13 +121,13 @@ class Tensor {
template <typename T>
inline void Copy(const T* src, size_t size) {
REQUIRE(size == size_, "copy src and dst with different size.");
MACE_CHECK(size == size_, "copy src and dst with different size.");
CopyBytes(static_cast<const void*>(src), sizeof(T) * size);
}
template <typename SrcType, typename DstType>
inline void CopyWithCast(const SrcType* src, size_t size) {
REQUIRE(size == size_, "copy src and dst with different size.");
MACE_CHECK(size == size_, "copy src and dst with different size.");
unique_ptr<DstType[]> buffer(new DstType[size]);
for (int i = 0; i < size; ++i) {
buffer[i] = static_cast<DstType>(src[i]);
......
......@@ -38,13 +38,13 @@ Benchmark::Benchmark(const char* name, void (*fn)(int, int, int))
}
Benchmark* Benchmark::Arg(int x) {
CHECK(num_args_ == 1);
MACE_CHECK(num_args_ == 1);
args_.push_back(std::make_pair(x, -1));
return this;
}
Benchmark* Benchmark::ArgPair(int x, int y) {
CHECK(num_args_ == 2);
MACE_CHECK(num_args_ == 2);
args_.push_back(std::make_pair(x, y));
return this;
}
......
......@@ -11,7 +11,7 @@ Examples
bazel build -c opt mace/examples:helloworld \
--crosstool_top=//external:android/crosstool \
--host_crosstool_top=@bazel_tools//tools/cpp:toolchain \
--cpu=armeabi-v7a
--cpu=arm64-v8a
```
* To run adb inside docker, the container network should use 'host'
......
......@@ -13,8 +13,8 @@ namespace kernels {
template<typename T>
void AddNFuntion(const vector<const Tensor*>& input_tensor, Tensor *output_tensor) {
int n = input_tensor.size();
CHECK(n > 1);
CHECK_NOTNULL(input_tensor[0]);
MACE_CHECK(n > 1);
MACE_CHECK_NOTNULL(input_tensor[0]);
int64 size = input_tensor[0]->size();
vector<const T*> inputs(n);
for (int i = 0; i < n; ++i) {
......
......@@ -12,8 +12,8 @@ namespace kernels {
void NeonAddNFuntion_float(const vector<const Tensor *> &input_tensor,
Tensor *output_tensor) {
int n = input_tensor.size();
CHECK(n > 1);
CHECK_NOTNULL(input_tensor[0]);
MACE_CHECK(n > 1);
MACE_CHECK_NOTNULL(input_tensor[0]);
int64 size = input_tensor[0]->size();
output_tensor->ResizeLike(input_tensor[0]);
float *output = output_tensor->mutable_data<float>();
......
......@@ -22,5 +22,3 @@ cc_library(
copts = ['-std=c++11'],
alwayslink = 1,
)
//
// Copyright (c) 2017 XiaoMi All rights reserved.
//
#include "mace/ops/conv_2d.h"
#include "mace/proto/mace.pb.h"
namespace mace {
// CPU/float specialization of Conv2dOp::Run.
// Placeholder implementation: no convolution is performed yet; it only
// logs the kernel dims and a few input values, then reports success.
template <>
bool Conv2dOp<DeviceType::CPU, float>::Run() {
  const Tensor* input = Input(INPUT);
  const Tensor* filter = Input(FILTER);  // unused until the real kernel lands
  const Tensor* bias = Input(BIAS);      // unused until the real kernel lands
  Tensor* output = Output(OUTPUT);       // unused until the real kernel lands

  // Test: temporary debug output until the real convolution is implemented.
  // Guard against a missing/short "kernels" argument before indexing it.
  MACE_CHECK(kernels_.size() >= 2, "conv_2d requires 2 kernel dims");
  VLOG(0) << "conv_2d([" << kernels_[0] << ", " << kernels_[1] << "], )";

  // Clamp the debug dump to the tensor's actual size to avoid an
  // out-of-range read when the input has fewer than 6 elements.
  const float* input_data = input->data<float>();
  auto dump_count = input->size();
  if (dump_count > 6) {
    dump_count = 6;
  }
  for (decltype(dump_count) i = 0; i < dump_count; ++i) {
    VLOG(0) << input_data[i];
  }
  return true;
}
REGISTER_CPU_OPERATOR(Conv2d, Conv2dOp<DeviceType::CPU, float>);
}
//
// Copyright (c) 2017 XiaoMi All rights reserved.
//
#ifndef MACE_OPS_CONV_2D_H_
#define MACE_OPS_CONV_2D_H_
#include "mace/core/operator.h"
namespace mace {
// Conv2dOp: 2-D convolution operator, parameterized by device and data type.
// Declaration only — per-device Run() specializations live in the .cc files.
template<DeviceType D, class T>
class Conv2dOp : public Operator<D, T> {
public:
// Reads the convolution hyper-parameters from the operator definition as
// repeated int arguments: "kernels", "strides", "paddings", "dilations".
// Presumably two entries each (H, W) — TODO confirm against op schema.
Conv2dOp(const OperatorDef &operator_def, Workspace *ws)
: Operator<D, T>(operator_def, ws),
kernels_(OperatorBase::GetRepeatedArgument<int>("kernels")),
strides_(OperatorBase::GetRepeatedArgument<int>("strides")),
paddings_(OperatorBase::GetRepeatedArgument<int>("paddings")),
dilations_(OperatorBase::GetRepeatedArgument<int>("dilations")) {}
// Executes the convolution for this device/type; returns true on success.
bool Run() override;
private:
vector<int> kernels_;    // kernel sizes from the "kernels" argument
vector<int> strides_;    // strides from the "strides" argument
vector<int> paddings_;   // paddings from the "paddings" argument
vector<int> dilations_;  // dilations from the "dilations" argument
// Named input/output indices so call sites use Input(FILTER) etc.
// instead of bare integer positions.
OP_INPUT_TAGS(INPUT, FILTER, BIAS);
OP_OUTPUT_TAGS(OUTPUT);
};
} // namespace mace
#endif // MACE_OPS_CONV_2D_H_
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册