Commit 2b180489 authored by Liangliang He

Remove half source dependency
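
The commit drops the previously vendored half-precision header (mace/core/half.h) and instead pulls the half library in as the external Bazel repository @half, included as include/half.hpp; the scattered static_cast<T> additions keep the templated kernels valid when T is the library's half type rather than float. Below is a minimal standalone sketch of that pattern, assuming @half is the half.hpp library that defines half_float::half; ScaledSum and its arguments are illustrative only, not MACE code.

#include <vector>

#include "include/half.hpp"  // header exposed by the @half Bazel target; defines half_float::half

// Generic accumulation in the spirit of the pooling/activation changes in this
// commit: T may be float or half_float::half, so conversions from integer
// literals and float arguments are written as explicit static_casts instead of
// relying on implicit float -> T conversion.
template <typename T>
T ScaledSum(const std::vector<float> &values, float scale) {
  T sum = static_cast<T>(0);
  for (float v : values) {
    sum += static_cast<T>(v * scale);
  }
  return sum;
}

int main() {
  const std::vector<float> data = {1.0f, 2.0f, 3.0f};
  const float as_float = ScaledSum<float>(data, 0.5f);
  const half_float::half as_half = ScaledSum<half_float::half>(data, 0.5f);
  // half converts back to float for inspection.
  return (as_float > 0.0f && static_cast<float>(as_half) > 0.0f) ? 0 : 1;
}
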

Parent cedb6e8d
......@@ -58,6 +58,13 @@ new_git_repository(
remote = "https://github.com/KhronosGroup/OpenCL-CLHPP.git",
)
new_git_repository(
name = "half",
build_file = "mace/third_party/half.BUILD",
commit = "87d7f25f7ba2c7d3b051f6c857031de0ecac5afd",
remote = "http://v9.git.n.xiaomi.com/deep-computing/half.git",
)
git_repository(
name = "com_github_gflags_gflags",
#tag = "v2.2.0",
......
......@@ -52,6 +52,7 @@ cc_library(
":opencl_headers",
"//mace/utils",
"//mace/codegen:generated_version",
"@half//:half",
] + if_production_mode([
"//mace/utils:utils_prod",
"//mace/core:opencl_prod",
......
This diff is collapsed.
......@@ -7,8 +7,8 @@
#include <cstdint>
#include "mace/core/half.h"
#include "mace/public/mace.h"
#include "include/half.hpp"
namespace mace {
......
......@@ -140,7 +140,7 @@ template <typename T>
class ActivationFunctor<DeviceType::OPENCL, T> {
public:
ActivationFunctor(ActivationType type, T relux_max_limit)
: activation_(type), relux_max_limit_(relux_max_limit) {}
: activation_(type), relux_max_limit_(static_cast<T>(relux_max_limit)) {}
void operator()(const Tensor *input,
const Tensor *alpha,
......
......@@ -138,7 +138,7 @@ struct PoolingFunctor : PoolingFunctorBase {
index_t out_offset =
(((b * height) + h) * width + w) * channels + c;
index_t in_offset = b * in_image_size * input_channels + c;
T sum = 0;
T sum = static_cast<T>(0);
int block_size = 0;
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
......
......@@ -18,7 +18,8 @@ class ActivationOp : public Operator<D, T> {
functor_(kernels::StringToActivationType(
OperatorBase::GetSingleArgument<std::string>("activation",
"NOOP")),
OperatorBase::GetSingleArgument<float>("max_limit", 0.0f)) {}
static_cast<T>(OperatorBase::GetSingleArgument<float>(
"max_limit", 0.0f))) {}
bool Run(StatsFuture *future) override {
const Tensor *input_tensor = this->Input(0);
......
......@@ -64,8 +64,9 @@ void SimpleValidTest() {
}
// Check
auto expected = CreateTensor<T>({1, 2, 2, 2}, {37.1f, 148.2f, 47.1f, 188.2f,
67.1f, 268.2f, 77.1f, 308.2f});
auto expected = CreateTensor<T>(
{1, 2, 2, 2}, VectorStaticCast<T>({37.1f, 148.2f, 47.1f, 188.2f, 67.1f,
268.2f, 77.1f, 308.2f}));
ExpectTensorNear<T>(*expected, *net.GetOutput("Output"), 1e-5);
}
......@@ -169,21 +170,22 @@ void ComplexValidTest() {
// Check
auto expected = CreateTensor<T>(
{1, 5, 5, 3},
{4.48200035, 4.63479996, 4.79079962, 5.85899973, 6.05599976,
6.25699997, 6.38100004, 6.59000015, 6.80300045, 6.90299988,
7.1239996, 7.34899998, 4.03559971, 4.16820002, 4.30319977,
8.90999985, 9.1760006, 9.44599915, 11.20499992, 11.54500103,
11.89000034, 11.74499989, 12.09999943, 12.46000004, 12.28499985,
12.65500069, 13.03000069, 7.00200033, 7.22399998, 7.44900036,
13.4100008, 13.79599953, 14.18599987, 16.60500145, 17.09499741,
17.59000015, 17.14500046, 17.65000153, 18.15999794, 17.68499947,
18.20499992, 18.72999954, 9.97200012, 10.28399944, 10.59899998,
17.90999985, 18.41600037, 18.92599869, 22.00500107, 22.64500046,
23.28999901, 22.54500008, 23.19999886, 23.8599987, 23.0850029,
23.75500107, 24.43000031, 12.94200039, 13.34400082, 13.7489996,
6.97500038, 7.29659986, 7.62060022, 8.32049942, 8.72700024,
9.13650036, 8.5095005, 8.92500019, 9.34349918, 8.69849968,
9.12300014, 9.55049992, 4.55220032, 4.80690002, 5.06340027});
VectorStaticCast<T>(
{4.48200035, 4.63479996, 4.79079962, 5.85899973, 6.05599976,
6.25699997, 6.38100004, 6.59000015, 6.80300045, 6.90299988,
7.1239996, 7.34899998, 4.03559971, 4.16820002, 4.30319977,
8.90999985, 9.1760006, 9.44599915, 11.20499992, 11.54500103,
11.89000034, 11.74499989, 12.09999943, 12.46000004, 12.28499985,
12.65500069, 13.03000069, 7.00200033, 7.22399998, 7.44900036,
13.4100008, 13.79599953, 14.18599987, 16.60500145, 17.09499741,
17.59000015, 17.14500046, 17.65000153, 18.15999794, 17.68499947,
18.20499992, 18.72999954, 9.97200012, 10.28399944, 10.59899998,
17.90999985, 18.41600037, 18.92599869, 22.00500107, 22.64500046,
23.28999901, 22.54500008, 23.19999886, 23.8599987, 23.0850029,
23.75500107, 24.43000031, 12.94200039, 13.34400082, 13.7489996,
6.97500038, 7.29659986, 7.62060022, 8.32049942, 8.72700024,
9.13650036, 8.5095005, 8.92500019, 9.34349918, 8.69849968,
9.12300014, 9.55049992, 4.55220032, 4.80690002, 5.06340027}));
ExpectTensorNear<T>(*expected, *net.GetOutput("Output"), 0.2);
}
......
......@@ -237,6 +237,16 @@ void GenerateRandomIntTypeData(const std::vector<index_t> &shape,
std::generate(res.begin(), res.end(), [&gen, &nd] { return nd(gen); });
}
template <typename T>
std::vector<T> VectorStaticCast(const std::vector<float> &&src) {
std::vector<T> dest;
dest.reserve(src.size());
for (float f : src) {
dest.push_back(static_cast<T>(f));
}
return std::move(dest);
}
template <typename T>
std::unique_ptr<Tensor> CreateTensor(const std::vector<index_t> &shape,
const std::vector<T> &data) {
......
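
For context, the VectorStaticCast helper added to the test utilities above lets the same float literal lists drive both the float and half instantiations of the tests. A small standalone usage sketch follows; the helper body is copied from the diff, while the surrounding main and the half_float::half instantiation are illustrative.

#include <vector>

#include "include/half.hpp"  // defines half_float::half

// Copy of the helper from the test utilities, reproduced here so the sketch is
// self-contained: it converts float test data element-wise into T.
template <typename T>
std::vector<T> VectorStaticCast(const std::vector<float> &&src) {
  std::vector<T> dest;
  dest.reserve(src.size());
  for (float f : src) {
    dest.push_back(static_cast<T>(f));
  }
  return std::move(dest);
}

int main() {
  // The same literals that previously initialized a std::vector<float> can now
  // feed a tensor of either element type, e.g. CreateTensor<half_float::half>.
  const auto as_float = VectorStaticCast<float>({37.1f, 148.2f, 47.1f, 188.2f});
  const auto as_half =
      VectorStaticCast<half_float::half>({37.1f, 148.2f, 47.1f, 188.2f});
  return (as_float.size() == 4u && as_half.size() == 4u) ? 0 : 1;
}
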
cc_library(
name = "half",
hdrs = glob([
"include/half.hpp",
]),
visibility = ["//visibility:public"],
)
......@@ -10,6 +10,6 @@ genrule(
cc_library(
name = "opencl_clhpp",
srcs = ["include/CL/cl.hpp", "include/CL/cl2.hpp"],
hdrs = ["include/CL/cl.hpp", "include/CL/cl2.hpp"],
visibility = ["//visibility:public"],
)