diff --git a/src/operators/kernel/cl/cl_kernel/relu.cl b/src/operators/kernel/cl/cl_kernel/relu.cl
new file mode 100644
index 0000000000000000000000000000000000000000..e773d1c2577461abb35fabfa752ffc272970492b
--- /dev/null
+++ b/src/operators/kernel/cl/cl_kernel/relu.cl
@@ -0,0 +1,27 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+// Declaring half4 variables requires the fp16 extension.
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
+
+__kernel void relu(__read_only image2d_t input,
+                   __write_only image2d_t output) {
+  const int x = get_global_id(0);
+  const int y = get_global_id(1);
+  // Integer pixel coordinates require unnormalized sampler coords.
+  const sampler_t sampler = CLK_NORMALIZED_COORDS_FALSE |
+                            CLK_ADDRESS_CLAMP |
+                            CLK_FILTER_NEAREST;
+  half4 r = read_imageh(input, sampler, (int2)(x, y));
+  r = max((half4)(0.0h), r);
+  write_imageh(output, (int2)(x, y), r);
+}
diff --git a/src/operators/kernel/cl/cl_kernel/reshape.cl b/src/operators/kernel/cl/cl_kernel/reshape.cl
new file mode 100644
index 0000000000000000000000000000000000000000..4055445d1576b2ca54919ed03ad187d08cff14c2
--- /dev/null
+++ b/src/operators/kernel/cl/cl_kernel/reshape.cl
@@ -0,0 +1,49 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
+__kernel void reshape(__read_only image2d_t input,
+                      __write_only image2d_t output,
+                      __private const int d0,
+                      __private const int d1,
+                      __private const int d2,
+                      __private const int d3,
+                      __private const int x0,
+                      __private const int x1,
+                      __private const int x2,
+                      __private const int x3) {
+  const int x = get_global_id(0);
+  const int y = get_global_id(1);
+  int obx = x / x3;
+  int oby = y / x2;
+  int ox = x % x3;
+  int oy = y % x2;
+  const sampler_t sampler = CLK_NORMALIZED_COORDS_FALSE |
+                            CLK_ADDRESS_CLAMP |
+                            CLK_FILTER_NEAREST;
+  half4 r;
+  for (int i = 0; i < 4; i++) {
+    int t = obx * 4 + i;
+    if (t >= x1) break;
+    int oindex = oby * x1 * x2 * x3 + t * x2 * x3 + ox * x3 + oy;
+    int i3 = oindex % d3; oindex /= d3;
+    int i2 = oindex % d2; oindex /= d2;
+    int i1 = oindex % d1; oindex /= d1;
+    int i0 = oindex;
+    int ix = (i1 / 4) * d3 + i3;
+    int iy = i0 * d2 + i2;
+    r[i] = read_imageh(input, sampler, (int2)(ix, iy))[i1 % 4];
+  }
+  write_imageh(output, (int2)(x, y), r);
+}
diff --git a/src/operators/kernel/cl/cl_kernel/softmax.cl b/src/operators/kernel/cl/cl_kernel/softmax.cl
new file mode 100644
index 0000000000000000000000000000000000000000..60f0cf409596632b67817cd236f9621010522571
--- /dev/null
+++ b/src/operators/kernel/cl/cl_kernel/softmax.cl
@@ -0,0 +1,41 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
+
+__kernel void softmax(__read_only image2d_t input,
+                      __write_only image2d_t output,
+                      __private const int d0,
+                      __private const int d1,
+                      __private const int d2,
+                      __private const int d3) {
+  const int z = get_global_id(0);
+  const int x = get_global_id(1);
+  const int y = get_global_id(2);
+  const sampler_t sampler = CLK_NORMALIZED_COORDS_FALSE |
+                            CLK_ADDRESS_CLAMP |
+                            CLK_FILTER_NEAREST;
+  half4 maxv = read_imageh(input, sampler, (int2)(z * d3, y));
+  for (int i = 1; i < d3; i++) {
+    maxv = max(maxv, read_imageh(input, sampler, (int2)(z * d3 + i, y)));
+  }
+  half4 sum = (half4)(0.0h);
+  for (int i = 0; i < d3; i++) {
+    sum += exp(read_imageh(input, sampler, (int2)(z * d3 + i, y)) - maxv);
+  }
+  half4 r =
+      exp(read_imageh(input, sampler, (int2)(z * d3 + x, y)) - maxv) / sum;
+
+  write_imageh(output, (int2)(z * d3 + x, y), r);
+}
diff --git a/src/operators/kernel/cl/relu_kernel.cpp b/src/operators/kernel/cl/relu_kernel.cpp
index f38c29f1827cd61b18a0dd59773e63169a4445a7..223841096c88e2705e6b2e4ca915a5f8067d2d8d 100644
--- a/src/operators/kernel/cl/relu_kernel.cpp
+++ b/src/operators/kernel/cl/relu_kernel.cpp
@@ -11,6 +11,7 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
+#ifdef RELU_OP
 
 #include "operators/kernel/relu_kernel.h"
 
@@ -19,13 +20,25 @@ namespace operators {
 
 template <>
 bool ReluKernel<GPU_CL, float>::Init(ReluParam<GPU_CL> *param) {
+  this->cl_helper_.AddKernel("relu", "relu.cl");
   return true;
 }
 
 template <>
-void ReluKernel<GPU_CL, float>::Compute(const ReluParam<GPU_CL> &param) {}
+void ReluKernel<GPU_CL, float>::Compute(const ReluParam<GPU_CL> &param) {
+  auto kernel = this->cl_helper_.KernelAt(0);
+  const auto *input = param.InputX();
+  cl_mem input_image = input->GetCLImage();
+  cl_mem output_image = param.Out()->GetCLImage();
+  clSetKernelArg(kernel, 0, sizeof(cl_mem), &input_image);
+  clSetKernelArg(kernel, 1, sizeof(cl_mem), &output_image);
+  const size_t work_size[2] = {input->ImageWidth(), input->ImageHeight()};
+  clEnqueueNDRangeKernel(this->cl_helper_.CLCommandQueue(), kernel, 2, NULL,
+                         work_size, NULL, 0, NULL, NULL);
+}
 
 template class ReluKernel<GPU_CL, float>;
 
 }  // namespace operators
 }  // namespace paddle_mobile
+#endif
diff --git a/src/operators/kernel/cl/softmax_kernel.cpp b/src/operators/kernel/cl/softmax_kernel.cpp
index d0a97cf076c5fe22c7b2612629616053c63dec6c..f3c0de357c31b68d239c61910415fcce756dd4e7 100644
--- a/src/operators/kernel/cl/softmax_kernel.cpp
+++ b/src/operators/kernel/cl/softmax_kernel.cpp
@@ -21,11 +21,28 @@ namespace operators {
 
 template <>
 bool SoftmaxKernel<GPU_CL, float>::Init(SoftmaxParam<GPU_CL> *param) {
+  this->cl_helper_.AddKernel("softmax", "softmax.cl");
   return true;
 }
 
 template <>
-void SoftmaxKernel<GPU_CL, float>::Compute(const SoftmaxParam<GPU_CL> &param) {}
+void SoftmaxKernel<GPU_CL, float>::Compute(const SoftmaxParam<GPU_CL> &param) {
+  auto kernel = this->cl_helper_.KernelAt(0);
+  auto default_work_size = this->cl_helper_.DefaultWorkSize(*(param.Out()));
+  cl_mem input_image = param.InputX()->GetCLImage();
+  cl_mem output_image = param.Out()->GetCLImage();
+  clSetKernelArg(kernel, 0, sizeof(cl_mem), &input_image);
+  clSetKernelArg(kernel, 1, sizeof(cl_mem), &output_image);
+  const auto &input_dim = param.InputX()->dims();
+  int dims[4] = {static_cast<int>(input_dim[0]), static_cast<int>(input_dim[1]),
+                 static_cast<int>(input_dim[2]), static_cast<int>(input_dim[3])};
+  clSetKernelArg(kernel, 2, sizeof(int), dims);
+  clSetKernelArg(kernel, 3, sizeof(int), dims + 1);
+  clSetKernelArg(kernel, 4, sizeof(int), dims + 2);
+  clSetKernelArg(kernel, 5, sizeof(int), dims + 3);
+  clEnqueueNDRangeKernel(this->cl_helper_.CLCommandQueue(), kernel, 3, NULL,
+                         default_work_size.data(), NULL, 0, NULL, NULL);
+}
 
 template class SoftmaxKernel<GPU_CL, float>;
 
diff --git a/tools/web-exporter/CMakeLists.txt b/tools/web-exporter/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e9cddecd6794fd047a2d0e79719373adbe7f5959
--- /dev/null
+++ b/tools/web-exporter/CMakeLists.txt
@@ -0,0 +1,20 @@
+cmake_minimum_required(VERSION 3.6)
+
+project(web-exporter)
+
+set(CMAKE_CXX_STANDARD 11)
+
+file(GLOB PADDLE_MOBILE_CPP_FILES
+    "../../src/common/*.c"
+    "../../src/common/*.cpp"
+    "../../src/memory/*.cpp"
+    "../../src/framework/*.c"
+    "../../src/framework/*.cpp"
+    "../../src/framework/program/*.cpp"
+    "../../src/framework/program/program-optimize/*.cpp"
+)
+file(GLOB EXPORT_CPP_FILES "*.cpp")
+
+add_executable(web-exporter ${PADDLE_MOBILE_CPP_FILES} ${EXPORT_CPP_FILES})
+target_include_directories(web-exporter PRIVATE "../../src")
+target_link_libraries(web-exporter)
diff --git a/tools/web-exporter/export-nodejs.cpp b/tools/web-exporter/export-nodejs.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..023d9e5874e5871cdeb4e2b568c63c69436dee6e
--- /dev/null
+++ b/tools/web-exporter/export-nodejs.cpp
@@ -0,0 +1,49 @@
+#include "export.h"
+
+inline std::string indent(int i) {
+  return std::string(i, ' ');
+}
+void export_nodejs(ProgramPtr program, ScopePtr scope, std::ostream & os) {
+  os << "module.exports.program = {\n";
+  os << indent(2) << var2str("blocks") << ": [\n";
+  for (const auto& block: program->Blocks()) {
+    os << indent(4) << "{\n";
+    os << indent(6) << var2str("vars") << ": {\n";
+    for (const auto& var: block->Vars()) {
+      const auto& dim = var->Tensor_desc().Dims();
+      os << indent(8) << var2str(var->Name()) << ": {\n";
+      os << indent(10) << var2str("dim") << ": " << var2str(dim) << ",\n";
+      os << indent(10) << var2str("persistable") << ": " << var2str(var->Persistable()) << "\n";
+      os << indent(8) << "},\n";
+    }
+    os << indent(6) << "},\n";
+    os << indent(6) << var2str("ops") << ": [\n";
+    for (const auto& op: block->Ops()) {
+      os << indent(8) << "{\n";
+      os << indent(10) << var2str("type") << ": " << var2str(op->Type()) << ",\n";
+      os << indent(10) << var2str("inputs") << ": {\n";
+      for (const auto& kv: op->GetInputs()) {
+        os << indent(12) << var2str(kv.first) << ": " << var2str(kv.second) << ",\n";
+      }
+      os << indent(10) << "},\n";
+
+      os << indent(10) << var2str("outputs") << ": {\n";
+      for (const auto& kv: op->GetOutputs()) {
+        os << indent(12) << var2str(kv.first) << ": " << var2str(kv.second) << ",\n";
+      }
+      os << indent(10) << "},\n";
+
+      os << indent(10) << var2str("attrs") << ": {\n";
+      for (const auto& kv: op->GetAttrMap()) {
+        os << indent(12) << var2str(kv.first) << ": ";
+        os << decltype(kv.second)::ApplyVistor(VarVisitor(), kv.second) << ",\n";
+      }
+      os << indent(10) << "},\n";
+      os << indent(8) << "},\n";
+    }
+    os << indent(6) << "],\n";
+    os << indent(4) << "},\n";
+  }
+  os << indent(2) << "]\n";
+  os << "}\n";
+}
diff --git a/tools/web-exporter/export-scope.cpp b/tools/web-exporter/export-scope.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d5c2492ac74129fce25eb56dc9fc870e66f2dccc
--- /dev/null
+++ b/tools/web-exporter/export-scope.cpp
@@ -0,0 +1,36 @@
+#include <cassert>
+#include <cstdio>
+#include "export.h"
+
+void export_scope(ProgramPtr program, ScopePtr scope, const std::string & dirname) {
+  for (const auto& block: program->Blocks()) {
+    for (const auto& var: block->Vars()) {
+      if (var->Name() == "feed" || var->Name() == "fetch") {
+        continue;
+      }
+      if (var->Persistable()) {
+        auto* v = scope->FindVar(var->Name());
+        assert(v != nullptr);
+        int count = 1;
+        for (auto n: var->Tensor_desc().Dims()) {
+          count *= n;
+        }
+
+        auto* tensor = v->GetMutable<paddle_mobile::framework::LoDTensor>();
+        const float * p = tensor->mutable_data<float>();
+
+        std::string para_file_name = dirname + '/' + var->Name();
+        FILE *para_file = fopen(para_file_name.c_str(), "wb");
+        assert(para_file != nullptr);
+        assert(p != nullptr);
+        fwrite(p, sizeof(float), count, para_file);
+        fclose(para_file);
+        // std::cout << "==> " << var->Name() << " " << count << "\n";
+        // for (int i = 0; i < count; i++) {
+        //   std::cout << p[i] << ", ";
+        // }
+        // std::cout << "\n";
+      }
+    }
+  }
+}
diff --git a/tools/web-exporter/export.cpp b/tools/web-exporter/export.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1f7c678b69e6af9b4f6694489304b85853bf6215
--- /dev/null
+++ b/tools/web-exporter/export.cpp
@@ -0,0 +1,53 @@
+#include "export.h"
+#include <fstream>
+#include <sys/stat.h>
+
+class FakeExecutor : public paddle_mobile::framework::Executor<
+                         paddle_mobile::CPU, paddle_mobile::Precision::FP32> {
+public:
+  FakeExecutor(const paddle_mobile::framework::Program<paddle_mobile::CPU> p) {
+    program_ = p;
+    batch_size_ = 1;
+    use_optimize_ = true;
+    loddable_ = false;
+    if (use_optimize_) {
+      to_predict_program_ = program_.optimizeProgram;
+    } else {
+      to_predict_program_ = program_.originProgram;
+    }
+    auto *variable_ptr = program_.scope->Var("batch_size");
+    variable_ptr->SetValue<int>(1);
+    if (program_.combined) {
+      InitCombineMemory();
+    } else {
+      InitMemory();
+    }
+  }
+};
+
+int main(int argc, char** argv) {
+  if (argc != 3) {
+    std::cout << "Usage: " << argv[0] << " <model-dir> <output-dir>\n";
+    return -1;
+  }
+  std::string model_dir = argv[1];
+  std::string model_path = model_dir + "/model";
+  std::string para_path = model_dir + "/params";
+
+  std::string out_dir = argv[2];
+  std::string out_model_js = out_dir + "/model.js";
+  std::string out_para_dir = out_dir + "/paras";
+  mkdir(out_dir.c_str(), S_IRWXU|S_IRWXG|S_IRWXO);
+  mkdir(out_para_dir.c_str(), S_IRWXU|S_IRWXG|S_IRWXO);
+
+  std::cout << "loading " << model_path << " & " << para_path << "\n";
+  paddle_mobile::framework::Loader<> loader;
+  auto program = loader.Load(model_path, para_path, true);
+  FakeExecutor executor(program);
+  auto optimizedProgram = program.optimizeProgram;
+  export_scope(optimizedProgram, program.scope, out_para_dir);
+  std::ofstream fs(out_model_js.c_str());
+  export_nodejs(optimizedProgram, program.scope, fs);
+  fs.close();
+  return 0;
+}
diff --git a/tools/web-exporter/export.h b/tools/web-exporter/export.h
new file mode 100644
index 0000000000000000000000000000000000000000..d9db3b31dfa490b4404baccd6336df456cc84755
--- /dev/null
+++ b/tools/web-exporter/export.h
@@ -0,0 +1,61 @@
+#pragma once
+
+#include <iostream>
+#include <fstream>
+#include <memory>
+#include <string>
+#include <vector>
+#include <map>
+
+#include "framework/loader.h"
+#include "framework/executor.h"
+#include "framework/scope.h"
+#include "framework/program/program_desc.h"
+
+// using paddle_mobile::framework::ProgramDesc;
+// using paddle_mobile::framework::Scope;
+
+using ProgramPtr = std::shared_ptr<paddle_mobile::framework::ProgramDesc>;
+using ScopePtr = std::shared_ptr<paddle_mobile::framework::Scope>;
+
+void export_nodejs(ProgramPtr program, ScopePtr scope, std::ostream & os = std::cout);
+void export_scope(ProgramPtr program, ScopePtr scope, const std::string & dirname = ".");
+
+
+template <typename T>
+inline std::string var2str(const T & v) {
+  return std::to_string(v);
+}
+
+template <>
+inline std::string var2str(const std::string & v) {
+  return "\"" + v + "\"";
+}
+
+inline std::string var2str(const char* v) {
+  return var2str(std::string(v));
+}
+
+inline std::string var2str(const bool v) {
+  return v ? "true" : "false";
+}
+
+template <typename T>
+std::string var2str(const std::vector<T> & v) {
+  std::string r = "[";
+  auto s = v.size();
+  for (decltype(s) i = 0; i < s; i++) {
+    if (i) r += ", ";
+    r += var2str(v[i]);
+  }
+  return r + "]";
+}
+
+struct VarVisitor {
+  using type_t = decltype(var2str(0));
+
+  template <typename T>
+  type_t operator()(const T & v) {
+    return var2str(v);
+  }
+};