diff --git a/r/Dockerfile b/r/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..2605e98f7684d481f5f4d2c4d9f77e8185467244
--- /dev/null
+++ b/r/Dockerfile
@@ -0,0 +1,47 @@
+FROM ubuntu:18.04
+MAINTAINER PaddlePaddle Authors
+
+WORKDIR /workspace
+
+ENV PATH /opt/python3/bin:/root/.local/bin:$PATH
+ENV LD_LIBRARY_PATH $LD_LIBRARY_PATH:/opt/python3/lib
+
+# curl is used below to fetch the example model data; it is not part of the
+# ubuntu:18.04 base image
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends curl ca-certificates && \
+    rm -rf /var/lib/apt/lists/*
+
+# Install Python
+ADD https://mirrors.tuna.tsinghua.edu.cn/anaconda/miniconda/Miniconda3-4.7.12.1-Linux-x86_64.sh miniconda3.sh
+RUN /bin/bash miniconda3.sh -b -p /opt/python3/ && \
+    rm -f miniconda3.sh
+
+RUN mkdir -p ~/.pip && \
+    echo "[global]" >> ~/.pip/pip.conf && \
+    echo "trusted-host = mirrors.aliyun.com" >> ~/.pip/pip.conf && \
+    echo "index-url = https://mirrors.aliyun.com/pypi/simple" >> ~/.pip/pip.conf
+
+RUN echo "channels:" >> ~/.condarc && \
+    echo "  - conda-forge" >> ~/.condarc && \
+    echo "  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main/" >> ~/.condarc && \
+    echo "  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/" >> ~/.condarc && \
+    echo "  - defaults" >> ~/.condarc && \
+    echo "custom_channels:" >> ~/.condarc && \
+    echo "  conda-forge: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud" >> ~/.condarc && \
+    echo "  msys2: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud" >> ~/.condarc && \
+    echo "  bioconda: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud" >> ~/.condarc && \
+    echo "  menpo: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud" >> ~/.condarc && \
+    echo "  pytorch: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud" >> ~/.condarc && \
+    echo "  simpleitk: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud" >> ~/.condarc && \
+    echo "show_channel_urls: true" >> ~/.condarc && \
+    echo "channel_priority: strict" >> ~/.condarc
+
+# Install R
+RUN conda install -y r -c conda-forge
+
+# Install PaddlePaddle
+RUN /opt/python3/bin/python -m pip install \
+    https://paddle-wheel.bj.bcebos.com/0.0.0-cpu-mkl/paddlepaddle-0.0.0-cp37-cp37m-linux_x86_64.whl
+
+# Install reticulate, the R interface to Python
+RUN Rscript -e 'install.packages("reticulate", repos="https://cran.rstudio.com")'
+
+COPY example example
+RUN cd example && \
+    curl -O https://paddle-inference-dist.cdn.bcebos.com/mobilenet-test-model-data.tar.gz && \
+    tar -zxvf mobilenet-test-model-data.tar.gz && rm mobilenet-test-model-data.tar.gz
diff --git a/r/README.md b/r/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..a1aab60ece448e23e537a5d0e45a7108ce0d6c5d
--- /dev/null
+++ b/r/README.md
@@ -0,0 +1,121 @@
+# R support
+
+English | [简体中文](./README_cn.md)
+
+Use PaddlePaddle in R.
+
+## Install
+### Use docker
+Download the [`Dockerfile`](./Dockerfile), then run
+``` bash
+docker build -t paddle-rapi:latest .
+```
+
+### Local installation
+First, make sure `Python` is installed, assuming that the path is `/opt/python3.7`.
+
+``` bash
+/opt/python3.7/bin/python3.7 -m pip install paddlepaddle     # CPU version
+/opt/python3.7/bin/python3.7 -m pip install paddlepaddle-gpu # GPU version
+```
+
+Install the R libraries needed to use Paddle.
+``` r
+install.packages("reticulate") # call Python from R
+install.packages("RcppCNPy")   # use numpy.ndarray in R
+```
+
+## Use Paddle inference in R
+First, load PaddlePaddle in R.
+``` r
+library(reticulate)
+library(RcppCNPy)
+
+use_python("/opt/python3.7/bin/python3.7")
+paddle <- import("paddle.fluid.core")
+```
+
+Create an `AnalysisConfig`, which holds the configuration of the Paddle inference engine.
+``` r
+config <- paddle$AnalysisConfig("")
+```
+
+Set the model path.
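+There are two supported model layouts. If the `model` directory contains one
+model file and many separate parameter files, pass just the directory:
+``` r
+config$set_model("model")
+```
+
+If the directory contains a single model file `__model__` and a single
+parameter file `__params__`, pass both paths: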
+``` r
+config$set_model("model/__model__", "model/__params__")
+```
+
+Disable the feed and fetch ops to use zero-copy inference.
+``` r
+config$switch_use_feed_fetch_ops(FALSE)
+config$switch_specify_input_names(TRUE)
+```
+
+Other configuration options and their descriptions are as follows.
+``` r
+config$enable_profile()                      # turn on inference profiling
+config$enable_use_gpu(gpu_memory_mb, gpu_id) # run inference on GPU
+config$disable_gpu()                         # disable GPU
+config$gpu_device_id()                       # get the GPU ID in use
+config$switch_ir_optim(TRUE)                 # turn on IR optimization (default is TRUE)
+config$enable_tensorrt_engine(workspace_size,
+                              max_batch_size,
+                              min_subgraph_size,
+                              paddle$AnalysisConfig$Precision$Float32,
+                              use_static,
+                              use_calib_mode
+                              )              # use TensorRT
+config$enable_mkldnn()                       # use MKLDNN
+config$delete_pass(pass_name)                # delete the specified IR pass
+```
+
+Create the inference engine.
+``` r
+predictor <- paddle$create_paddle_predictor(config)
+```
+
+Get the input tensor (assume a single input), and set the input data.
+``` r
+input_names <- predictor$get_input_names()
+input_tensor <- predictor$get_input_tensor(input_names[1])
+input_shape <- as.integer(c(1, 3, 300, 300)) # shape must be of integer type
+input_data <- np_array(data, dtype="float32")$reshape(input_shape)
+input_tensor$copy_from_cpu(input_data)
+```
+
+Run inference.
+``` r
+predictor$zero_copy_run()
+```
+
+Get the output tensor (assume a single output).
+``` r
+output_names <- predictor$get_output_names()
+output_tensor <- predictor$get_output_tensor(output_names[1])
+```
+
+Parse the output data and convert it to a `numpy.ndarray`.
+``` r
+output_data <- output_tensor$copy_to_cpu()
+output_data <- np_array(output_data)
+```
+
+See the full [R mobilenet example](./example/mobilenet.r) and the corresponding [Python mobilenet example](./example/mobilenet.py) for the steps above. For more examples, see the [R inference examples](./example).
+
+## Quick start
+Download the [Dockerfile](./Dockerfile) and [example](./example) to a local directory, and build the docker image
+``` bash
+docker build -t paddle-rapi:latest .
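+# the resulting image bundles Miniconda Python 3.7, R (from conda-forge),
+# PaddlePaddle, reticulate, and the example/ directory with the mobilenet
+# test model and data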
+```
+
+Create and enter the container
+``` bash
+docker run --rm -it paddle-rapi:latest bash
+```
+
+Run the following commands in the container
+``` bash
+cd example
+chmod +x mobilenet.r
+./mobilenet.r
+```
diff --git a/r/README_cn.md b/r/README_cn.md
new file mode 100644
index 0000000000000000000000000000000000000000..b844e0ad0849bf4502e8df80f32f15818ddde02f
--- /dev/null
+++ b/r/README_cn.md
@@ -0,0 +1,120 @@
+# R language support
+Use PaddlePaddle in R.
+
+## Environment setup
+First, make sure Python is installed, assuming the path is `/opt/python3.7`.
+
+Install Paddle with Python:
+``` bash
+/opt/python3.7/bin/python3.7 -m pip install paddlepaddle     # CPU
+/opt/python3.7/bin/python3.7 -m pip install paddlepaddle-gpu # GPU
+```
+
+Install the R library needed to run Paddle inference:
+``` r
+install.packages("reticulate") # call Paddle from R
+```
+
+## Use Paddle inference in R
+First, load the Paddle inference environment in R:
+
+``` r
+library(reticulate)
+use_python("/opt/python3.7/bin/python")
+
+paddle <- import("paddle.fluid.core")
+```
+
+Create an `AnalysisConfig` to set the options of the inference engine:
+
+``` r
+config <- paddle$AnalysisConfig("")
+```
+
+Disable the feed and fetch ops to use zero-copy inference:
+``` r
+config$switch_use_feed_fetch_ops(FALSE)
+config$switch_specify_input_names(TRUE)
+```
+
+There are two supported model layouts:
+- the `model` directory contains one model file and multiple parameter files
+- the `model` directory contains one model file `__model__` and one parameter file `__params__`
+
+They correspond to the following settings, respectively:
+
+``` r
+config$set_model("model")
+config$set_model("model/__model__", "model/__params__")
+```
+
+Other configuration options and their descriptions are as follows:
+``` r
+config$enable_profile()                      # turn on inference profiling
+config$enable_use_gpu(gpu_memory_mb, gpu_id) # run inference on GPU
+config$disable_gpu()                         # disable GPU
+config$gpu_device_id()                       # get the GPU ID in use
+config$switch_ir_optim(TRUE)                 # turn on IR optimization (default is TRUE)
+config$enable_tensorrt_engine(workspace_size,
+                              max_batch_size,
+                              min_subgraph_size,
+                              paddle$AnalysisConfig$Precision$Float32,
+                              use_static,
+                              use_calib_mode
+                              )              # use TensorRT
+config$enable_mkldnn()                       # use MKLDNN
+config$disable_glog_info()                   # disable glog logging during inference
+config$delete_pass(pass_name)                # delete the specified IR pass
+```
+
+Create the inference engine:
+``` r
+predictor <- paddle$create_paddle_predictor(config)
+```
+
+Get the input tensor (for simplicity, assume a single input) and set its data (note that `np_array` is required to pass `numpy.ndarray` data):
+``` r
+input_names <- predictor$get_input_names()
+input_tensor <- predictor$get_input_tensor(input_names[1])
+
+input_shape <- as.integer(c(1, 3, 300, 300)) # shape must be of integer type
+input_data <- np_array(data, dtype="float32")$reshape(input_shape)
+input_tensor$copy_from_cpu(input_data)
+```
+
+Run the inference engine:
+``` r
+predictor$zero_copy_run()
+```
+
+Get the output tensor (for simplicity, assume a single output):
+``` r
+output_names <- predictor$get_output_names()
+output_tensor <- predictor$get_output_tensor(output_names[1])
+```
+
+Get the data in the output tensor; note that it needs to be converted to a `numpy.ndarray`:
+``` r
+output_data <- output_tensor$copy_to_cpu()
+output_data <- np_array(output_data)
+```
+
+See the complete [R inference example](./example/mobilenet.r) and the corresponding [Python inference example](./example/mobilenet.py).
+
+### Quick start
+Download the [Dockerfile](./Dockerfile) and [example](./example) to a local directory, and build the docker image with the following command:
+``` bash
+docker build -t paddle-rapi:latest .
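+# optional: check that the image was built successfully
+# docker images paddle-rapi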
+```
+
+Start a container:
+``` bash
+docker run --rm -it paddle-rapi:latest bash
+```
+
+Run the example:
+``` bash
+cd example && chmod +x mobilenet.r
+./mobilenet.r
+```
diff --git a/r/example/mobilenet.py b/r/example/mobilenet.py
new file mode 100755
index 0000000000000000000000000000000000000000..adb1c330a704f5349316d02d4c02d08e9d7222db
--- /dev/null
+++ b/r/example/mobilenet.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python3.7
+# pylint: skip-file
+
+import numpy as np
+from paddle.fluid.core import AnalysisConfig
+from paddle.fluid.core import create_paddle_predictor
+
+
+def main():
+    config = set_config()
+    predictor = create_paddle_predictor(config)
+
+    data, result = parse_data()
+
+    input_names = predictor.get_input_names()
+    input_tensor = predictor.get_input_tensor(input_names[0])
+    shape = (1, 3, 300, 300)
+    # drop the trailing 4 values and reshape to the model's input shape
+    input_data = data[:-4].astype(np.float32).reshape(shape)
+    input_tensor.copy_from_cpu(input_data)
+
+    predictor.zero_copy_run()
+
+    output_names = predictor.get_output_names()
+    output_tensor = predictor.get_output_tensor(output_names[0])
+    output_data = output_tensor.copy_to_cpu()
+
+
+def set_config():
+    config = AnalysisConfig("")
+    config.set_model("model/__model__", "model/__params__")
+    config.switch_use_feed_fetch_ops(False)
+    config.switch_specify_input_names(True)
+    config.enable_profile()
+
+    return config
+
+
+def parse_data():
+    """ parse input and output data """
+    with open('data/data.txt', 'r') as fr:
+        data = np.array([float(_) for _ in fr.read().split()])
+
+    with open('data/result.txt', 'r') as fr:
+        result = np.array([float(_) for _ in fr.read().split()])
+
+    return (data, result)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/r/example/mobilenet.r b/r/example/mobilenet.r
new file mode 100755
index 0000000000000000000000000000000000000000..3da8965eb5742a78335adfe541501c11e303acf7
--- /dev/null
+++ b/r/example/mobilenet.r
@@ -0,0 +1,45 @@
+#!/usr/bin/env Rscript
+
+library(reticulate) # call Python from R
+
+use_python("/opt/python3.7/bin/python")
+
+np <- import("numpy")
+paddle <- import("paddle.fluid.core")
+
+set_config <- function() {
+  config <- paddle$AnalysisConfig("")
+  config$set_model("data/model/__model__", "data/model/__params__")
+  config$switch_use_feed_fetch_ops(FALSE)
+  config$switch_specify_input_names(TRUE)
+  config$enable_profile()
+
+  return(config)
+}
+
+zero_copy_run_mobilenet <- function() {
+  data <- np$loadtxt("data/data.txt")
+  data <- data[1:(length(data) - 4)]       # R is 1-indexed; drop the trailing 4 values
+  result <- np$loadtxt("data/result.txt")
+  result <- result[1:(length(result) - 4)]
+
+  config <- set_config()
+  predictor <- paddle$create_paddle_predictor(config)
+
+  input_names <- predictor$get_input_names()
+  input_tensor <- predictor$get_input_tensor(input_names[1])
+  input_data <- np_array(data, dtype="float32")$reshape(as.integer(c(1, 3, 300, 300)))
+  input_tensor$copy_from_cpu(input_data)
+
+  predictor$zero_copy_run()
+
+  output_names <- predictor$get_output_names()
+  output_tensor <- predictor$get_output_tensor(output_names[1])
+  output_data <- output_tensor$copy_to_cpu()
+  output_data <- np_array(output_data)$reshape(as.integer(-1))
+  # all.equal(output_data, result)
+}
+
+if (!interactive()) {
+  zero_copy_run_mobilenet()
+}
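+
+# A sketch of an explicit output check, not wired into the run above: convert
+# both numpy arrays to R vectors with reticulate::py_to_r() before calling it.
+# The tolerance below is illustrative, not a value prescribed by Paddle.
+check_result <- function(output, reference, tol = 1e-5) {
+  stopifnot(length(output) == length(reference))
+  max_diff <- max(abs(output - reference))
+  cat("max abs diff:", max_diff, "\n")
+  invisible(max_diff < tol)
+}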