+ labels = new ArrayList<>();
+ while ((text = bufferedReader.readLine()) != null) {
+ labels.add(text);
+ }
+ return labels;
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+ return null;
+ }
+}
diff --git a/deploy/fastdeploy/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/ui/layout/ActionBarLayout.java b/deploy/fastdeploy/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/ui/layout/ActionBarLayout.java
new file mode 100644
index 0000000000000000000000000000000000000000..099219fa9f677134ae58d3e695d9389b54ce9597
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/ui/layout/ActionBarLayout.java
@@ -0,0 +1,33 @@
+package com.baidu.paddle.fastdeploy.app.ui.layout;
+
+import android.content.Context;
+import android.graphics.Color;
+import android.support.annotation.Nullable;
+import android.util.AttributeSet;
+import android.widget.RelativeLayout;
+
+
+public class ActionBarLayout extends RelativeLayout {
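+ // Height in raw pixels (not dp); onMeasure forces this value regardless of the parent's spec.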
+ private int layoutHeight = 150;
+
+ public ActionBarLayout(Context context) {
+ super(context);
+ }
+
+ public ActionBarLayout(Context context, @Nullable AttributeSet attrs) {
+ super(context, attrs);
+ }
+
+ public ActionBarLayout(Context context, @Nullable AttributeSet attrs, int defStyleAttr) {
+ super(context, attrs, defStyleAttr);
+ }
+
+ @Override
+ protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) {
+ super.onMeasure(widthMeasureSpec, heightMeasureSpec);
+ int width = MeasureSpec.getSize(widthMeasureSpec);
+ setMeasuredDimension(width, layoutHeight);
+ setBackgroundColor(Color.BLACK);
+ setAlpha(0.9f);
+ }
+}
\ No newline at end of file
diff --git a/deploy/fastdeploy/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/ui/view/AppCompatPreferenceActivity.java b/deploy/fastdeploy/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/ui/view/AppCompatPreferenceActivity.java
new file mode 100644
index 0000000000000000000000000000000000000000..c1a952dcff6873593c0d5e75dc909d9b3177b3d0
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/ui/view/AppCompatPreferenceActivity.java
@@ -0,0 +1,111 @@
+package com.baidu.paddle.fastdeploy.app.ui.view;
+
+import android.content.res.Configuration;
+import android.os.Bundle;
+import android.preference.PreferenceActivity;
+import android.support.annotation.LayoutRes;
+import android.support.annotation.Nullable;
+import android.support.v7.app.ActionBar;
+import android.support.v7.app.AppCompatDelegate;
+import android.support.v7.widget.Toolbar;
+import android.view.MenuInflater;
+import android.view.View;
+import android.view.ViewGroup;
+
+/**
+ * A {@link PreferenceActivity} which implements and proxies the necessary calls
+ * to be used with AppCompat.
+ *
+ * This technique can be used with an {@link android.app.Activity} class, not just
+ * {@link PreferenceActivity}.
+ */
+public abstract class AppCompatPreferenceActivity extends PreferenceActivity {
+ private AppCompatDelegate mDelegate;
+
+ @Override
+ protected void onCreate(Bundle savedInstanceState) {
+ getDelegate().installViewFactory();
+ getDelegate().onCreate(savedInstanceState);
+ super.onCreate(savedInstanceState);
+ }
+
+ @Override
+ protected void onPostCreate(Bundle savedInstanceState) {
+ super.onPostCreate(savedInstanceState);
+ getDelegate().onPostCreate(savedInstanceState);
+ }
+
+ public ActionBar getSupportActionBar() {
+ return getDelegate().getSupportActionBar();
+ }
+
+ public void setSupportActionBar(@Nullable Toolbar toolbar) {
+ getDelegate().setSupportActionBar(toolbar);
+ }
+
+ @Override
+ public MenuInflater getMenuInflater() {
+ return getDelegate().getMenuInflater();
+ }
+
+ @Override
+ public void setContentView(@LayoutRes int layoutResID) {
+ getDelegate().setContentView(layoutResID);
+ }
+
+ @Override
+ public void setContentView(View view) {
+ getDelegate().setContentView(view);
+ }
+
+ @Override
+ public void setContentView(View view, ViewGroup.LayoutParams params) {
+ getDelegate().setContentView(view, params);
+ }
+
+ @Override
+ public void addContentView(View view, ViewGroup.LayoutParams params) {
+ getDelegate().addContentView(view, params);
+ }
+
+ @Override
+ protected void onPostResume() {
+ super.onPostResume();
+ getDelegate().onPostResume();
+ }
+
+ @Override
+ protected void onTitleChanged(CharSequence title, int color) {
+ super.onTitleChanged(title, color);
+ getDelegate().setTitle(title);
+ }
+
+ @Override
+ public void onConfigurationChanged(Configuration newConfig) {
+ super.onConfigurationChanged(newConfig);
+ getDelegate().onConfigurationChanged(newConfig);
+ }
+
+ @Override
+ protected void onStop() {
+ super.onStop();
+ getDelegate().onStop();
+ }
+
+ @Override
+ protected void onDestroy() {
+ super.onDestroy();
+ getDelegate().onDestroy();
+ }
+
+ public void invalidateOptionsMenu() {
+ getDelegate().invalidateOptionsMenu();
+ }
+
+ private AppCompatDelegate getDelegate() {
+ if (mDelegate == null) {
+ mDelegate = AppCompatDelegate.create(this, null);
+ }
+ return mDelegate;
+ }
+}
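+
+// A minimal usage sketch (hypothetical subclass, not part of this patch): a settings
+// screen that inflates a Toolbar and hands it to the AppCompat delegate.
+//
+//   public class SettingsActivity extends AppCompatPreferenceActivity {
+//       @Override
+//       protected void onCreate(Bundle savedInstanceState) {
+//           super.onCreate(savedInstanceState);
+//           Toolbar toolbar = (Toolbar) getLayoutInflater()
+//                   .inflate(R.layout.settings_toolbar, null); // assumed layout resource
+//           setSupportActionBar(toolbar);
+//           getSupportActionBar().setDisplayHomeAsUpEnabled(true);
+//       }
+//   }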
diff --git a/deploy/fastdeploy/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/ui/view/CameraSurfaceView.java b/deploy/fastdeploy/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/ui/view/CameraSurfaceView.java
new file mode 100644
index 0000000000000000000000000000000000000000..e90874c627f671de2b7341334b92d872c7078bb6
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/ui/view/CameraSurfaceView.java
@@ -0,0 +1,353 @@
+package com.baidu.paddle.fastdeploy.app.ui.view;
+
+import android.content.Context;
+import android.graphics.Bitmap;
+import android.graphics.SurfaceTexture;
+import android.hardware.Camera;
+import android.hardware.Camera.CameraInfo;
+import android.hardware.Camera.Size;
+import android.opengl.GLES11Ext;
+import android.opengl.GLES20;
+import android.opengl.GLSurfaceView;
+import android.opengl.GLSurfaceView.Renderer;
+import android.opengl.GLUtils;
+import android.opengl.Matrix;
+import android.os.SystemClock;
+import android.util.AttributeSet;
+import android.util.Log;
+
+import com.baidu.paddle.fastdeploy.app.ui.Utils;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.nio.FloatBuffer;
+import java.util.List;
+
+import javax.microedition.khronos.egl.EGLConfig;
+import javax.microedition.khronos.opengles.GL10;
+
+public class CameraSurfaceView extends GLSurfaceView implements Renderer,
+ SurfaceTexture.OnFrameAvailableListener {
+ private static final String TAG = CameraSurfaceView.class.getSimpleName();
+
+ public static int EXPECTED_PREVIEW_WIDTH = 1280; // 1920
+ public static int EXPECTED_PREVIEW_HEIGHT = 720; // 960
+
+ protected int numberOfCameras;
+ protected int selectedCameraId;
+ protected boolean disableCamera = false;
+ protected Camera camera;
+
+ protected Context context;
+ protected SurfaceTexture surfaceTexture;
+ protected int surfaceWidth = 0;
+ protected int surfaceHeight = 0;
+ protected int textureWidth = 0;
+ protected int textureHeight = 0;
+
+ protected Bitmap ARGB8888ImageBitmap;
+ protected boolean bitmapReleaseMode = true;
+
+ // In order to manipulate the camera preview data and render the modified frame
+ // to the screen, three textures are created and the data flows as follows:
+ // preview data -> camTextureId -> fboTexureId -> drawTexureId -> framebuffer
+ protected int[] fbo = {0};
+ protected int[] camTextureId = {0};
+ protected int[] fboTexureId = {0};
+ protected int[] drawTexureId = {0};
+
+ private final String vss = ""
+ + "attribute vec2 vPosition;\n"
+ + "attribute vec2 vTexCoord;\n" + "varying vec2 texCoord;\n"
+ + "void main() {\n" + " texCoord = vTexCoord;\n"
+ + " gl_Position = vec4 (vPosition.x, vPosition.y, 0.0, 1.0);\n"
+ + "}";
+
+ private final String fssCam2FBO = ""
+ + "#extension GL_OES_EGL_image_external : require\n"
+ + "precision mediump float;\n"
+ + "uniform samplerExternalOES sTexture;\n"
+ + "varying vec2 texCoord;\n"
+ + "void main() {\n"
+ + " gl_FragColor = texture2D(sTexture,texCoord);\n" + "}";
+
+ private final String fssTex2Screen = ""
+ + "precision mediump float;\n"
+ + "uniform sampler2D sTexture;\n"
+ + "varying vec2 texCoord;\n"
+ + "void main() {\n"
+ + " gl_FragColor = texture2D(sTexture,texCoord);\n" + "}";
+
+ private final float[] vertexCoords = {
+ -1, -1,
+ -1, 1,
+ 1, -1,
+ 1, 1};
+ private float[] textureCoords = {
+ 0, 1,
+ 0, 0,
+ 1, 1,
+ 1, 0};
+
+ private FloatBuffer vertexCoordsBuffer;
+ private FloatBuffer textureCoordsBuffer;
+
+ private int progCam2FBO = -1;
+ private int progTex2Screen = -1;
+ private int vcCam2FBO;
+ private int tcCam2FBO;
+ private int vcTex2Screen;
+ private int tcTex2Screen;
+
+ public void setBitmapReleaseMode(boolean mode) {
+ synchronized (this) {
+ bitmapReleaseMode = mode;
+ }
+ }
+
+ public Bitmap getBitmap() {
+ return ARGB8888ImageBitmap; // may be null or already recycled.
+ }
+
+ public interface OnTextureChangedListener {
+ boolean onTextureChanged(Bitmap ARGB8888ImageBitmap);
+ }
+
+ private OnTextureChangedListener onTextureChangedListener = null;
+
+ public void setOnTextureChangedListener(OnTextureChangedListener listener) {
+ onTextureChangedListener = listener;
+ }
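+
+ // Caller-side sketch (hypothetical): draw results onto each preview frame; returning
+ // true makes onDrawFrame upload and display the modified bitmap instead of the raw one.
+ //
+ //   cameraView.setOnTextureChangedListener(new OnTextureChangedListener() {
+ //       @Override
+ //       public boolean onTextureChanged(Bitmap frame) {
+ //           return Visualize.drawResults(frame); // assumed helper; any in-place edit works
+ //       }
+ //   });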
+
+ public CameraSurfaceView(Context ctx, AttributeSet attrs) {
+ super(ctx, attrs);
+ context = ctx;
+ setEGLContextClientVersion(2);
+ setRenderer(this);
+ setRenderMode(RENDERMODE_WHEN_DIRTY);
+
+ // Find the total number of available cameras and the ID of the default camera
+ numberOfCameras = Camera.getNumberOfCameras();
+ CameraInfo cameraInfo = new CameraInfo();
+ for (int i = 0; i < numberOfCameras; i++) {
+ Camera.getCameraInfo(i, cameraInfo);
+ if (cameraInfo.facing == CameraInfo.CAMERA_FACING_BACK) {
+ selectedCameraId = i;
+ }
+ }
+ }
+
+ @Override
+ public void onSurfaceCreated(GL10 gl, EGLConfig config) {
+ // Create an OES texture for storing the camera preview data (YUV format)
+ GLES20.glGenTextures(1, camTextureId, 0);
+ GLES20.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, camTextureId[0]);
+ GLES20.glTexParameteri(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, GLES20.GL_TEXTURE_WRAP_S, GLES20.GL_CLAMP_TO_EDGE);
+ GLES20.glTexParameteri(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, GLES20.GL_TEXTURE_WRAP_T, GLES20.GL_CLAMP_TO_EDGE);
+ GLES20.glTexParameteri(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, GLES20.GL_TEXTURE_MIN_FILTER, GLES20.GL_NEAREST);
+ GLES20.glTexParameteri(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, GLES20.GL_TEXTURE_MAG_FILTER, GLES20.GL_NEAREST);
+ surfaceTexture = new SurfaceTexture(camTextureId[0]);
+ surfaceTexture.setOnFrameAvailableListener(this);
+
+ // Prepare vertex and texture coordinates
+ int bytes = vertexCoords.length * Float.SIZE / Byte.SIZE;
+ vertexCoordsBuffer = ByteBuffer.allocateDirect(bytes).order(ByteOrder.nativeOrder()).asFloatBuffer();
+ textureCoordsBuffer = ByteBuffer.allocateDirect(bytes).order(ByteOrder.nativeOrder()).asFloatBuffer();
+ vertexCoordsBuffer.put(vertexCoords).position(0);
+ textureCoordsBuffer.put(textureCoords).position(0);
+
+ // Create vertex and fragment shaders
+ // camTextureId->fboTexureId
+ progCam2FBO = Utils.createShaderProgram(vss, fssCam2FBO);
+ vcCam2FBO = GLES20.glGetAttribLocation(progCam2FBO, "vPosition");
+ tcCam2FBO = GLES20.glGetAttribLocation(progCam2FBO, "vTexCoord");
+ GLES20.glEnableVertexAttribArray(vcCam2FBO);
+ GLES20.glEnableVertexAttribArray(tcCam2FBO);
+ // fboTexureId/drawTexureId -> screen
+ progTex2Screen = Utils.createShaderProgram(vss, fssTex2Screen);
+ vcTex2Screen = GLES20.glGetAttribLocation(progTex2Screen, "vPosition");
+ tcTex2Screen = GLES20.glGetAttribLocation(progTex2Screen, "vTexCoord");
+ GLES20.glEnableVertexAttribArray(vcTex2Screen);
+ GLES20.glEnableVertexAttribArray(tcTex2Screen);
+ }
+
+ @Override
+ public void onSurfaceChanged(GL10 gl, int width, int height) {
+ surfaceWidth = width;
+ surfaceHeight = height;
+ openCamera();
+ }
+
+ @Override
+ public void onDrawFrame(GL10 gl) {
+ if (surfaceTexture == null) return;
+
+ GLES20.glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
+ GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT | GLES20.GL_DEPTH_BUFFER_BIT);
+ surfaceTexture.updateTexImage();
+ float[] matrix = new float[16];
+ surfaceTexture.getTransformMatrix(matrix);
+
+ // camTextureId->fboTexureId
+ GLES20.glBindFramebuffer(GLES20.GL_FRAMEBUFFER, fbo[0]);
+ GLES20.glViewport(0, 0, textureWidth, textureHeight);
+ GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
+ GLES20.glUseProgram(progCam2FBO);
+ GLES20.glVertexAttribPointer(vcCam2FBO, 2, GLES20.GL_FLOAT, false, 4 * 2, vertexCoordsBuffer);
+ textureCoordsBuffer.clear();
+ textureCoordsBuffer.put(transformTextureCoordinates(textureCoords, matrix));
+ textureCoordsBuffer.position(0);
+ GLES20.glVertexAttribPointer(tcCam2FBO, 2, GLES20.GL_FLOAT, false, 4 * 2, textureCoordsBuffer);
+ GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
+ GLES20.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, camTextureId[0]);
+ GLES20.glUniform1i(GLES20.glGetUniformLocation(progCam2FBO, "sTexture"), 0);
+ GLES20.glDrawArrays(GLES20.GL_TRIANGLE_STRIP, 0, 4);
+ GLES20.glFlush();
+
+ // If a listener is registered, hand the frame to it for inspection and possible modification
+ int targetTexureId = fboTexureId[0];
+ if (onTextureChangedListener != null) {
+ // Read pixels of FBO to a bitmap
+ ByteBuffer pixelBuffer = ByteBuffer.allocate(textureWidth * textureHeight * 4);
+ GLES20.glReadPixels(0, 0, textureWidth, textureHeight, GLES20.GL_RGBA, GLES20.GL_UNSIGNED_BYTE, pixelBuffer);
+
+ ARGB8888ImageBitmap = Bitmap.createBitmap(textureWidth, textureHeight, Bitmap.Config.ARGB_8888);
+ ARGB8888ImageBitmap.copyPixelsFromBuffer(pixelBuffer);
+
+ boolean modified = onTextureChangedListener.onTextureChanged(ARGB8888ImageBitmap);
+
+ if (modified) {
+ targetTexureId = drawTexureId[0];
+ // Upload the modified bitmap into the ordinary 2D texture and draw that instead
+ GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
+ GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, targetTexureId);
+ GLUtils.texImage2D(GLES20.GL_TEXTURE_2D, 0, ARGB8888ImageBitmap, 0);
+ }
+ if (bitmapReleaseMode) {
+ ARGB8888ImageBitmap.recycle();
+ }
+ }
+
+ // fboTexureId/drawTexureId->Screen
+ GLES20.glBindFramebuffer(GLES20.GL_FRAMEBUFFER, 0);
+ GLES20.glViewport(0, 0, surfaceWidth, surfaceHeight);
+ GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
+ GLES20.glUseProgram(progTex2Screen);
+ GLES20.glVertexAttribPointer(vcTex2Screen, 2, GLES20.GL_FLOAT, false, 4 * 2, vertexCoordsBuffer);
+ textureCoordsBuffer.clear();
+ textureCoordsBuffer.put(textureCoords);
+ textureCoordsBuffer.position(0);
+ GLES20.glVertexAttribPointer(tcTex2Screen, 2, GLES20.GL_FLOAT, false, 4 * 2, textureCoordsBuffer);
+ GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
+ GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, targetTexureId);
+ GLES20.glUniform1i(GLES20.glGetUniformLocation(progTex2Screen, "sTexture"), 0);
+ GLES20.glDrawArrays(GLES20.GL_TRIANGLE_STRIP, 0, 4);
+ GLES20.glFlush();
+ }
+
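+ // Applies the 4x4 transform from SurfaceTexture.getTransformMatrix() to each
+ // (s, t) coordinate pair, treating it as the homogeneous vector (s, t, 0, 1).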
+ private float[] transformTextureCoordinates(float[] coords, float[] matrix) {
+ float[] result = new float[coords.length];
+ float[] vt = new float[4];
+ for (int i = 0; i < coords.length; i += 2) {
+ float[] v = {coords[i], coords[i + 1], 0, 1};
+ Matrix.multiplyMV(vt, 0, matrix, 0, v, 0);
+ result[i] = vt[0];
+ result[i + 1] = vt[1];
+ }
+ return result;
+ }
+
+ @Override
+ public void onResume() {
+ super.onResume();
+ }
+
+ @Override
+ public void onPause() {
+ super.onPause();
+ releaseCamera();
+ }
+
+ @Override
+ public void onFrameAvailable(SurfaceTexture surfaceTexture) {
+ requestRender();
+ }
+
+ public void disableCamera() {
+ disableCamera = true;
+ }
+
+ public void enableCamera() {
+ disableCamera = false;
+ }
+
+ public void switchCamera() {
+ releaseCamera();
+ selectedCameraId = (selectedCameraId + 1) % numberOfCameras;
+ openCamera();
+ }
+
+ public void openCamera() {
+ if (disableCamera) return;
+ camera = Camera.open(selectedCameraId);
+ List<Size> supportedPreviewSizes = camera.getParameters().getSupportedPreviewSizes();
+ Size previewSize = Utils.getOptimalPreviewSize(supportedPreviewSizes, EXPECTED_PREVIEW_WIDTH,
+ EXPECTED_PREVIEW_HEIGHT);
+ Camera.Parameters parameters = camera.getParameters();
+ parameters.setPreviewSize(previewSize.width, previewSize.height);
+ if (parameters.getSupportedFocusModes().contains(Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO)) {
+ parameters.setFocusMode(Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO);
+ }
+ camera.setParameters(parameters);
+ int degree = Utils.getCameraDisplayOrientation(context, selectedCameraId);
+ camera.setDisplayOrientation(degree);
+ boolean rotate = degree == 90 || degree == 270;
+ textureWidth = rotate ? previewSize.height : previewSize.width;
+ textureHeight = rotate ? previewSize.width : previewSize.height;
+ // Destroy FBO and draw textures
+ GLES20.glBindFramebuffer(GLES20.GL_FRAMEBUFFER, 0);
+ GLES20.glDeleteFramebuffers(1, fbo, 0);
+ GLES20.glDeleteTextures(1, drawTexureId, 0);
+ GLES20.glDeleteTextures(1, fboTexureId, 0);
+ // Normal texture for storing the modified camera preview data (RGBA format)
+ GLES20.glGenTextures(1, drawTexureId, 0);
+ GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, drawTexureId[0]);
+ GLES20.glTexImage2D(GLES20.GL_TEXTURE_2D, 0, GLES20.GL_RGBA, textureWidth, textureHeight, 0,
+ GLES20.GL_RGBA, GLES20.GL_UNSIGNED_BYTE, null);
+ GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_WRAP_S, GLES20.GL_CLAMP_TO_EDGE);
+ GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_WRAP_T, GLES20.GL_CLAMP_TO_EDGE);
+ GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_MIN_FILTER, GLES20.GL_NEAREST);
+ GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_MAG_FILTER, GLES20.GL_NEAREST);
+ // FBO texture for storing the camera preview data (RGBA format)
+ GLES20.glGenTextures(1, fboTexureId, 0);
+ GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, fboTexureId[0]);
+ GLES20.glTexImage2D(GLES20.GL_TEXTURE_2D, 0, GLES20.GL_RGBA, textureWidth, textureHeight, 0,
+ GLES20.GL_RGBA, GLES20.GL_UNSIGNED_BYTE, null);
+ GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_WRAP_S, GLES20.GL_CLAMP_TO_EDGE);
+ GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_WRAP_T, GLES20.GL_CLAMP_TO_EDGE);
+ GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_MIN_FILTER, GLES20.GL_NEAREST);
+ GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_MAG_FILTER, GLES20.GL_NEAREST);
+ // Generate FBO and bind to FBO texture
+ GLES20.glGenFramebuffers(1, fbo, 0);
+ GLES20.glBindFramebuffer(GLES20.GL_FRAMEBUFFER, fbo[0]);
+ GLES20.glFramebufferTexture2D(GLES20.GL_FRAMEBUFFER, GLES20.GL_COLOR_ATTACHMENT0, GLES20.GL_TEXTURE_2D,
+ fboTexureId[0], 0);
+ try {
+ camera.setPreviewTexture(surfaceTexture);
+ } catch (IOException exception) {
+ Log.e(TAG, "IOException caused by setPreviewTexture()", exception);
+ }
+ camera.startPreview();
+ }
+
+ public void releaseCamera() {
+ if (camera != null) {
+ camera.setPreviewCallback(null);
+ camera.stopPreview();
+ camera.release();
+ camera = null;
+ }
+ }
+}
diff --git a/deploy/fastdeploy/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/ui/view/ResultListView.java b/deploy/fastdeploy/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/ui/view/ResultListView.java
new file mode 100644
index 0000000000000000000000000000000000000000..62b48a0547dca5c1dd80440918bb813811f35844
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/ui/view/ResultListView.java
@@ -0,0 +1,43 @@
+package com.baidu.paddle.fastdeploy.app.ui.view;
+
+import android.content.Context;
+import android.os.Handler;
+import android.util.AttributeSet;
+import android.widget.ListView;
+
+public class ResultListView extends ListView {
+ public ResultListView(Context context) {
+ super(context);
+ }
+
+ public ResultListView(Context context, AttributeSet attrs) {
+ super(context, attrs);
+ }
+
+ public ResultListView(Context context, AttributeSet attrs, int defStyleAttr) {
+ super(context, attrs, defStyleAttr);
+ }
+
+ private Handler handler;
+
+ public void setHandler(Handler mHandler) {
+ handler = mHandler;
+ }
+
+ public void clear() {
+ handler.post(new Runnable() {
+ @Override
+ public void run() {
+ removeAllViewsInLayout();
+ invalidate();
+ }
+ });
+ }
+
+ @Override
+ protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) {
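+ // Measure with a near-unbounded AT_MOST height so all rows are laid out, letting
+ // the list display its full content inside a scrolling parent.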
+ int expandSpec = MeasureSpec.makeMeasureSpec(Integer.MAX_VALUE >> 2,
+ MeasureSpec.AT_MOST);
+ super.onMeasure(widthMeasureSpec, expandSpec);
+ }
+}
diff --git a/deploy/fastdeploy/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/ui/view/adapter/BaseResultAdapter.java b/deploy/fastdeploy/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/ui/view/adapter/BaseResultAdapter.java
new file mode 100644
index 0000000000000000000000000000000000000000..62747965adc25714bd35fa254c6fce1e6009fa0e
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/ui/view/adapter/BaseResultAdapter.java
@@ -0,0 +1,48 @@
+package com.baidu.paddle.fastdeploy.app.ui.view.adapter;
+
+import android.content.Context;
+import android.support.annotation.NonNull;
+import android.support.annotation.Nullable;
+import android.view.LayoutInflater;
+import android.view.View;
+import android.view.ViewGroup;
+import android.widget.ArrayAdapter;
+import android.widget.TextView;
+
+import com.baidu.paddle.fastdeploy.app.examples.R;
+import com.baidu.paddle.fastdeploy.app.ui.view.model.BaseResultModel;
+
+import java.text.DecimalFormat;
+import java.util.List;
+
+public class BaseResultAdapter extends ArrayAdapter<BaseResultModel> {
+ private int resourceId;
+
+ public BaseResultAdapter(@NonNull Context context, int resource) {
+ super(context, resource);
+ }
+
+ public BaseResultAdapter(@NonNull Context context, int resource, @NonNull List<BaseResultModel> objects) {
+ super(context, resource, objects);
+ resourceId = resource;
+ }
+
+ @NonNull
+ @Override
+ public View getView(int position, @Nullable View convertView, @NonNull ViewGroup parent) {
+ BaseResultModel model = getItem(position);
+ View view = LayoutInflater.from(getContext()).inflate(resourceId, null);
+ TextView indexText = (TextView) view.findViewById(R.id.index);
+ TextView nameText = (TextView) view.findViewById(R.id.name);
+ TextView confidenceText = (TextView) view.findViewById(R.id.confidence);
+ indexText.setText(String.valueOf(model.getIndex()));
+ nameText.setText(String.valueOf(model.getName()));
+ confidenceText.setText(formatFloatString(model.getConfidence()));
+ return view;
+ }
+
+ public static String formatFloatString(float number) {
+ DecimalFormat df = new DecimalFormat("0.00");
+ return df.format(number);
+ }
+}
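+
+// Caller-side sketch (hypothetical, assuming the row layout R.layout.ocr_result_page_item
+// declared elsewhere in this patch provides the index/name/confidence TextViews):
+//
+//   List<BaseResultModel> results = new ArrayList<>();
+//   results.add(new BaseResultModel(1, "some text", 0.95f));
+//   resultListView.setAdapter(new BaseResultAdapter(context, R.layout.ocr_result_page_item, results));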
diff --git a/deploy/fastdeploy/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/ui/view/model/BaseResultModel.java b/deploy/fastdeploy/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/ui/view/model/BaseResultModel.java
new file mode 100644
index 0000000000000000000000000000000000000000..cae71b6909db125894a2ce0da8ac3485dd48619f
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/java/com/baidu/paddle/fastdeploy/app/ui/view/model/BaseResultModel.java
@@ -0,0 +1,41 @@
+package com.baidu.paddle.fastdeploy.app.ui.view.model;
+
+public class BaseResultModel {
+ private int index;
+ private String name;
+ private float confidence;
+
+ public BaseResultModel() {
+
+ }
+
+ public BaseResultModel(int index, String name, float confidence) {
+ this.index = index;
+ this.name = name;
+ this.confidence = confidence;
+ }
+
+ public float getConfidence() {
+ return confidence;
+ }
+
+ public void setConfidence(float confidence) {
+ this.confidence = confidence;
+ }
+
+ public int getIndex() {
+ return index;
+ }
+
+ public void setIndex(int index) {
+ this.index = index;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+}
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-v24/action_button_layer.xml b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/action_button_layer.xml
new file mode 100644
index 0000000000000000000000000000000000000000..a0d2e76bfa39dc7faa6cca58132ea6c0691c3f15
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/action_button_layer.xml
@@ -0,0 +1,14 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-v24/album_btn.xml b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/album_btn.xml
new file mode 100644
index 0000000000000000000000000000000000000000..26d01c584185231af27b424b26de8b957a8f5c28
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/album_btn.xml
@@ -0,0 +1,7 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-v24/ic_launcher_foreground.xml b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/ic_launcher_foreground.xml
new file mode 100644
index 0000000000000000000000000000000000000000..1f6bb290603d7caa16c5fb6f61bbfdc750622f5c
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/ic_launcher_foreground.xml
@@ -0,0 +1,34 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-v24/realtime_start_btn.xml b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/realtime_start_btn.xml
new file mode 100644
index 0000000000000000000000000000000000000000..664134453069f0353eb0e34893bb7d9b6efa8a78
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/realtime_start_btn.xml
@@ -0,0 +1,7 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-v24/realtime_stop_btn.xml b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/realtime_stop_btn.xml
new file mode 100644
index 0000000000000000000000000000000000000000..8869a1b2bf0a73abee8438ee12ddda8ec1e8524f
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/realtime_stop_btn.xml
@@ -0,0 +1,7 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-v24/result_page_border_section_bk.xml b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/result_page_border_section_bk.xml
new file mode 100644
index 0000000000000000000000000000000000000000..bd068f169f551e5f88942ed65c5dca83fc8a6033
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/result_page_border_section_bk.xml
@@ -0,0 +1,12 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-v24/round_corner_btn.xml b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/round_corner_btn.xml
new file mode 100644
index 0000000000000000000000000000000000000000..c5dcc45d56375ae8bfad057aea837a1d34c6aac2
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/round_corner_btn.xml
@@ -0,0 +1,10 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-v24/seekbar_progress_realtime.xml b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/seekbar_progress_realtime.xml
new file mode 100644
index 0000000000000000000000000000000000000000..b349d15a6aa37105a7ce2a1d09db4490ff715341
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/seekbar_progress_realtime.xml
@@ -0,0 +1,18 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-v24/seekbar_progress_result.xml b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/seekbar_progress_result.xml
new file mode 100644
index 0000000000000000000000000000000000000000..17cb68ed80ccb203d76c20bf6be25cf3408f7a22
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/seekbar_progress_result.xml
@@ -0,0 +1,18 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-v24/seekbar_thumb.xml b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/seekbar_thumb.xml
new file mode 100644
index 0000000000000000000000000000000000000000..96bd95e0a1736f5eb1bf574c041fd631a888f2b4
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/seekbar_thumb.xml
@@ -0,0 +1,9 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-v24/seekbar_thumb_shape.xml b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/seekbar_thumb_shape.xml
new file mode 100644
index 0000000000000000000000000000000000000000..26d033b6df27d3bdec275cb938914d5087d753ce
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/seekbar_thumb_shape.xml
@@ -0,0 +1,26 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-v24/switch_side_btn.xml b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/switch_side_btn.xml
new file mode 100644
index 0000000000000000000000000000000000000000..b9b2edfb6a55a246302cbf7b67e6a8110ceebe54
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/switch_side_btn.xml
@@ -0,0 +1,7 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-v24/take_picture_btn.xml b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/take_picture_btn.xml
new file mode 100644
index 0000000000000000000000000000000000000000..4966675c35cfae5b1514b6600ada79f855550a92
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/drawable-v24/take_picture_btn.xml
@@ -0,0 +1,7 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/album.png b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/album.png
new file mode 100644
index 0000000000000000000000000000000000000000..3a6fdedaee3cce52cf376ecb9977ea750a6014df
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/album.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/album_pressed.png b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/album_pressed.png
new file mode 100644
index 0000000000000000000000000000000000000000..aa873424ebb9921081bbb9618875fc410bf9c84d
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/album_pressed.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/back_btn.png b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/back_btn.png
new file mode 100644
index 0000000000000000000000000000000000000000..ff121e85f5614dfd022f39627028af825a46d683
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/back_btn.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/more_menu.png b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/more_menu.png
new file mode 100644
index 0000000000000000000000000000000000000000..edf9f3ccced5afeb71d9516d93ea19f26c7d9984
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/more_menu.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/realtime_start.png b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/realtime_start.png
new file mode 100644
index 0000000000000000000000000000000000000000..94ab0817247bfa462d539237441cdc5795f1fdb0
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/realtime_start.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/realtime_start_pressed.png b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/realtime_start_pressed.png
new file mode 100644
index 0000000000000000000000000000000000000000..feef0fea62a15ab72af6556cae2811f9e5f1e3c5
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/realtime_start_pressed.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/realtime_stop.png b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/realtime_stop.png
new file mode 100644
index 0000000000000000000000000000000000000000..8c926367db6d1b66e1a2ef0cfe79c2eee2dbc789
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/realtime_stop.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/realtime_stop_pressed.png b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/realtime_stop_pressed.png
new file mode 100644
index 0000000000000000000000000000000000000000..309082788b0ca3b7686ded57f123e9e501110182
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/realtime_stop_pressed.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/scan_icon.png b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/scan_icon.png
new file mode 100644
index 0000000000000000000000000000000000000000..7517d99d09403cad513c22da492c43c8cde6c9e3
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/scan_icon.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/seekbar_handle.png b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/seekbar_handle.png
new file mode 100644
index 0000000000000000000000000000000000000000..55f5f73991da608090a5586e95158dfd31760609
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/seekbar_handle.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/seekbar_progress_dotted.png b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/seekbar_progress_dotted.png
new file mode 100644
index 0000000000000000000000000000000000000000..e6241d12e6e67c53f45d8955bdae0707e8c68683
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/seekbar_progress_dotted.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/seekbar_thumb_invisible.png b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/seekbar_thumb_invisible.png
new file mode 100644
index 0000000000000000000000000000000000000000..acfe8d374a41fdd2db428f9e5242c790fd0b3926
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/seekbar_thumb_invisible.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/switch_side.png b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/switch_side.png
new file mode 100644
index 0000000000000000000000000000000000000000..3e6ae9a9472b10d72aac63c4755d67ff33704f31
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/switch_side.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/switch_side_pressed.png b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/switch_side_pressed.png
new file mode 100644
index 0000000000000000000000000000000000000000..25e1522768f55c7ff7f8f4f6b12073b084dcb2ae
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/switch_side_pressed.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/take_picture.png b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/take_picture.png
new file mode 100644
index 0000000000000000000000000000000000000000..d6ced986e82ce3eefe6e1f81fb662dc3797cb764
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/take_picture.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/take_picture_pressed.png b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/take_picture_pressed.png
new file mode 100644
index 0000000000000000000000000000000000000000..5f9c8ee3b51b5849d375136ee6fef178103d9738
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/drawable-xhdpi/take_picture_pressed.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-xxhdpi-v4/btn_switch_default.png b/deploy/fastdeploy/android/app/src/main/res/drawable-xxhdpi-v4/btn_switch_default.png
new file mode 100644
index 0000000000000000000000000000000000000000..b9e66c7f605dd5a02d13f04284a046810b292add
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/drawable-xxhdpi-v4/btn_switch_default.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable-xxhdpi-v4/btn_switch_pressed.png b/deploy/fastdeploy/android/app/src/main/res/drawable-xxhdpi-v4/btn_switch_pressed.png
new file mode 100644
index 0000000000000000000000000000000000000000..9544133bdade8f57552f9ab22976be3172c95b86
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/drawable-xxhdpi-v4/btn_switch_pressed.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable/btn_settings.xml b/deploy/fastdeploy/android/app/src/main/res/drawable/btn_settings.xml
new file mode 100644
index 0000000000000000000000000000000000000000..917897b99981d18082d18a87a4ad5176ad8e8f8d
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/drawable/btn_settings.xml
@@ -0,0 +1,6 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable/btn_settings_default.xml b/deploy/fastdeploy/android/app/src/main/res/drawable/btn_settings_default.xml
new file mode 100644
index 0000000000000000000000000000000000000000..e19589a97e419249eaacd05f3d75deeeada3e128
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/drawable/btn_settings_default.xml
@@ -0,0 +1,13 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable/btn_settings_pressed.xml b/deploy/fastdeploy/android/app/src/main/res/drawable/btn_settings_pressed.xml
new file mode 100644
index 0000000000000000000000000000000000000000..c4af2a042de3a8ae00ab253f889a20dedffa4874
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/drawable/btn_settings_pressed.xml
@@ -0,0 +1,13 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable/btn_shutter.xml b/deploy/fastdeploy/android/app/src/main/res/drawable/btn_shutter.xml
new file mode 100644
index 0000000000000000000000000000000000000000..4f9826d3ae340b54046a48e4250a9d7e0b9d9139
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/drawable/btn_shutter.xml
@@ -0,0 +1,5 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable/btn_shutter_default.xml b/deploy/fastdeploy/android/app/src/main/res/drawable/btn_shutter_default.xml
new file mode 100644
index 0000000000000000000000000000000000000000..234ca014a76b9647959814fa28e0c02324a8d814
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/drawable/btn_shutter_default.xml
@@ -0,0 +1,17 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable/btn_shutter_pressed.xml b/deploy/fastdeploy/android/app/src/main/res/drawable/btn_shutter_pressed.xml
new file mode 100644
index 0000000000000000000000000000000000000000..accc7acedb91cc4fb8171d78eeba24eaa6b0c2db
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/drawable/btn_shutter_pressed.xml
@@ -0,0 +1,17 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable/btn_switch.xml b/deploy/fastdeploy/android/app/src/main/res/drawable/btn_switch.xml
new file mode 100644
index 0000000000000000000000000000000000000000..691e8c2e97d7a65d580e4d12d6b77608083b5617
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/drawable/btn_switch.xml
@@ -0,0 +1,5 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/drawable/ic_launcher_background.xml b/deploy/fastdeploy/android/app/src/main/res/drawable/ic_launcher_background.xml
new file mode 100644
index 0000000000000000000000000000000000000000..0d025f9bf6b67c63044a36a9ff44fbc69e5c5822
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/drawable/ic_launcher_background.xml
@@ -0,0 +1,170 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/layout-land/ocr_activity_main.xml b/deploy/fastdeploy/android/app/src/main/res/layout-land/ocr_activity_main.xml
new file mode 100644
index 0000000000000000000000000000000000000000..b30f35edf73786cd8d8b97db03f90567922647d9
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/layout-land/ocr_activity_main.xml
@@ -0,0 +1,14 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/layout/ocr_activity_main.xml b/deploy/fastdeploy/android/app/src/main/res/layout/ocr_activity_main.xml
new file mode 100644
index 0000000000000000000000000000000000000000..b30f35edf73786cd8d8b97db03f90567922647d9
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/layout/ocr_activity_main.xml
@@ -0,0 +1,14 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/layout/ocr_camera_page.xml b/deploy/fastdeploy/android/app/src/main/res/layout/ocr_camera_page.xml
new file mode 100644
index 0000000000000000000000000000000000000000..6f31c2c7e4423867f4f96ede92ca1594f432ac58
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/layout/ocr_camera_page.xml
@@ -0,0 +1,160 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/layout/ocr_result_page.xml b/deploy/fastdeploy/android/app/src/main/res/layout/ocr_result_page.xml
new file mode 100644
index 0000000000000000000000000000000000000000..958a85940147f5726208f6504bc3c94212939b95
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/layout/ocr_result_page.xml
@@ -0,0 +1,160 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/layout/ocr_result_page_item.xml b/deploy/fastdeploy/android/app/src/main/res/layout/ocr_result_page_item.xml
new file mode 100644
index 0000000000000000000000000000000000000000..6a2b09ebff16c3398c0fe64dff2772c00ba6be53
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/layout/ocr_result_page_item.xml
@@ -0,0 +1,26 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml b/deploy/fastdeploy/android/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml
new file mode 100644
index 0000000000000000000000000000000000000000..eca70cfe52eac1ba66ba280a68ca7be8fcf88a16
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="utf-8"?>
+<adaptive-icon xmlns:android="http://schemas.android.com/apk/res/android">
+    <background android:drawable="@drawable/ic_launcher_background"/>
+    <foreground android:drawable="@drawable/ic_launcher_foreground"/>
+</adaptive-icon>
\ No newline at end of file
diff --git a/deploy/fastdeploy/android/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml b/deploy/fastdeploy/android/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml
new file mode 100644
index 0000000000000000000000000000000000000000..eca70cfe52eac1ba66ba280a68ca7be8fcf88a16
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="utf-8"?>
+<adaptive-icon xmlns:android="http://schemas.android.com/apk/res/android">
+    <background android:drawable="@drawable/ic_launcher_background"/>
+    <foreground android:drawable="@drawable/ic_launcher_foreground"/>
+</adaptive-icon>
\ No newline at end of file
diff --git a/deploy/fastdeploy/android/app/src/main/res/mipmap-hdpi/ic_launcher.png b/deploy/fastdeploy/android/app/src/main/res/mipmap-hdpi/ic_launcher.png
new file mode 100644
index 0000000000000000000000000000000000000000..898f3ed59ac9f3248734a00e5902736c9367d455
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/mipmap-hdpi/ic_launcher.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/mipmap-hdpi/ic_launcher_round.png b/deploy/fastdeploy/android/app/src/main/res/mipmap-hdpi/ic_launcher_round.png
new file mode 100644
index 0000000000000000000000000000000000000000..dffca3601eba7bf5f409bdd520820e2eb5122c75
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/mipmap-hdpi/ic_launcher_round.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/mipmap-mdpi/ic_launcher.png b/deploy/fastdeploy/android/app/src/main/res/mipmap-mdpi/ic_launcher.png
new file mode 100644
index 0000000000000000000000000000000000000000..64ba76f75e9ce021aa3d95c213491f73bcacb597
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/mipmap-mdpi/ic_launcher.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/mipmap-mdpi/ic_launcher_round.png b/deploy/fastdeploy/android/app/src/main/res/mipmap-mdpi/ic_launcher_round.png
new file mode 100644
index 0000000000000000000000000000000000000000..dae5e082342fcdeee5db8a6e0b27028e2d2808f5
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/mipmap-mdpi/ic_launcher_round.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/mipmap-xhdpi/ic_launcher.png b/deploy/fastdeploy/android/app/src/main/res/mipmap-xhdpi/ic_launcher.png
new file mode 100644
index 0000000000000000000000000000000000000000..e5ed46597ea8447d91ab1786a34e30f1c26b18bd
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/mipmap-xhdpi/ic_launcher.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/mipmap-xhdpi/ic_launcher_round.png b/deploy/fastdeploy/android/app/src/main/res/mipmap-xhdpi/ic_launcher_round.png
new file mode 100644
index 0000000000000000000000000000000000000000..14ed0af35023e4f1901cf03487b6c524257b8483
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/mipmap-xhdpi/ic_launcher_round.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/mipmap-xxhdpi/ic_launcher.png b/deploy/fastdeploy/android/app/src/main/res/mipmap-xxhdpi/ic_launcher.png
new file mode 100644
index 0000000000000000000000000000000000000000..b0907cac3bfd8fbfdc46e1108247f0a1055387ec
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/mipmap-xxhdpi/ic_launcher.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.png b/deploy/fastdeploy/android/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.png
new file mode 100644
index 0000000000000000000000000000000000000000..d8ae03154975f397f8ed1b84f2d4bf9783ecfa26
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png b/deploy/fastdeploy/android/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png
new file mode 100644
index 0000000000000000000000000000000000000000..2c18de9e66108411737e910f5c1972476f03ddbf
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png b/deploy/fastdeploy/android/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png
new file mode 100644
index 0000000000000000000000000000000000000000..beed3cdd2c32af5114a7dc70b9ef5b698eb8797e
Binary files /dev/null and b/deploy/fastdeploy/android/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png differ
diff --git a/deploy/fastdeploy/android/app/src/main/res/values/arrays.xml b/deploy/fastdeploy/android/app/src/main/res/values/arrays.xml
new file mode 100644
index 0000000000000000000000000000000000000000..c7cf123788b49665435742d26fdb4dcc576c8a9a
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/values/arrays.xml
@@ -0,0 +1,39 @@
+
+
+
+ - 1 threads
+ - 2 threads
+ - 4 threads
+ - 8 threads
+
+
+ - 1
+ - 2
+ - 4
+ - 8
+
+
+ - HIGH(only big cores)
+ - LOW(only LITTLE cores)
+ - FULL(all cores)
+ - NO_BIND(depends on system)
+ - RAND_HIGH
+ - RAND_LOW
+
+
+ - LITE_POWER_HIGH
+ - LITE_POWER_LOW
+ - LITE_POWER_FULL
+ - LITE_POWER_NO_BIND
+ - LITE_POWER_RAND_HIGH
+ - LITE_POWER_RAND_LOW
+
+
+ - true
+ - false
+
+
+ - true
+ - false
+
+
\ No newline at end of file
diff --git a/deploy/fastdeploy/android/app/src/main/res/values/colors.xml b/deploy/fastdeploy/android/app/src/main/res/values/colors.xml
new file mode 100644
index 0000000000000000000000000000000000000000..f8ec1f0c3bca8b1b8cf4a82334fdd6ab18f35862
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/values/colors.xml
@@ -0,0 +1,22 @@
+
+
+ #008577
+ #00574B
+ #D81B60
+ #FF000000
+ #00000000
+ #00000000
+ #FFFFFFFF
+
+ #000000
+ #3B85F5
+ #F5A623
+ #FFFFFF
+
+ #EEEEEE
+
+ #3B85F5
+ #333333
+ #E5E5E5
+ #3b85f5
+
diff --git a/deploy/fastdeploy/android/app/src/main/res/values/dimens.xml b/deploy/fastdeploy/android/app/src/main/res/values/dimens.xml
new file mode 100644
index 0000000000000000000000000000000000000000..2df89499da7090787effe0b811af18a2612b0f4c
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/values/dimens.xml
@@ -0,0 +1,17 @@
+
+
+ 26dp
+ 36dp
+ 34dp
+ 60dp
+ 16dp
+ 67dp
+ 67dp
+ 56dp
+ 56dp
+ 46dp
+ 46dp
+ 32dp
+ 24dp
+ 16dp
+
diff --git a/deploy/fastdeploy/android/app/src/main/res/values/strings.xml b/deploy/fastdeploy/android/app/src/main/res/values/strings.xml
new file mode 100644
index 0000000000000000000000000000000000000000..b5c396f5f781f3eee74272953c95bf7fd78ae369
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/values/strings.xml
@@ -0,0 +1,51 @@
+
+
+ EasyEdge
+
+ EasyEdge
+ EasyEdge
+ EasyEdge
+ EasyEdge
+ EasyEdge
+
+ CHOOSE_INSTALLED_MODEL_KEY
+ MODEL_DIR_KEY
+ LABEL_PATH_KEY
+ CPU_THREAD_NUM_KEY
+ CPU_POWER_MODE_KEY
+ SCORE_THRESHOLD_KEY
+ ENABLE_LITE_FP16_MODE_KEY
+
+ 2
+ LITE_POWER_HIGH
+ 0.4
+ 0.1
+ 0.25
+ true
+
+
+ models/picodet_s_320_coco_lcnet
+ labels/coco_label_list.txt
+
+ models
+ labels/ppocr_keys_v1.txt
+
+ models/MobileNetV1_x0_25_infer
+ labels/imagenet1k_label_list.txt
+
+ models/scrfd_500m_bnkps_shape320x320_pd
+
+ models/human_pp_humansegv1_lite_192x192_inference_model
+
+ 拍照识别
+ 实时识别
+ <
+ 模型名称
+ 识别结果
+ 序号
+ 名称
+ 置信度
+ 阈值控制
+ 重新识别
+ 保存结果
+
diff --git a/deploy/fastdeploy/android/app/src/main/res/values/styles.xml b/deploy/fastdeploy/android/app/src/main/res/values/styles.xml
new file mode 100644
index 0000000000000000000000000000000000000000..67c147594487ee33165cb1c13d0cc8bc332671a9
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/values/styles.xml
@@ -0,0 +1,70 @@
diff --git a/deploy/fastdeploy/android/app/src/main/res/values/values.xml b/deploy/fastdeploy/android/app/src/main/res/values/values.xml
new file mode 100644
index 0000000000000000000000000000000000000000..156146d9ad86481e7aaa245be39936fbaa1f765f
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/values/values.xml
@@ -0,0 +1,17 @@
+
+
+ 120dp
+ 46px
+
+ 126px
+ 136px
+
+ 46px
+
+ 36px
+
+ 15dp
+
+ 15dp
+
+
\ No newline at end of file
diff --git a/deploy/fastdeploy/android/app/src/main/res/xml/ocr_settings.xml b/deploy/fastdeploy/android/app/src/main/res/xml/ocr_settings.xml
new file mode 100644
index 0000000000000000000000000000000000000000..692b74b4cd21fe040ca6dd825040c07e5ecb2f67
--- /dev/null
+++ b/deploy/fastdeploy/android/app/src/main/res/xml/ocr_settings.xml
@@ -0,0 +1,45 @@
diff --git a/deploy/fastdeploy/android/build.gradle b/deploy/fastdeploy/android/build.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..d8d678b3ffd56e367294f6c5fb7c4be25df22a7c
--- /dev/null
+++ b/deploy/fastdeploy/android/build.gradle
@@ -0,0 +1,37 @@
+// Top-level build file where you can add configuration options common to all sub-projects/modules.
+//plugins {
+// id 'com.android.application' version '7.2.2' apply false
+// id 'com.android.library' version '7.2.2' apply false
+//}
+//
+//task clean(type: Delete) {
+// delete rootProject.buildDir
+//}
+
+buildscript {
+ repositories {
+ google()
+ jcenter()
+ // mavenCentral()
+
+ }
+ dependencies {
+ classpath 'com.android.tools.build:gradle:7.2.2'
+
+ // NOTE: Do not place your application dependencies here; they belong
+ // in the individual module build.gradle files
+ }
+}
+
+allprojects {
+ repositories {
+ google()
+ jcenter()
+ // mavenCentral()
+
+ }
+}
+
+task clean(type: Delete) {
+ delete rootProject.buildDir
+}
diff --git a/deploy/fastdeploy/android/gradle.properties b/deploy/fastdeploy/android/gradle.properties
new file mode 100644
index 0000000000000000000000000000000000000000..ae995d47ccd9199fa367c2566d87f18caf10b8e5
--- /dev/null
+++ b/deploy/fastdeploy/android/gradle.properties
@@ -0,0 +1,13 @@
+# Project-wide Gradle settings.
+# IDE (e.g. Android Studio) users:
+# Gradle settings configured through the IDE *will override*
+# any settings specified in this file.
+# For more details on how to configure your build environment visit
+# http://www.gradle.org/docs/current/userguide/build_environment.html
+# Specifies the JVM arguments used for the daemon process.
+# The setting is particularly useful for tweaking memory settings.
+org.gradle.jvmargs=-Xmx3096m
+# When configured, Gradle will run in incubating parallel mode.
+# This option should only be used with decoupled projects. More details, visit
+# http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects
+# org.gradle.parallel=true
diff --git a/deploy/fastdeploy/android/gradle/wrapper/gradle-wrapper.jar b/deploy/fastdeploy/android/gradle/wrapper/gradle-wrapper.jar
new file mode 100644
index 0000000000000000000000000000000000000000..e708b1c023ec8b20f512888fe07c5bd3ff77bb8f
Binary files /dev/null and b/deploy/fastdeploy/android/gradle/wrapper/gradle-wrapper.jar differ
diff --git a/deploy/fastdeploy/android/gradle/wrapper/gradle-wrapper.properties b/deploy/fastdeploy/android/gradle/wrapper/gradle-wrapper.properties
new file mode 100644
index 0000000000000000000000000000000000000000..7855fafe4997690cd9fdc4db93d3b7491f7fb747
--- /dev/null
+++ b/deploy/fastdeploy/android/gradle/wrapper/gradle-wrapper.properties
@@ -0,0 +1,6 @@
+#Sat Oct 08 17:24:34 CST 2022
+distributionBase=GRADLE_USER_HOME
+distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.3-bin.zip
+distributionPath=wrapper/dists
+zipStorePath=wrapper/dists
+zipStoreBase=GRADLE_USER_HOME
diff --git a/deploy/fastdeploy/android/gradlew b/deploy/fastdeploy/android/gradlew
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/deploy/fastdeploy/android/gradlew.bat b/deploy/fastdeploy/android/gradlew.bat
new file mode 100644
index 0000000000000000000000000000000000000000..107acd32c4e687021ef32db511e8a206129b88ec
--- /dev/null
+++ b/deploy/fastdeploy/android/gradlew.bat
@@ -0,0 +1,89 @@
+@rem
+@rem Copyright 2015 the original author or authors.
+@rem
+@rem Licensed under the Apache License, Version 2.0 (the "License");
+@rem you may not use this file except in compliance with the License.
+@rem You may obtain a copy of the License at
+@rem
+@rem https://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+
+@if "%DEBUG%" == "" @echo off
+@rem ##########################################################################
+@rem
+@rem Gradle startup script for Windows
+@rem
+@rem ##########################################################################
+
+@rem Set local scope for the variables with windows NT shell
+if "%OS%"=="Windows_NT" setlocal
+
+set DIRNAME=%~dp0
+if "%DIRNAME%" == "" set DIRNAME=.
+set APP_BASE_NAME=%~n0
+set APP_HOME=%DIRNAME%
+
+@rem Resolve any "." and ".." in APP_HOME to make it shorter.
+for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
+
+@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
+
+@rem Find java.exe
+if defined JAVA_HOME goto findJavaFromJavaHome
+
+set JAVA_EXE=java.exe
+%JAVA_EXE% -version >NUL 2>&1
+if "%ERRORLEVEL%" == "0" goto execute
+
+echo.
+echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:findJavaFromJavaHome
+set JAVA_HOME=%JAVA_HOME:"=%
+set JAVA_EXE=%JAVA_HOME%/bin/java.exe
+
+if exist "%JAVA_EXE%" goto execute
+
+echo.
+echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:execute
+@rem Setup the command line
+
+set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
+
+
+@rem Execute Gradle
+"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %*
+
+:end
+@rem End local scope for the variables with windows NT shell
+if "%ERRORLEVEL%"=="0" goto mainEnd
+
+:fail
+rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
+rem the _cmd.exe /c_ return code!
+if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
+exit /b 1
+
+:mainEnd
+if "%OS%"=="Windows_NT" endlocal
+
+:omega
diff --git a/deploy/fastdeploy/android/local.properties b/deploy/fastdeploy/android/local.properties
new file mode 100644
index 0000000000000000000000000000000000000000..aaa0de9aa3c1c41e9997edd9bc95a5aeba2fed62
--- /dev/null
+++ b/deploy/fastdeploy/android/local.properties
@@ -0,0 +1,8 @@
+## This file must *NOT* be checked into Version Control Systems,
+# as it contains information specific to your local configuration.
+#
+# Location of the SDK. This is only used by Gradle.
+# For customization when using a Version Control System, please read the
+# header note.
+#Tue Nov 29 18:47:20 CST 2022
+sdk.dir=D\:\\androidsdk
diff --git a/deploy/fastdeploy/android/settings.gradle b/deploy/fastdeploy/android/settings.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..e7b4def49cb53d9aa04228dd3edb14c9e635e003
--- /dev/null
+++ b/deploy/fastdeploy/android/settings.gradle
@@ -0,0 +1 @@
+include ':app'
diff --git a/deploy/fastdeploy/ascend/README.md b/deploy/fastdeploy/ascend/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3e13de3ef8bdbc98c530f6acce2d882a56210b6d
--- /dev/null
+++ b/deploy/fastdeploy/ascend/README.md
@@ -0,0 +1,23 @@
+[English](README.md) | 简体中文
+
+# Deploying PaddleOCR Models on Huawei Ascend with FastDeploy
+
+## 1. Overview
+PaddleOCR models can be deployed on Huawei Ascend via FastDeploy.
+
+## 2. Supported Models
+
+The model download links in the table below are provided by the PaddleOCR model zoo; see the [PP-OCR series model list](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.6/doc/doc_ch/models_list.md) for details.
+
+| PaddleOCR Version | Text Detection | Direction Classifier | Text Recognition | Dictionary File | Notes |
+|:----|:----|:----|:----|:----|:--------|
+| ch_PP-OCRv3 [recommended] |[ch_PP-OCRv3_det](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [ch_PP-OCRv3_rec](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar) | [ppocr_keys_v1.txt](https://bj.bcebos.com/paddlehub/fastdeploy/ppocr_keys_v1.txt) | Original ultra-lightweight OCRv3 model; supports Chinese, English and multilingual text detection |
+| en_PP-OCRv3 [recommended] |[en_PP-OCRv3_det](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [en_PP-OCRv3_rec](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_infer.tar) | [en_dict.txt](https://bj.bcebos.com/paddlehub/fastdeploy/en_dict.txt) | Original ultra-lightweight OCRv3 model; supports English and digit recognition; identical to the Chinese model except that the detection and recognition models are trained on different data |
+| ch_PP-OCRv2 |[ch_PP-OCRv2_det](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [ch_PP-OCRv2_rec](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar) | [ppocr_keys_v1.txt](https://bj.bcebos.com/paddlehub/fastdeploy/ppocr_keys_v1.txt) | Original ultra-lightweight OCRv2 model; supports Chinese, English and multilingual text detection |
+| ch_PP-OCRv2_mobile |[ch_ppocr_mobile_v2.0_det](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [ch_ppocr_mobile_v2.0_rec](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar) | [ppocr_keys_v1.txt](https://bj.bcebos.com/paddlehub/fastdeploy/ppocr_keys_v1.txt) | Original ultra-lightweight OCRv2 model; supports Chinese, English and multilingual text detection; even lighter than PP-OCRv2 |
+| ch_PP-OCRv2_server |[ch_ppocr_server_v2.0_det](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [ch_ppocr_server_v2.0_rec](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar) |[ppocr_keys_v1.txt](https://bj.bcebos.com/paddlehub/fastdeploy/ppocr_keys_v1.txt) | Server-side OCRv2 model; supports Chinese, English and multilingual text detection; larger than the ultra-lightweight models but more accurate |
+
+
+## 3. Detailed Deployment Examples
+- [Python deployment](python)
+- [C++ deployment](cpp)
diff --git a/deploy/fastdeploy/ascend/cpp/CMakeLists.txt b/deploy/fastdeploy/ascend/cpp/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..93540a7e83e05228bcb38042a91166c858c95137
--- /dev/null
+++ b/deploy/fastdeploy/ascend/cpp/CMakeLists.txt
@@ -0,0 +1,14 @@
+PROJECT(infer_demo C CXX)
+CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
+
+# Path of the downloaded and extracted FastDeploy SDK
+option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
+
+include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
+
+# Add FastDeploy header dependencies
+include_directories(${FASTDEPLOY_INCS})
+
+add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
+# Link the FastDeploy libraries
+target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})
diff --git a/deploy/fastdeploy/ascend/cpp/README.md b/deploy/fastdeploy/ascend/cpp/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..03cef163ca00c2cb5e75a21d2b4f9bb21c945255
--- /dev/null
+++ b/deploy/fastdeploy/ascend/cpp/README.md
@@ -0,0 +1,57 @@
+[English](README.md) | 简体中文
+# PP-OCRv3 Ascend C++ Deployment Example
+
+This directory provides `infer.cc` for deploying PP-OCRv3 on Huawei Ascend AI processors.
+
+## 1. Environment Preparation
+Before deploying, confirm the following two steps:
+- 1. Build the FastDeploy prediction library for Huawei Ascend AI processors yourself; see [Building the deployment environment for Huawei Ascend AI processors](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install#自行编译安装)
+- 2. Environment initialization is required at deployment time; see [How to deploy with C++ on Huawei Ascend AI processors](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/faq/use_sdk_on_ascend.md)
+
+
+## 2. Model Preparation
+Before deploying, prepare the inference models you need. You can download them from the [list of PaddleOCR models supported by FastDeploy](../README.md).
+
+## 3. Running the Deployment Example
+```bash
+# Download the deployment example code
+git clone https://github.com/PaddlePaddle/PaddleOCR.git
+cd PaddleOCR/deploy/fastdeploy/ascend/cpp
+
+mkdir build
+cd build
+# Build infer_demo with the FastDeploy library built in step 1
+cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-ascend
+make -j
+
+# Download the PP-OCRv3 text detection model
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar
+tar -xvf ch_PP-OCRv3_det_infer.tar
+# Download the text direction classifier model
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar
+tar -xvf ch_ppocr_mobile_v2.0_cls_infer.tar
+# Download the PP-OCRv3 text recognition model
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar
+tar -xvf ch_PP-OCRv3_rec_infer.tar
+
+# Download the test image and dictionary file
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt
+
+# Initialize the environment as described in the docs above, then run:
+./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg
+
+# NOTE: To predict images continuously, prepare all input images at the same size, e.g. N images of size A * B.
+```
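+
+The Ascend-specific settings in `infer.cc` come down to three pieces; a minimal sketch follows (the `rec_model` and `ppocr_v3` names are those used in the `infer.cc` in this directory):
+
+```cpp
+fastdeploy::RuntimeOption option;
+option.UseAscend();  // run the det/cls/rec models on the Ascend AI processor
+
+// On Ascend, static shape infer must be enabled for the rec model.
+rec_model.GetPreprocessor().SetStaticShapeInfer(true);
+
+// With static shape infer enabled, the cls and rec batch sizes must be 1.
+ppocr_v3.SetClsBatchSize(1);
+ppocr_v3.SetRecBatchSize(1);
+```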
+
+The visualized result after running is shown below:
+
+## 4. More Guides
+- [PP-OCR series C++ API reference](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/cpp/html/namespacefastdeploy_1_1vision_1_1ocr.html)
+- [Overview of deploying PaddleOCR models with FastDeploy](../../)
+- [PP-OCRv3 Python deployment](../python)
+- To tune the pre/post-processing hyperparameters, use the text detection or recognition models standalone, or try other models, see [PP-OCR series deployment on CPU/GPU](../../cpu-gpu/python/README.md) for more detailed documentation.
diff --git a/deploy/fastdeploy/ascend/cpp/infer.cc b/deploy/fastdeploy/ascend/cpp/infer.cc
new file mode 100644
index 0000000000000000000000000000000000000000..dc0a986707e7be784c075a35886fc6b2f4bab340
--- /dev/null
+++ b/deploy/fastdeploy/ascend/cpp/infer.cc
@@ -0,0 +1,108 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+#ifdef WIN32
+const char sep = '\\';
+#else
+const char sep = '/';
+#endif
+
+void AscendInfer(const std::string &det_model_dir,
+ const std::string &cls_model_dir,
+ const std::string &rec_model_dir,
+ const std::string &rec_label_file,
+                 const std::string &image_file) {
+ auto det_model_file = det_model_dir + sep + "inference.pdmodel";
+ auto det_params_file = det_model_dir + sep + "inference.pdiparams";
+
+ auto cls_model_file = cls_model_dir + sep + "inference.pdmodel";
+ auto cls_params_file = cls_model_dir + sep + "inference.pdiparams";
+
+ auto rec_model_file = rec_model_dir + sep + "inference.pdmodel";
+ auto rec_params_file = rec_model_dir + sep + "inference.pdiparams";
+
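+  // Configure all three models to run on the Ascend AI processor.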
+ fastdeploy::RuntimeOption option;
+ option.UseAscend();
+
+ auto det_option = option;
+ auto cls_option = option;
+ auto rec_option = option;
+
+ auto det_model = fastdeploy::vision::ocr::DBDetector(
+ det_model_file, det_params_file, det_option);
+ auto cls_model = fastdeploy::vision::ocr::Classifier(
+ cls_model_file, cls_params_file, cls_option);
+ auto rec_model = fastdeploy::vision::ocr::Recognizer(
+ rec_model_file, rec_params_file, rec_label_file, rec_option);
+
+  // When deploying on Ascend, the rec model must enable static shape infer, as below.
+ rec_model.GetPreprocessor().SetStaticShapeInfer(true);
+
+ assert(det_model.Initialized());
+ assert(cls_model.Initialized());
+ assert(rec_model.Initialized());
+
+ // The classification model is optional, so the PP-OCR can also be connected
+ // in series as follows
+ // auto ppocr_v3 = fastdeploy::pipeline::PPOCRv3(&det_model, &rec_model);
+ auto ppocr_v3 =
+ fastdeploy::pipeline::PPOCRv3(&det_model, &cls_model, &rec_model);
+
+  // When static shape infer is enabled for the rec model, the batch sizes of
+  // the cls and rec models must be set to 1.
+ ppocr_v3.SetClsBatchSize(1);
+ ppocr_v3.SetRecBatchSize(1);
+
+ if (!ppocr_v3.Initialized()) {
+ std::cerr << "Failed to initialize PP-OCR." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+
+ fastdeploy::vision::OCRResult result;
+ if (!ppocr_v3.Predict(im, &result)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+
+ std::cout << result.Str() << std::endl;
+
+ auto vis_im = fastdeploy::vision::VisOcr(im, result);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+ if (argc < 6) {
+ std::cout << "Usage: infer_demo path/to/det_model path/to/cls_model "
+ "path/to/rec_model path/to/rec_label_file path/to/image "
+ "e.g ./infer_demo ./ch_PP-OCRv3_det_infer "
+ "./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer "
+ "./ppocr_keys_v1.txt ./12.jpg"
+ << std::endl;
+ return -1;
+ }
+
+ std::string det_model_dir = argv[1];
+ std::string cls_model_dir = argv[2];
+ std::string rec_model_dir = argv[3];
+ std::string rec_label_file = argv[4];
+ std::string test_image = argv[5];
+ AscendInfer(det_model_dir, cls_model_dir, rec_model_dir, rec_label_file,
+ test_image);
+ return 0;
+}
diff --git a/deploy/fastdeploy/ascend/python/README.md b/deploy/fastdeploy/ascend/python/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..74815703aa24a2e42fba8280038c34bd2a286757
--- /dev/null
+++ b/deploy/fastdeploy/ascend/python/README.md
@@ -0,0 +1,49 @@
+[English](README.md) | 简体中文
+# PP-OCRv3 Ascend Python Deployment Example
+
+This directory provides `infer.py` for deploying PP-OCRv3 on Huawei Ascend AI processors.
+
+## 1. Environment Preparation
+Before deploying, build and install the FastDeploy Python wheel for Huawei Ascend AI processors yourself; see [Building the deployment environment for Huawei Ascend AI processors](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install#自行编译安装)
+
+## 2. Model Preparation
+Before deploying, prepare the inference models you need. You can download them from the [list of PaddleOCR models supported by FastDeploy](../README.md).
+
+## 3. Running the Deployment Example
+```bash
+# Download the deployment example code
+git clone https://github.com/PaddlePaddle/PaddleOCR.git
+cd PaddleOCR/deploy/fastdeploy/ascend/python
+
+# Download the PP-OCRv3 text detection model
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar
+tar -xvf ch_PP-OCRv3_det_infer.tar
+# Download the text direction classifier model
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar
+tar -xvf ch_ppocr_mobile_v2.0_cls_infer.tar
+# Download the PP-OCRv3 text recognition model
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar
+tar -xvf ch_PP-OCRv3_rec_infer.tar
+
+# Download the test image and dictionary file
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt
+
+python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg
+# NOTE: To predict images continuously, prepare all input images at the same size, e.g. N images of size A * B.
+```
+
+The visualized result after running is shown below:
+
+## 4. More Guides
+- [PP-OCR series Python API reference](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/python/html/ocr.html)
+- [Overview of deploying PaddleOCR models with FastDeploy](../../)
+- [PP-OCRv3 C++ deployment](../cpp)
+- To tune the pre/post-processing hyperparameters, use the text detection or recognition models standalone, or try other models, see [PP-OCR series deployment on CPU/GPU](../../cpu-gpu/python/README.md) for more detailed documentation.
+
+## 5. FAQ
+- [How to convert vision model prediction results to numpy format](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/faq/vision_result_related_problems.md)
diff --git a/deploy/fastdeploy/ascend/python/infer.py b/deploy/fastdeploy/ascend/python/infer.py
new file mode 100755
index 0000000000000000000000000000000000000000..ceb28e0f7f5855b871a8619d0d920f8adb77b8bb
--- /dev/null
+++ b/deploy/fastdeploy/ascend/python/infer.py
@@ -0,0 +1,103 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fastdeploy as fd
+import cv2
+import os
+
+
+def parse_arguments():
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--det_model", required=True, help="Path of Detection model of PPOCR.")
+ parser.add_argument(
+ "--cls_model",
+ required=True,
+ help="Path of Classification model of PPOCR.")
+ parser.add_argument(
+ "--rec_model",
+ required=True,
+ help="Path of Recognization model of PPOCR.")
+ parser.add_argument(
+ "--rec_label_file",
+ required=True,
+ help="Path of Recognization model of PPOCR.")
+ parser.add_argument(
+ "--image", type=str, required=True, help="Path of test image file.")
+ return parser.parse_args()
+
+
+def build_option(args):
+
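+    # Create one RuntimeOption per model and target the Ascend backend for each.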
+ det_option = fd.RuntimeOption()
+ cls_option = fd.RuntimeOption()
+ rec_option = fd.RuntimeOption()
+
+ det_option.use_ascend()
+ cls_option.use_ascend()
+ rec_option.use_ascend()
+
+ return det_option, cls_option, rec_option
+
+
+args = parse_arguments()
+
+det_model_file = os.path.join(args.det_model, "inference.pdmodel")
+det_params_file = os.path.join(args.det_model, "inference.pdiparams")
+
+cls_model_file = os.path.join(args.cls_model, "inference.pdmodel")
+cls_params_file = os.path.join(args.cls_model, "inference.pdiparams")
+
+rec_model_file = os.path.join(args.rec_model, "inference.pdmodel")
+rec_params_file = os.path.join(args.rec_model, "inference.pdiparams")
+rec_label_file = args.rec_label_file
+
+det_option, cls_option, rec_option = build_option(args)
+
+det_model = fd.vision.ocr.DBDetector(
+ det_model_file, det_params_file, runtime_option=det_option)
+
+cls_model = fd.vision.ocr.Classifier(
+ cls_model_file, cls_params_file, runtime_option=cls_option)
+
+rec_model = fd.vision.ocr.Recognizer(
+ rec_model_file, rec_params_file, rec_label_file, runtime_option=rec_option)
+
+# Enable static shape infer for the rec model.
+# When deploying on Ascend, it must be true.
+rec_model.preprocessor.static_shape_infer = True
+
+# Create PP-OCRv3; if the cls model is not needed,
+# just set cls_model=None.
+ppocr_v3 = fd.vision.ocr.PPOCRv3(
+ det_model=det_model, cls_model=cls_model, rec_model=rec_model)
+
+# The batch size must be set to 1 when static shape infer is enabled.
+ppocr_v3.cls_batch_size = 1
+ppocr_v3.rec_batch_size = 1
+
+# Prepare image.
+im = cv2.imread(args.image)
+
+# Run prediction and print the results.
+result = ppocr_v3.predict(im)
+
+print(result)
+
+# Visualize the output.
+vis_im = fd.vision.vis_ppocr(im, result)
+cv2.imwrite("visualized_result.jpg", vis_im)
+print("Visualized result save in ./visualized_result.jpg")
diff --git a/deploy/fastdeploy/cpu-gpu/README.md b/deploy/fastdeploy/cpu-gpu/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..69a8e3e7e9f0aef831e4cfa803b33ddafb0d0ed2
--- /dev/null
+++ b/deploy/fastdeploy/cpu-gpu/README.md
@@ -0,0 +1,26 @@
+[English](README.md) | 简体中文
+
+# Deploying PaddleOCR Models on CPU and GPU with FastDeploy
+
+## 1. Overview
+PaddleOCR series models can be quickly deployed on NVIDIA GPU, X86 CPU, Phytium CPU, ARM CPU, and Intel GPU (discrete/integrated graphics) hardware via FastDeploy.
+
+## 2. Supported PaddleOCR Inference Models
+
+The inference models in the table below have been tested with FastDeploy; download links are provided by the PaddleOCR model zoo.
+For more models, see the [PP-OCR series model list](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.6/doc/doc_ch/models_list.md); users are welcome to try them.
+
+| PaddleOCR Version | Text Detection | Direction Classifier | Text Recognition | Dictionary File | Notes |
+|:----|:----|:----|:----|:----|:--------|
+| ch_PP-OCRv3 [recommended] |[ch_PP-OCRv3_det](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [ch_PP-OCRv3_rec](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar) | [ppocr_keys_v1.txt](https://bj.bcebos.com/paddlehub/fastdeploy/ppocr_keys_v1.txt) | Original ultra-lightweight OCRv3 model; supports Chinese, English and multilingual text detection |
+| en_PP-OCRv3 [recommended] |[en_PP-OCRv3_det](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [en_PP-OCRv3_rec](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_infer.tar) | [en_dict.txt](https://bj.bcebos.com/paddlehub/fastdeploy/en_dict.txt) | Original ultra-lightweight OCRv3 model; supports English and digit recognition; identical to the Chinese model except that the detection and recognition models are trained on different data |
+| ch_PP-OCRv2 |[ch_PP-OCRv2_det](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [ch_PP-OCRv2_rec](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar) | [ppocr_keys_v1.txt](https://bj.bcebos.com/paddlehub/fastdeploy/ppocr_keys_v1.txt) | Original ultra-lightweight OCRv2 model; supports Chinese, English and multilingual text detection |
+| ch_PP-OCRv2_mobile |[ch_ppocr_mobile_v2.0_det](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [ch_ppocr_mobile_v2.0_rec](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar) | [ppocr_keys_v1.txt](https://bj.bcebos.com/paddlehub/fastdeploy/ppocr_keys_v1.txt) | Original ultra-lightweight OCRv2 model; supports Chinese, English and multilingual text detection; even lighter than PP-OCRv2 |
+| ch_PP-OCRv2_server |[ch_ppocr_server_v2.0_det](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [ch_ppocr_server_v2.0_rec](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar) |[ppocr_keys_v1.txt](https://bj.bcebos.com/paddlehub/fastdeploy/ppocr_keys_v1.txt) | Server-side OCRv2 model; supports Chinese, English and multilingual text detection; larger than the ultra-lightweight models but more accurate |
+
+
+## 3. Detailed Deployment Examples
+- [Python deployment](python)
+- [C++ deployment](cpp)
+- [C deployment](c)
+- [C# deployment](csharp)
diff --git a/deploy/fastdeploy/cpu-gpu/c/CMakeLists.txt b/deploy/fastdeploy/cpu-gpu/c/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b228346da862604d54a0e11ac98512395ffde2da
--- /dev/null
+++ b/deploy/fastdeploy/cpu-gpu/c/CMakeLists.txt
@@ -0,0 +1,13 @@
+PROJECT(infer_demo C)
+CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
+
+# Path of the downloaded and extracted FastDeploy SDK
+option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
+
+include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
+
+# Add FastDeploy header dependencies
+include_directories(${FASTDEPLOY_INCS})
+
+add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.c)
+target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})
diff --git a/deploy/fastdeploy/cpu-gpu/c/README.md b/deploy/fastdeploy/cpu-gpu/c/README.md
new file mode 100755
index 0000000000000000000000000000000000000000..efaea2ecf9e56a727606a203e302a7a4305ec086
--- /dev/null
+++ b/deploy/fastdeploy/cpu-gpu/c/README.md
@@ -0,0 +1,257 @@
+[English](README.md) | 简体中文
+# PaddleOCR CPU-GPU C Deployment Example
+
+This directory provides `infer.c`, which calls the C API to quickly deploy the PP-OCRv3 model on CPU/GPU.
+
+## 1. Overview
+PaddleOCR models can be quickly deployed on NVIDIA GPU, X86 CPU, Phytium CPU, ARM CPU, and Intel GPU (discrete/integrated graphics) hardware via FastDeploy.
+
+## 2. Environment Preparation
+Before deploying, confirm your software and hardware environment and download the pre-built deployment library; see the [FastDeploy installation docs](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install#FastDeploy预编译库安装).
+Taking inference on Linux as an example, the commands in Section 4 below build and test the demo in this directory; this model requires FastDeploy 1.0.4 or later (x.x.x >= 1.0.4).
+
+## 3. Model Preparation
+Before deploying, prepare the inference models you need. You can download them from the [list of PaddleOCR models supported by FastDeploy](../README.md).
+
+## 4. Running the Deployment Example
+```bash
+# Download the deployment example code
+git clone https://github.com/PaddlePaddle/PaddleOCR.git
+cd PaddleOCR/deploy/fastdeploy/cpu-gpu/c
+
+mkdir build
+cd build
+
+# Download the pre-built FastDeploy library; choose a suitable version from the `FastDeploy pre-built libraries` mentioned above
+wget https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-x.x.x.tgz
+
+# Build the demo
+tar xvf fastdeploy-linux-x64-x.x.x.tgz
+cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x
+make -j
+
+# Download the PP-OCRv3 text detection model
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar
+tar -xvf ch_PP-OCRv3_det_infer.tar
+# Download the text direction classifier model
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar
+tar -xvf ch_ppocr_mobile_v2.0_cls_infer.tar
+# Download the PP-OCRv3 text recognition model
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar
+tar -xvf ch_PP-OCRv3_rec_infer.tar
+
+# Download the test image and dictionary file
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt
+
+# Run inference with Paddle Inference on CPU
+./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 0
+# Run inference with Paddle Inference on GPU
+./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 1
+```
+The commands above only work on Linux or macOS. For using the SDK on Windows, see:
+- [How to use the FastDeploy C++ SDK on Windows](../../../../../docs/cn/faq/use_sdk_on_windows.md)
+
+
+The visualized result after running is shown below:
+
+## 5. Overview of the PP-OCRv3 C API
+A brief introduction to the PP-OCRv3 C API follows.
+
+- To switch the deployment backend or perform other customizations, see the [C Runtime API](https://baidu-paddle.github.io/fastdeploy-api/c/html/runtime__option_8h.html).
+- For more PP-OCR C APIs, see the [C PP-OCR API](https://github.com/PaddlePaddle/FastDeploy/blob/develop/c_api/fastdeploy_capi/vision/ocr/ppocr/model.h)
+
+### Configuration
+
+```c
+FD_C_RuntimeOptionWrapper* FD_C_CreateRuntimeOptionWrapper()
+```
+
+> Create a RuntimeOption configuration object and return a pointer for operating it.
+>
+> **Returns**
+>
+> * **fd_c_runtime_option_wrapper**(FD_C_RuntimeOptionWrapper*): pointer to the RuntimeOption object
+
+
+```c
+void FD_C_RuntimeOptionWrapperUseCpu(
+ FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper)
+```
+
+> Enable inference on CPU
+>
+> **Parameters**
+>
+> * **fd_c_runtime_option_wrapper**(FD_C_RuntimeOptionWrapper*): pointer to the RuntimeOption object
+
+```c
+void FD_C_RuntimeOptionWrapperUseGpu(
+ FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
+ int gpu_id)
+```
+> Enable inference on GPU
+>
+> **Parameters**
+>
+> * **fd_c_runtime_option_wrapper**(FD_C_RuntimeOptionWrapper*): pointer to the RuntimeOption object
+> * **gpu_id**(int): GPU device id
+
+
+### Models
+
+```c
+FD_C_DBDetectorWrapper* FD_C_CreateDBDetectorWrapper(
+ const char* model_file, const char* params_file,
+ FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
+ const FD_C_ModelFormat model_format
+)
+```
+
+> Create a DBDetector model and return a pointer for operating it.
+>
+> **Parameters**
+>
+> * **model_file**(const char*): path to the model file
+> * **params_file**(const char*): path to the parameters file
+> * **fd_c_runtime_option_wrapper**(FD_C_RuntimeOptionWrapper*): pointer to the RuntimeOption, i.e. the backend inference configuration
+> * **model_format**(FD_C_ModelFormat): model format
+>
+> **Returns**
+> * **fd_c_dbdetector_wrapper**(FD_C_DBDetectorWrapper*): pointer to the DBDetector model object
+
+```c
+FD_C_ClassifierWrapper* FD_C_CreateClassifierWrapper(
+ const char* model_file, const char* params_file,
+ FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
+ const FD_C_ModelFormat model_format
+)
+```
+> Create a Classifier model and return a pointer for operating it.
+>
+> **Parameters**
+>
+> * **model_file**(const char*): path to the model file
+> * **params_file**(const char*): path to the parameters file
+> * **fd_c_runtime_option_wrapper**(FD_C_RuntimeOptionWrapper*): pointer to the RuntimeOption, i.e. the backend inference configuration
+> * **model_format**(FD_C_ModelFormat): model format
+>
+> **Returns**
+>
+> * **fd_c_classifier_wrapper**(FD_C_ClassifierWrapper*): pointer to the Classifier model object
+
+```c
+FD_C_RecognizerWrapper* FD_C_CreateRecognizerWrapper(
+ const char* model_file, const char* params_file, const char* label_path,
+ FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,
+ const FD_C_ModelFormat model_format
+)
+```
+> Create a Recognizer model and return a pointer for operating it.
+>
+> **Parameters**
+>
+> * **model_file**(const char*): path to the model file
+> * **params_file**(const char*): path to the parameters file
+> * **label_path**(const char*): path to the label file
+> * **fd_c_runtime_option_wrapper**(FD_C_RuntimeOptionWrapper*): pointer to the RuntimeOption, i.e. the backend inference configuration
+> * **model_format**(FD_C_ModelFormat): model format
+>
+> **Returns**
+> * **fd_c_recognizer_wrapper**(FD_C_RecognizerWrapper*): pointer to the Recognizer model object
+
+```c
+FD_C_PPOCRv3Wrapper* FD_C_CreatePPOCRv3Wrapper(
+ FD_C_DBDetectorWrapper* det_model,
+ FD_C_ClassifierWrapper* cls_model,
+ FD_C_RecognizerWrapper* rec_model
+)
+```
+> Create a PP-OCRv3 model and return a pointer for operating it.
+>
+> **Parameters**
+>
+> * **det_model**(FD_C_DBDetectorWrapper*): the DBDetector model
+> * **cls_model**(FD_C_ClassifierWrapper*): the Classifier model
+> * **rec_model**(FD_C_RecognizerWrapper*): the Recognizer model
+>
+> **Returns**
+>
+> * **fd_c_ppocrv3_wrapper**(FD_C_PPOCRv3Wrapper*): pointer to the PP-OCRv3 model object
+
+
+
+### Reading and Writing Images
+
+```c
+FD_C_Mat FD_C_Imread(const char* imgpath)
+```
+
+> Read an image and return a pointer to the cv::Mat.
+>
+> **Parameters**
+>
+> * **imgpath**(const char*): path to the image file
+>
+> **Returns**
+>
+> * **imgmat**(FD_C_Mat): pointer to the image data (a cv::Mat).
+
+
+```c
+FD_C_Bool FD_C_Imwrite(const char* savepath, FD_C_Mat img);
+```
+
+> Write an image to a file.
+>
+> **Parameters**
+>
+> * **savepath**(const char*): path to save the image
+> * **img**(FD_C_Mat): pointer to the image data
+>
+> **Returns**
+>
+> * **result**(FD_C_Bool): whether the operation succeeded
+
+
+### The Predict Function
+
+```c
+FD_C_Bool FD_C_PPOCRv3WrapperPredict(
+ FD_C_PPOCRv3Wrapper* fd_c_ppocrv3_wrapper,
+ FD_C_Mat img,
+ FD_C_OCRResult* result)
+```
+>
+> Model prediction interface: takes an input image and produces the OCR result directly.
+>
+> **Parameters**
+> * **fd_c_ppocrv3_wrapper**(FD_C_PPOCRv3Wrapper*): pointer to the PP-OCRv3 model
+> * **img**(FD_C_Mat): pointer to the input image (a cv::Mat), e.g. obtained via FD_C_Imread
+> * **result**(FD_C_OCRResult*): the OCR prediction result, including box positions from the detection model, orientation labels from the classification model, and text from the recognition model; see [vision model prediction results](../../../../../docs/api/vision_results/) for details on OCRResult
+
+
+### Result Visualization
+
+```c
+FD_C_Mat FD_C_VisOcr(FD_C_Mat im, FD_C_OCRResult* ocr_result)
+```
+>
+> Visualize the result and return the visualized image.
+>
+> **Parameters**
+> * **im**(FD_C_Mat): pointer to the input image
+> * **ocr_result**(FD_C_OCRResult*): pointer to the FD_C_OCRResult struct
+>
+> **Returns**
+> * **vis_im**(FD_C_Mat): pointer to the visualized image
+
+
+## 6. Other Documents
+
+- [Overview of deploying PaddleOCR models with FastDeploy](../../)
+- [PP-OCRv3 Python deployment](../python)
+- [PP-OCRv3 C++ deployment](../cpp)
+- [PP-OCRv3 C# deployment](../csharp)
diff --git a/deploy/fastdeploy/cpu-gpu/c/infer.c b/deploy/fastdeploy/cpu-gpu/c/infer.c
new file mode 100644
index 0000000000000000000000000000000000000000..62bbc2d00246d0ed8ab5acbae15ebc7917c0270a
--- /dev/null
+++ b/deploy/fastdeploy/cpu-gpu/c/infer.c
@@ -0,0 +1,249 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "fastdeploy_capi/vision.h"
+
+#ifdef WIN32
+const char sep = '\\';
+#else
+const char sep = '/';
+#endif
+
+void CpuInfer(const char *det_model_dir, const char *cls_model_dir,
+ const char *rec_model_dir, const char *rec_label_file,
+ const char *image_file) {
+ char det_model_file[100];
+ char det_params_file[100];
+
+ char cls_model_file[100];
+ char cls_params_file[100];
+
+ char rec_model_file[100];
+ char rec_params_file[100];
+
+ int max_size = 99;
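+  // Compose "<model_dir>/inference.pdmodel" and ".pdiparams" paths for each model.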
+ snprintf(det_model_file, max_size, "%s%c%s", det_model_dir, sep,
+ "inference.pdmodel");
+ snprintf(det_params_file, max_size, "%s%c%s", det_model_dir, sep,
+ "inference.pdiparams");
+
+ snprintf(cls_model_file, max_size, "%s%c%s", cls_model_dir, sep,
+ "inference.pdmodel");
+ snprintf(cls_params_file, max_size, "%s%c%s", cls_model_dir, sep,
+ "inference.pdiparams");
+
+ snprintf(rec_model_file, max_size, "%s%c%s", rec_model_dir, sep,
+ "inference.pdmodel");
+ snprintf(rec_params_file, max_size, "%s%c%s", rec_model_dir, sep,
+ "inference.pdiparams");
+
+ FD_C_RuntimeOptionWrapper *det_option = FD_C_CreateRuntimeOptionWrapper();
+ FD_C_RuntimeOptionWrapper *cls_option = FD_C_CreateRuntimeOptionWrapper();
+ FD_C_RuntimeOptionWrapper *rec_option = FD_C_CreateRuntimeOptionWrapper();
+ FD_C_RuntimeOptionWrapperUseCpu(det_option);
+ FD_C_RuntimeOptionWrapperUseCpu(cls_option);
+ FD_C_RuntimeOptionWrapperUseCpu(rec_option);
+
+ FD_C_DBDetectorWrapper *det_model = FD_C_CreateDBDetectorWrapper(
+ det_model_file, det_params_file, det_option, FD_C_ModelFormat_PADDLE);
+ FD_C_ClassifierWrapper *cls_model = FD_C_CreateClassifierWrapper(
+ cls_model_file, cls_params_file, cls_option, FD_C_ModelFormat_PADDLE);
+ FD_C_RecognizerWrapper *rec_model = FD_C_CreateRecognizerWrapper(
+ rec_model_file, rec_params_file, rec_label_file, rec_option,
+ FD_C_ModelFormat_PADDLE);
+
+ FD_C_PPOCRv3Wrapper *ppocr_v3 =
+ FD_C_CreatePPOCRv3Wrapper(det_model, cls_model, rec_model);
+ if (!FD_C_PPOCRv3WrapperInitialized(ppocr_v3)) {
+ printf("Failed to initialize.\n");
+ FD_C_DestroyRuntimeOptionWrapper(det_option);
+ FD_C_DestroyRuntimeOptionWrapper(cls_option);
+ FD_C_DestroyRuntimeOptionWrapper(rec_option);
+ FD_C_DestroyClassifierWrapper(cls_model);
+ FD_C_DestroyDBDetectorWrapper(det_model);
+ FD_C_DestroyRecognizerWrapper(rec_model);
+ FD_C_DestroyPPOCRv3Wrapper(ppocr_v3);
+ return;
+ }
+
+ FD_C_Mat im = FD_C_Imread(image_file);
+
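+  // Allocate the OCR result struct that the predict call will fill in.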
+ FD_C_OCRResult *result = (FD_C_OCRResult *)malloc(sizeof(FD_C_OCRResult));
+
+ if (!FD_C_PPOCRv3WrapperPredict(ppocr_v3, im, result)) {
+ printf("Failed to predict.\n");
+ FD_C_DestroyRuntimeOptionWrapper(det_option);
+ FD_C_DestroyRuntimeOptionWrapper(cls_option);
+ FD_C_DestroyRuntimeOptionWrapper(rec_option);
+ FD_C_DestroyClassifierWrapper(cls_model);
+ FD_C_DestroyDBDetectorWrapper(det_model);
+ FD_C_DestroyRecognizerWrapper(rec_model);
+ FD_C_DestroyPPOCRv3Wrapper(ppocr_v3);
+ FD_C_DestroyMat(im);
+ free(result);
+ return;
+ }
+
+  // Print the result string.
+ char res[2000];
+ FD_C_OCRResultStr(result, res);
+ printf("%s", res);
+ FD_C_Mat vis_im = FD_C_VisOcr(im, result);
+ FD_C_Imwrite("vis_result.jpg", vis_im);
+ printf("Visualized result saved in ./vis_result.jpg\n");
+
+ FD_C_DestroyRuntimeOptionWrapper(det_option);
+ FD_C_DestroyRuntimeOptionWrapper(cls_option);
+ FD_C_DestroyRuntimeOptionWrapper(rec_option);
+ FD_C_DestroyClassifierWrapper(cls_model);
+ FD_C_DestroyDBDetectorWrapper(det_model);
+ FD_C_DestroyRecognizerWrapper(rec_model);
+ FD_C_DestroyPPOCRv3Wrapper(ppocr_v3);
+ FD_C_DestroyOCRResult(result);
+ FD_C_DestroyMat(im);
+ FD_C_DestroyMat(vis_im);
+}
+
+void GpuInfer(const char *det_model_dir, const char *cls_model_dir,
+ const char *rec_model_dir, const char *rec_label_file,
+ const char *image_file) {
+ char det_model_file[100];
+ char det_params_file[100];
+
+ char cls_model_file[100];
+ char cls_params_file[100];
+
+ char rec_model_file[100];
+ char rec_params_file[100];
+
+ int max_size = 99;
+ snprintf(det_model_file, max_size, "%s%c%s", det_model_dir, sep,
+ "inference.pdmodel");
+ snprintf(det_params_file, max_size, "%s%c%s", det_model_dir, sep,
+ "inference.pdiparams");
+
+ snprintf(cls_model_file, max_size, "%s%c%s", cls_model_dir, sep,
+ "inference.pdmodel");
+ snprintf(cls_params_file, max_size, "%s%c%s", cls_model_dir, sep,
+ "inference.pdiparams");
+
+ snprintf(rec_model_file, max_size, "%s%c%s", rec_model_dir, sep,
+ "inference.pdmodel");
+ snprintf(rec_params_file, max_size, "%s%c%s", rec_model_dir, sep,
+ "inference.pdiparams");
+
+ FD_C_RuntimeOptionWrapper *det_option = FD_C_CreateRuntimeOptionWrapper();
+ FD_C_RuntimeOptionWrapper *cls_option = FD_C_CreateRuntimeOptionWrapper();
+ FD_C_RuntimeOptionWrapper *rec_option = FD_C_CreateRuntimeOptionWrapper();
+ FD_C_RuntimeOptionWrapperUseGpu(det_option, 0);
+ FD_C_RuntimeOptionWrapperUseGpu(cls_option, 0);
+ FD_C_RuntimeOptionWrapperUseGpu(rec_option, 0);
+
+ FD_C_DBDetectorWrapper *det_model = FD_C_CreateDBDetectorWrapper(
+ det_model_file, det_params_file, det_option, FD_C_ModelFormat_PADDLE);
+ FD_C_ClassifierWrapper *cls_model = FD_C_CreateClassifierWrapper(
+ cls_model_file, cls_params_file, cls_option, FD_C_ModelFormat_PADDLE);
+ FD_C_RecognizerWrapper *rec_model = FD_C_CreateRecognizerWrapper(
+ rec_model_file, rec_params_file, rec_label_file, rec_option,
+ FD_C_ModelFormat_PADDLE);
+
+ FD_C_PPOCRv3Wrapper *ppocr_v3 =
+ FD_C_CreatePPOCRv3Wrapper(det_model, cls_model, rec_model);
+ if (!FD_C_PPOCRv3WrapperInitialized(ppocr_v3)) {
+ printf("Failed to initialize.\n");
+ FD_C_DestroyRuntimeOptionWrapper(det_option);
+ FD_C_DestroyRuntimeOptionWrapper(cls_option);
+ FD_C_DestroyRuntimeOptionWrapper(rec_option);
+ FD_C_DestroyClassifierWrapper(cls_model);
+ FD_C_DestroyDBDetectorWrapper(det_model);
+ FD_C_DestroyRecognizerWrapper(rec_model);
+ FD_C_DestroyPPOCRv3Wrapper(ppocr_v3);
+ return;
+ }
+
+ FD_C_Mat im = FD_C_Imread(image_file);
+
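+  // Allocate the OCR result struct that the predict call will fill in.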
+ FD_C_OCRResult *result = (FD_C_OCRResult *)malloc(sizeof(FD_C_OCRResult));
+
+ if (!FD_C_PPOCRv3WrapperPredict(ppocr_v3, im, result)) {
+ printf("Failed to predict.\n");
+ FD_C_DestroyRuntimeOptionWrapper(det_option);
+ FD_C_DestroyRuntimeOptionWrapper(cls_option);
+ FD_C_DestroyRuntimeOptionWrapper(rec_option);
+ FD_C_DestroyClassifierWrapper(cls_model);
+ FD_C_DestroyDBDetectorWrapper(det_model);
+ FD_C_DestroyRecognizerWrapper(rec_model);
+ FD_C_DestroyPPOCRv3Wrapper(ppocr_v3);
+ FD_C_DestroyMat(im);
+ free(result);
+ return;
+ }
+
+  // Print the result string.
+ char res[2000];
+ FD_C_OCRResultStr(result, res);
+ printf("%s", res);
+ FD_C_Mat vis_im = FD_C_VisOcr(im, result);
+ FD_C_Imwrite("vis_result.jpg", vis_im);
+ printf("Visualized result saved in ./vis_result.jpg\n");
+
+ FD_C_DestroyRuntimeOptionWrapper(det_option);
+ FD_C_DestroyRuntimeOptionWrapper(cls_option);
+ FD_C_DestroyRuntimeOptionWrapper(rec_option);
+ FD_C_DestroyClassifierWrapper(cls_model);
+ FD_C_DestroyDBDetectorWrapper(det_model);
+ FD_C_DestroyRecognizerWrapper(rec_model);
+ FD_C_DestroyPPOCRv3Wrapper(ppocr_v3);
+ FD_C_DestroyOCRResult(result);
+ FD_C_DestroyMat(im);
+ FD_C_DestroyMat(vis_im);
+}
+
+int main(int argc, char *argv[]) {
+ if (argc < 7) {
+ printf("Usage: infer_demo path/to/det_model path/to/cls_model "
+ "path/to/rec_model path/to/rec_label_file path/to/image "
+ "run_option, "
+ "e.g ./infer_demo ./ch_PP-OCRv3_det_infer "
+ "./ch_ppocr_mobile_v3.0_cls_infer ./ch_PP-OCRv3_rec_infer "
+ "./ppocr_keys_v1.txt ./12.jpg 0\n");
+ printf(
+ "The data type of run_option is int, 0: run with cpu; 1: run with gpu"
+ "\n");
+ return -1;
+ }
+
+ if (atoi(argv[6]) == 0) {
+ CpuInfer(argv[1], argv[2], argv[3], argv[4], argv[5]);
+ } else if (atoi(argv[6]) == 1) {
+ GpuInfer(argv[1], argv[2], argv[3], argv[4], argv[5]);
+ }
+ return 0;
+}
diff --git a/deploy/fastdeploy/cpu-gpu/cpp/CMakeLists.txt b/deploy/fastdeploy/cpu-gpu/cpp/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9844c3b1bb12575e8ed273fc7d9aaf46eeed17c0
--- /dev/null
+++ b/deploy/fastdeploy/cpu-gpu/cpp/CMakeLists.txt
@@ -0,0 +1,30 @@
+PROJECT(infer_demo C CXX)
+CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
+
+# Path of the downloaded and extracted FastDeploy SDK
+option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
+
+include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
+
+# Add FastDeploy header dependencies
+include_directories(${FASTDEPLOY_INCS})
+
+# PP-OCR
+add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
+# Link the FastDeploy libraries
+target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})
+
+# Only Det
+add_executable(infer_det ${PROJECT_SOURCE_DIR}/infer_det.cc)
+# Link the FastDeploy libraries
+target_link_libraries(infer_det ${FASTDEPLOY_LIBS})
+
+# Only Cls
+add_executable(infer_cls ${PROJECT_SOURCE_DIR}/infer_cls.cc)
+# Link the FastDeploy libraries
+target_link_libraries(infer_cls ${FASTDEPLOY_LIBS})
+
+# Only Rec
+add_executable(infer_rec ${PROJECT_SOURCE_DIR}/infer_rec.cc)
+# Link the FastDeploy libraries
+target_link_libraries(infer_rec ${FASTDEPLOY_LIBS})
\ No newline at end of file
diff --git a/deploy/fastdeploy/cpu-gpu/cpp/README.md b/deploy/fastdeploy/cpu-gpu/cpp/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..5ab4edfdeb87356dc9e295e6c6ef9a294d1c5c02
--- /dev/null
+++ b/deploy/fastdeploy/cpu-gpu/cpp/README.md
@@ -0,0 +1,158 @@
+[English](README.md) | 简体中文
+# PaddleOCR CPU-GPU C++ Deployment Example
+
+This directory provides `infer.cc` to quickly deploy PP-OCRv3 on CPU/GPU, as well as on GPU with Paddle-TensorRT acceleration.
+
+## 1. Overview
+PaddleOCR models can be quickly deployed on NVIDIA GPU, X86 CPU, Phytium CPU, ARM CPU, and Intel GPU (discrete/integrated graphics) hardware via FastDeploy.
+
+## 2. Environment Preparation
+Before deploying, confirm your software and hardware environment and download the pre-built deployment library; see the [FastDeploy installation docs](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install#FastDeploy预编译库安装).
+
+## 3. Model Preparation
+Before deploying, prepare the inference models you need. You can download them from the [list of PaddleOCR models supported by FastDeploy](../README.md).
+
+## 4. Running the Deployment Example
+Taking inference on Linux as an example, run the commands below in this directory to build and test; this model requires FastDeploy 1.0.0 or later (x.x.x >= 1.0.0).
+
+```bash
+# Download the deployment example code
+git clone https://github.com/PaddlePaddle/PaddleOCR.git
+cd PaddleOCR/deploy/fastdeploy/cpu-gpu/cpp
+
+# Download the pre-built FastDeploy library; choose a suitable version from the `FastDeploy pre-built libraries` mentioned above
+wget https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-x.x.x.tgz
+tar xvf fastdeploy-linux-x64-x.x.x.tgz
+
+# Build the deployment example
+mkdir build && cd build
+cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-x.x.x
+make -j
+
+# Download the PP-OCRv3 text detection model
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar
+tar -xvf ch_PP-OCRv3_det_infer.tar
+# Download the text direction classifier model
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar
+tar -xvf ch_ppocr_mobile_v2.0_cls_infer.tar
+# Download the PP-OCRv3 text recognition model
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar
+tar -xvf ch_PP-OCRv3_rec_infer.tar
+
+# Download the test image and dictionary file
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt
+
+# Run the deployment example
+# Paddle Inference on CPU
+./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 0
+# OpenVINO on CPU
+./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 1
+# ONNX Runtime on CPU
+./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 2
+# Paddle Lite on CPU
+./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 3
+# Paddle Inference on GPU
+./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 4
+# Paddle TensorRT on GPU
+./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 5
+# ONNX Runtime on GPU
+./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 6
+# Nvidia TensorRT on GPU
+./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 7
+
+# FastDeploy also provides standalone inference for the text detection, classification and recognition models.
+# If needed, prepare suitable images and refer to infer.cc to configure custom hardware and inference backends.
+
+# Standalone text detection model on CPU
+./infer_det ./ch_PP-OCRv3_det_infer ./12.jpg 0
+
+# Standalone text direction classification model on CPU
+./infer_cls ./ch_ppocr_mobile_v2.0_cls_infer ./12.jpg 0
+
+# Standalone text recognition model on CPU
+./infer_rec ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 0
+```
+
+The visualized result after running is shown below:
+
+- Note: the commands above only work on Linux or macOS. For using the SDK on Windows, see: [How to use the FastDeploy C++ SDK on Windows](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/faq/use_sdk_on_windows.md)
+- For using other inference backends or hardware with FastDeploy, see: [How to change the inference backend](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/faq/how_to_change_backend.md)
+
+## 5. Deployment Example Options
+`infer_demo` takes six arguments: the text detection model, text direction classification model, text recognition model, dictionary file, test image, and a final numeric option.
+The table below explains the meaning of the numeric option.
+| Option | Meaning |
+|:---:|:---:|
+|0| Paddle Inference on CPU |
+|1| OpenVINO on CPU |
+|2| ONNX Runtime on CPU |
+|3| Paddle Lite on CPU |
+|4| Paddle Inference on GPU |
+|5| Paddle TensorRT on GPU |
+|6| ONNX Runtime on GPU |
+|7| Nvidia TensorRT on GPU |
+
+For using other inference backends or hardware with FastDeploy, see: [How to change the inference backend](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/faq/how_to_change_backend.md)
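+
+For example, option `5` corresponds to the following `RuntimeOption` calls in `infer.cc`:
+
+```cpp
+fastdeploy::RuntimeOption option;
+option.UseGpu();
+option.UseTrtBackend();
+option.EnablePaddleTrtCollectShape();
+option.EnablePaddleToTrt();  // Paddle-TensorRT
+```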
+
+## 6. More Guides
+
+### 6.1 Deploying PP-OCRv2 series models with C++
+The `infer.cc` in this directory uses a PP-OCRv3 model as the example. To use PP-OCRv2, create it as shown below and use it in the same way.
+
+```cpp
+// This line creates the PP-OCRv3 model
+auto ppocr_v3 = fastdeploy::pipeline::PPOCRv3(&det_model, &cls_model, &rec_model);
+// Simply change PPOCRv3 to PPOCRv2 to create a PP-OCRv2 model; all subsequent calls then go through ppocr_v2
+auto ppocr_v2 = fastdeploy::pipeline::PPOCRv2(&det_model, &cls_model, &rec_model);
+
+// When deploying PP-OCRv2 with TensorRT, the TensorRT input shape of the rec model also needs to change.
+// We suggest the following: set the H dimension to 32 and adjust the W dimension as needed.
+rec_option.SetTrtInputShape("x", {1, 3, 32, 10}, {rec_batch_size, 3, 32, 320},
+                            {rec_batch_size, 3, 32, 2304});
+```
+### 6.2 Disabling the text direction classifier in PP-OCRv2/v3 models
+
+In PP-OCRv3/v2, the text direction classifier is optional; users can decide whether to use it as follows.
+```cpp
+// With the Cls model
+auto ppocr_v3 = fastdeploy::pipeline::PPOCRv3(&det_model, &cls_model, &rec_model);
+
+// Without the Cls model
+auto ppocr_v3 = fastdeploy::pipeline::PPOCRv3(&det_model, &rec_model);
+
+// When not using the Cls model, delete or comment out the related code
+```
+
+### 6.3 Modifying the pre/post-processing hyperparameters
+The example code shows the interfaces for modifying the pre/post-processing hyperparameters and sets them to their default values. The hyperparameters provided by FastDeploy have the same meaning as those documented in [PaddleOCR inference model parameters](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/inference_args.md). For further customization, see the [PP-OCR series C++ API reference](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/cpp/html/namespacefastdeploy_1_1vision_1_1ocr.html)
+
+```cpp
+// Set max_side_len for the detection model
+det_model.GetPreprocessor().SetMaxSideLen(960);
+// Other hyperparameters work the same way, e.g. the detection and classification thresholds
+det_model.GetPostprocessor().SetDetDBThresh(0.3);
+cls_model.GetPostprocessor().SetClsThresh(0.9);
+```
+
+### 6.4 Other Guides
+- [Overview of deploying PaddleOCR models with FastDeploy](../../)
+- [PP-OCRv3 Python deployment](../python)
+- [PP-OCRv3 C deployment](../c)
+- [PP-OCRv3 C# deployment](../csharp)
+
+## 7. FAQ
+- PaddleOCR can run on multiple backends supported by FastDeploy, as shown in the table below. For switching backends, see [How to change the inference backend](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/faq/how_to_change_backend.md)
+
+| Hardware | Supported Backends |
+|:---:|:---:|
+|X86 CPU| Paddle Inference, ONNX Runtime, OpenVINO |
+|ARM CPU| Paddle Lite |
+|Phytium CPU| ONNX Runtime |
+|NVIDIA GPU| Paddle Inference, ONNX Runtime, TensorRT |
+
+- [Using Intel GPU (discrete/integrated graphics)](https://github.com/PaddlePaddle/FastDeploy/blob/develop/tutorials/intel_gpu/README.md)
+- [Building the CPU deployment library](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install/cpu.md)
+- [Building the GPU deployment library](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install/gpu.md)
+- [Building the Jetson deployment library](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install/jetson.md)
diff --git a/deploy/fastdeploy/cpu-gpu/cpp/infer.cc b/deploy/fastdeploy/cpu-gpu/cpp/infer.cc
new file mode 100644
index 0000000000000000000000000000000000000000..6906a01717fff23029de352b375c5e7a68052074
--- /dev/null
+++ b/deploy/fastdeploy/cpu-gpu/cpp/infer.cc
@@ -0,0 +1,174 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+#ifdef WIN32
+const char sep = '\\';
+#else
+const char sep = '/';
+#endif
+
+void InitAndInfer(const std::string &det_model_dir,
+ const std::string &cls_model_dir,
+ const std::string &rec_model_dir,
+ const std::string &rec_label_file,
+ const std::string &image_file,
+ const fastdeploy::RuntimeOption &option) {
+ auto det_model_file = det_model_dir + sep + "inference.pdmodel";
+ auto det_params_file = det_model_dir + sep + "inference.pdiparams";
+
+ auto cls_model_file = cls_model_dir + sep + "inference.pdmodel";
+ auto cls_params_file = cls_model_dir + sep + "inference.pdiparams";
+
+ auto rec_model_file = rec_model_dir + sep + "inference.pdmodel";
+ auto rec_params_file = rec_model_dir + sep + "inference.pdiparams";
+
+ auto det_option = option;
+ auto cls_option = option;
+ auto rec_option = option;
+
+  // The cls and rec models can run inference on a batch of images.
+  // Users can choose the inference batch sizes here and set them on the
+  // PP-OCR model after creating it.
+ int cls_batch_size = 1;
+ int rec_batch_size = 6;
+
+  // If the TRT backend is used, the dynamic shapes below are applied.
+  // We recommend setting the height and width of the detection model input to
+  // multiples of 32, and setting the TRT input shapes as follows.
+ det_option.SetTrtInputShape("x", {1, 3, 64, 64}, {1, 3, 640, 640},
+ {1, 3, 960, 960});
+ cls_option.SetTrtInputShape("x", {1, 3, 48, 10}, {cls_batch_size, 3, 48, 320},
+ {cls_batch_size, 3, 48, 1024});
+ rec_option.SetTrtInputShape("x", {1, 3, 48, 10}, {rec_batch_size, 3, 48, 320},
+ {rec_batch_size, 3, 48, 2304});
+
+  // Users can save the TRT cache files to disk as follows:
+ // det_option.SetTrtCacheFile(det_model_dir + sep + "det_trt_cache.trt");
+ // cls_option.SetTrtCacheFile(cls_model_dir + sep + "cls_trt_cache.trt");
+ // rec_option.SetTrtCacheFile(rec_model_dir + sep + "rec_trt_cache.trt");
+
+ auto det_model = fastdeploy::vision::ocr::DBDetector(
+ det_model_file, det_params_file, det_option);
+ auto cls_model = fastdeploy::vision::ocr::Classifier(
+ cls_model_file, cls_params_file, cls_option);
+ auto rec_model = fastdeploy::vision::ocr::Recognizer(
+ rec_model_file, rec_params_file, rec_label_file, rec_option);
+
+ assert(det_model.Initialized());
+ assert(cls_model.Initialized());
+ assert(rec_model.Initialized());
+
+ // Parameters settings for pre and post processing of Det/Cls/Rec Models.
+ // All parameters are set to default values.
+ det_model.GetPreprocessor().SetMaxSideLen(960);
+ det_model.GetPostprocessor().SetDetDBThresh(0.3);
+ det_model.GetPostprocessor().SetDetDBBoxThresh(0.6);
+ det_model.GetPostprocessor().SetDetDBUnclipRatio(1.5);
+ det_model.GetPostprocessor().SetDetDBScoreMode("slow");
+ det_model.GetPostprocessor().SetUseDilation(0);
+ cls_model.GetPostprocessor().SetClsThresh(0.9);
+
+ // The classification model is optional, so the PP-OCR can also be connected
+ // in series as follows
+ // auto ppocr_v3 = fastdeploy::pipeline::PPOCRv3(&det_model, &rec_model);
+ auto ppocr_v3 =
+ fastdeploy::pipeline::PPOCRv3(&det_model, &cls_model, &rec_model);
+
+  // Set the inference batch size for the cls and rec models; valid values are
+  // -1 or any positive integer.
+  // When the batch size is set to -1, the cls and rec models use a batch size
+  // equal to the number of boxes detected by the det model.
+ ppocr_v3.SetClsBatchSize(cls_batch_size);
+ ppocr_v3.SetRecBatchSize(rec_batch_size);
+
+ if (!ppocr_v3.Initialized()) {
+ std::cerr << "Failed to initialize PP-OCR." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+ auto im_bak = im.clone();
+
+ fastdeploy::vision::OCRResult result;
+ if (!ppocr_v3.Predict(&im, &result)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+
+ std::cout << result.Str() << std::endl;
+
+ auto vis_im = fastdeploy::vision::VisOcr(im_bak, result);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+ if (argc < 7) {
+ std::cout << "Usage: infer_demo path/to/det_model path/to/cls_model "
+ "path/to/rec_model path/to/rec_label_file path/to/image "
+ "run_option, "
+ "e.g ./infer_demo ./ch_PP-OCRv3_det_infer "
+ "./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer "
+ "./ppocr_keys_v1.txt ./12.jpg 0"
+ << std::endl;
+ std::cout << "The data type of run_option is int, e.g. 0: run with paddle "
+ "inference on cpu;"
+ << std::endl;
+ return -1;
+ }
+
+ fastdeploy::RuntimeOption option;
+ int flag = std::atoi(argv[6]);
+
+ if (flag == 0) {
+ option.UseCpu();
+ option.UsePaddleBackend(); // Paddle Inference
+ } else if (flag == 1) {
+ option.UseCpu();
+ option.UseOpenVINOBackend(); // OpenVINO
+ } else if (flag == 2) {
+ option.UseCpu();
+ option.UseOrtBackend(); // ONNX Runtime
+ } else if (flag == 3) {
+ option.UseCpu();
+ option.UseLiteBackend(); // Paddle Lite
+ } else if (flag == 4) {
+ option.UseGpu();
+ option.UsePaddleBackend(); // Paddle Inference
+ } else if (flag == 5) {
+ option.UseGpu();
+ option.UseTrtBackend();
+ option.EnablePaddleTrtCollectShape();
+ option.EnablePaddleToTrt(); // Paddle-TensorRT
+ } else if (flag == 6) {
+ option.UseGpu();
+ option.UseOrtBackend(); // ONNX Runtime
+ } else if (flag == 7) {
+ option.UseGpu();
+ option.UseTrtBackend(); // TensorRT
+ }
+
+ std::string det_model_dir = argv[1];
+ std::string cls_model_dir = argv[2];
+ std::string rec_model_dir = argv[3];
+ std::string rec_label_file = argv[4];
+ std::string test_image = argv[5];
+ InitAndInfer(det_model_dir, cls_model_dir, rec_model_dir, rec_label_file,
+ test_image, option);
+ return 0;
+}
diff --git a/deploy/fastdeploy/cpu-gpu/cpp/infer_cls.cc b/deploy/fastdeploy/cpu-gpu/cpp/infer_cls.cc
new file mode 100644
index 0000000000000000000000000000000000000000..953d9683dab18c075051f51055cb0d034a1ecd76
--- /dev/null
+++ b/deploy/fastdeploy/cpu-gpu/cpp/infer_cls.cc
@@ -0,0 +1,80 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+#ifdef WIN32
+const char sep = '\\';
+#else
+const char sep = '/';
+#endif
+
+void InitAndInfer(const std::string &cls_model_dir,
+ const std::string &image_file,
+ const fastdeploy::RuntimeOption &option) {
+
+ auto cls_model_file = cls_model_dir + sep + "inference.pdmodel";
+ auto cls_params_file = cls_model_dir + sep + "inference.pdiparams";
+ auto cls_option = option;
+
+ auto cls_model = fastdeploy::vision::ocr::Classifier(
+ cls_model_file, cls_params_file, cls_option);
+ assert(cls_model.Initialized());
+
+ // Parameters settings for pre and post processing of Cls Model.
+ cls_model.GetPostprocessor().SetClsThresh(0.9);
+
+ auto im = cv::imread(image_file);
+ auto im_bak = im.clone();
+
+ fastdeploy::vision::OCRResult result;
+ if (!cls_model.Predict(im, &result)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+
+  // Users can also run batch inference with the following code:
+ // if (!cls_model.BatchPredict({im}, &result)) {
+ // std::cerr << "Failed to predict." << std::endl;
+ // return;
+ // }
+
+ std::cout << result.Str() << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+ if (argc < 4) {
+ std::cout << "Usage: infer_demo path/to/cls_model path/to/image "
+ "run_option, "
+ "e.g ./infer_demo ./ch_ppocr_mobile_v2.0_cls_infer ./12.jpg 0"
+ << std::endl;
+ std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
+ "with gpu;."
+ << std::endl;
+ return -1;
+ }
+
+ fastdeploy::RuntimeOption option;
+ int flag = std::atoi(argv[3]);
+
+ if (flag == 0) {
+ option.UseCpu();
+ } else if (flag == 1) {
+ option.UseGpu();
+ }
+
+ std::string cls_model_dir = argv[1];
+ std::string test_image = argv[2];
+ InitAndInfer(cls_model_dir, test_image, option);
+ return 0;
+}
\ No newline at end of file
diff --git a/deploy/fastdeploy/cpu-gpu/cpp/infer_det.cc b/deploy/fastdeploy/cpu-gpu/cpp/infer_det.cc
new file mode 100644
index 0000000000000000000000000000000000000000..8b1cea4b9ef9e5788624aac0e7edcb024dbf2605
--- /dev/null
+++ b/deploy/fastdeploy/cpu-gpu/cpp/infer_det.cc
@@ -0,0 +1,82 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+#ifdef WIN32
+const char sep = '\\';
+#else
+const char sep = '/';
+#endif
+
+void InitAndInfer(const std::string &det_model_dir,
+ const std::string &image_file,
+ const fastdeploy::RuntimeOption &option) {
+ auto det_model_file = det_model_dir + sep + "inference.pdmodel";
+ auto det_params_file = det_model_dir + sep + "inference.pdiparams";
+ auto det_option = option;
+
+ auto det_model = fastdeploy::vision::ocr::DBDetector(
+ det_model_file, det_params_file, det_option);
+ assert(det_model.Initialized());
+
+ // Parameters settings for pre and post processing of Det Model.
+ det_model.GetPreprocessor().SetMaxSideLen(960);
+ det_model.GetPostprocessor().SetDetDBThresh(0.3);
+ det_model.GetPostprocessor().SetDetDBBoxThresh(0.6);
+ det_model.GetPostprocessor().SetDetDBUnclipRatio(1.5);
+ det_model.GetPostprocessor().SetDetDBScoreMode("slow");
+ det_model.GetPostprocessor().SetUseDilation(0);
+
+ auto im = cv::imread(image_file);
+ auto im_bak = im.clone();
+
+ fastdeploy::vision::OCRResult result;
+ if (!det_model.Predict(im, &result)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+
+ std::cout << result.Str() << std::endl;
+
+ auto vis_im = fastdeploy::vision::VisOcr(im_bak, result);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+ if (argc < 4) {
+ std::cout << "Usage: infer_demo path/to/det_model path/to/image "
+ "run_option, "
+ "e.g ./infer_demo ./ch_PP-OCRv3_det_infer ./12.jpg 0"
+ << std::endl;
+ std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
+ "with gpu;."
+ << std::endl;
+ return -1;
+ }
+
+ fastdeploy::RuntimeOption option;
+ int flag = std::atoi(argv[3]);
+
+ if (flag == 0) {
+ option.UseCpu();
+ } else if (flag == 1) {
+ option.UseGpu();
+ }
+
+ std::string det_model_dir = argv[1];
+ std::string test_image = argv[2];
+ InitAndInfer(det_model_dir, test_image, option);
+ return 0;
+}
diff --git a/deploy/fastdeploy/cpu-gpu/cpp/infer_rec.cc b/deploy/fastdeploy/cpu-gpu/cpp/infer_rec.cc
new file mode 100644
index 0000000000000000000000000000000000000000..b1ab9d491172f076a787b0aa916ef6e366976186
--- /dev/null
+++ b/deploy/fastdeploy/cpu-gpu/cpp/infer_rec.cc
@@ -0,0 +1,84 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+#ifdef WIN32
+const char sep = '\\';
+#else
+const char sep = '/';
+#endif
+
+void InitAndInfer(const std::string &rec_model_dir,
+ const std::string &rec_label_file,
+ const std::string &image_file,
+ const fastdeploy::RuntimeOption &option) {
+
+ auto rec_model_file = rec_model_dir + sep + "inference.pdmodel";
+ auto rec_params_file = rec_model_dir + sep + "inference.pdiparams";
+ auto rec_option = option;
+
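+  // Besides the model and parameter files, the Recognizer needs the label
+  // (dictionary) file that maps output indices to characters,
+  // e.g. ppocr_keys_v1.txt.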
+ auto rec_model = fastdeploy::vision::ocr::Recognizer(
+ rec_model_file, rec_params_file, rec_label_file, rec_option);
+
+ assert(rec_model.Initialized());
+
+ auto im = cv::imread(image_file);
+ auto im_bak = im.clone();
+
+ fastdeploy::vision::OCRResult result;
+
+ if (!rec_model.Predict(im, &result)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+
+  // Users can infer a batch of images with the following code.
+ // if (!rec_model.BatchPredict({im}, &result)) {
+ // std::cerr << "Failed to predict." << std::endl;
+ // return;
+ // }
+
+ std::cout << result.Str() << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+ if (argc < 5) {
+ std::cout << "Usage: infer_demo"
+ "path/to/rec_model path/to/rec_label_file path/to/image "
+ "run_option, "
+ "e.g ./infer_demo "
+ "./ch_PP-OCRv3_rec_infer "
+ "./ppocr_keys_v1.txt ./12.jpg 0"
+ << std::endl;
+ std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
+ "with gpu;"
+ << std::endl;
+ return -1;
+ }
+
+ fastdeploy::RuntimeOption option;
+ int flag = std::atoi(argv[4]);
+
+ if (flag == 0) {
+ option.UseCpu();
+ } else if (flag == 1) {
+ option.UseGpu();
+ }
+
+ std::string rec_model_dir = argv[1];
+ std::string rec_label_file = argv[2];
+ std::string test_image = argv[3];
+ InitAndInfer(rec_model_dir, rec_label_file, test_image, option);
+ return 0;
+}
diff --git a/deploy/fastdeploy/cpu-gpu/csharp/CMakeLists.txt b/deploy/fastdeploy/cpu-gpu/csharp/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7ae8e2aba35b71c4f92cc908f1baa983bce0757b
--- /dev/null
+++ b/deploy/fastdeploy/cpu-gpu/csharp/CMakeLists.txt
@@ -0,0 +1,22 @@
+PROJECT(infer_demo CSharp)
+CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
+
+# Set the C# language version (defaults to 3.0 if not set).
+set(CMAKE_CSharp_FLAGS "/langversion:10")
+set(CMAKE_DOTNET_TARGET_FRAMEWORK "net6.0")
+set(CMAKE_DOTNET_SDK "Microsoft.NET.Sdk")
+
+# Path of the downloaded and extracted FastDeploy SDK
+option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
+
+include(${FASTDEPLOY_INSTALL_DIR}/FastDeployCSharp.cmake)
+
+
+add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cs)
+
+set_property(TARGET infer_demo PROPERTY VS_DOTNET_REFERENCES
+ ${FASTDEPLOY_DOTNET_REFERENCES}
+)
+
+set_property(TARGET infer_demo
+ PROPERTY VS_PACKAGE_REFERENCES ${FASTDEPLOY_PACKAGE_REFERENCES})
diff --git a/deploy/fastdeploy/cpu-gpu/csharp/README.md b/deploy/fastdeploy/cpu-gpu/csharp/README.md
new file mode 100755
index 0000000000000000000000000000000000000000..335c774aed01cacd26ebdd82af924b8f62eebeb2
--- /dev/null
+++ b/deploy/fastdeploy/cpu-gpu/csharp/README.md
@@ -0,0 +1,167 @@
+[English](README.md) | 简体中文
+# PaddleOCR CPU-GPU C# Deployment Example
+
+This directory provides `infer.cs`, an example of using the C# API to quickly deploy PP-OCRv3 models on CPU/GPU.
+
+## 1. Overview
+PaddleOCR supports using FastDeploy to quickly deploy OCR models on NVIDIA GPU, X86 CPU, Phytium CPU, ARM CPU, and Intel GPU (discrete/integrated graphics) hardware.
+
+## 2. Environment Preparation
+Before deployment, confirm your hardware and software environment and download the prebuilt FastDeploy library; see the [FastDeploy installation docs](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install#FastDeploy预编译库安装). Running the commands below in this directory builds and tests the example on Windows; this model requires FastDeploy 1.0.4 or later (x.x.x>=1.0.4).
+
+## 3. Model Preparation
+Before deployment, prepare the inference models you need; they can be downloaded from the [list of PaddleOCR models supported by FastDeploy](../README.md).
+
+## 4. Deployment Example
+
+### 4.1 Download the NuGet client (C# package manager)
+> https://dist.nuget.org/win-x86-commandline/v6.4.0/nuget.exe
+After downloading, add the program to the **PATH** environment variable.
+
+### 4.2 Download model files and a test image
+> https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar # (extract after downloading)
+> https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar
+> https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar
+> https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg
+> https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt
+
+### 4.3 Build the example
+
+The example is built with VS 2019. **On Windows, open the "x64 Native Tools Command Prompt for VS 2019"** and build with the following commands:
+
+
+```shell
+# Download the prebuilt FastDeploy library; choose a suitable version from the prebuilt
+# packages mentioned above (for this Windows demo, pick a matching win-x64 build).
+https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-x.x.x.tgz
+
+# Download the deployment example code
+git clone https://github.com/PaddlePaddle/PaddleOCR.git
+cd D:\PaddleOCR\deploy\fastdeploy\cpu-gpu\csharp
+
+mkdir build && cd build
+cmake .. -G "Visual Studio 16 2019" -A x64 -DFASTDEPLOY_INSTALL_DIR=D:\fastdeploy-win-x64-gpu-x.x.x -DCUDA_DIRECTORY="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.2"
+
+nuget restore
+msbuild infer_demo.sln /m:4 /p:Configuration=Release /p:Platform=x64
+```
+
+For more details on building with a Visual Studio 2019 solution or a CMake project, see:
+- [Using the FastDeploy C++ SDK on Windows](https://github.com/PaddlePaddle/FastDeploy/tree/develop/docs/cn/faq/use_sdk_on_windows.md)
+- [Ways to build with the FastDeploy C++ library on Windows](https://github.com/PaddlePaddle/FastDeploy/tree/develop/docs/cn/faq/use_sdk_on_windows_build.md)
+
+### 4.4 Run the executable
+
+Note that on Windows, the libraries FastDeploy depends on must be copied next to the executable, or added to the environment variables. FastDeploy provides a tool that copies all required DLLs to the executable's directory; run the following commands (the executable may sit one more level below Release; here we assume it is directly in Release):
+```shell
+cd D:\fastdeploy-win-x64-gpu-x.x.x
+
+fastdeploy_init.bat install %cd% D:\PaddleOCR\deploy\fastdeploy\cpu-gpu\csharp\build\Release
+```
+
+After copying the DLLs, prepare the models and the image, then run the executable:
+```shell
+cd Release
+# CPU inference
+infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 0
+# GPU inference
+infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 1
+```
+
+## 5. PP-OCRv3 C# API Overview
+The PP-OCRv3 C# APIs are briefly described below.
+
+- To switch the deployment backend or do other customization, see the [C# Runtime API](https://github.com/PaddlePaddle/FastDeploy/blob/develop/csharp/fastdeploy/runtime_option.cs).
+- For more PP-OCR C# APIs, see the [C# PP-OCR API](https://github.com/PaddlePaddle/FastDeploy/blob/develop/csharp/fastdeploy/vision/ocr/model.cs)
+
+### Models
+
+```c#
+fastdeploy.vision.ocr.DBDetector(
+ string model_file,
+ string params_file,
+ fastdeploy.RuntimeOption runtime_option = null,
+ fastdeploy.ModelFormat model_format = ModelFormat.PADDLE)
+```
+
+> Loads and initializes the DBDetector model.
+
+> **Parameters**
+
+>> * **model_file**(str): Path of the model file
+>> * **params_file**(str): Path of the parameters file
+>> * **runtime_option**(RuntimeOption): Backend inference configuration; null (the default) uses the default configuration
+>> * **model_format**(ModelFormat): Model format; defaults to the PADDLE format
+
+```c#
+fastdeploy.vision.ocr.Classifier(
+ string model_file,
+ string params_file,
+ fastdeploy.RuntimeOption runtime_option = null,
+ fastdeploy.ModelFormat model_format = ModelFormat.PADDLE)
+```
+
+> Loads and initializes the Classifier model.
+
+> **Parameters**
+
+>> * **model_file**(str): Path of the model file
+>> * **params_file**(str): Path of the parameters file
+>> * **runtime_option**(RuntimeOption): Backend inference configuration; null (the default) uses the default configuration
+>> * **model_format**(ModelFormat): Model format; defaults to the PADDLE format
+
+```c#
+fastdeploy.vision.ocr.Recognizer(
+ string model_file,
+ string params_file,
+ string label_path,
+ fastdeploy.RuntimeOption runtime_option = null,
+ fastdeploy.ModelFormat model_format = ModelFormat.PADDLE)
+```
+
+> Loads and initializes the Recognizer model.
+
+> **Parameters**
+
+>> * **model_file**(str): Path of the model file
+>> * **params_file**(str): Path of the parameters file
+>> * **label_path**(str): Path of the label file
+>> * **runtime_option**(RuntimeOption): Backend inference configuration; null (the default) uses the default configuration
+>> * **model_format**(ModelFormat): Model format; defaults to the PADDLE format
+
+```c#
+fastdeploy.pipeline.PPOCRv3(
+ DBDetector dbdetector,
+ Classifier classifier,
+ Recognizer recognizer)
+```
+
+> Loads and initializes the PP-OCRv3 pipeline (the class is named `PPOCRv3` in the C# binding, matching `infer.cs`).
+
+> **Parameters**
+
+>> * **dbdetector**(DBDetector): the DBDetector model
+>> * **classifier**(Classifier): the Classifier model
+>> * **recognizer**(Recognizer): the Recognizer model
+
+#### Predict
+
+```c#
+fastdeploy.OCRResult Predict(OpenCvSharp.Mat im)
+```
+
+> Prediction interface: takes an image and returns the OCR result directly.
+>
+> **Parameters**
+>
+>> * **im**(Mat): input image; note it must be in HWC, BGR format
+>>
+> **Returns**
+>
+>> * **result**: the OCR result, including box positions from the detection model, orientation labels from the classification model, and text from the recognition model; see [vision model results](../../../../../docs/api/vision_results/) for the OCRResult description
+
+
+## 6. Related Docs
+- [Overview of deploying PaddleOCR models with FastDeploy](../../)
+- [PP-OCRv3 Python deployment](../python)
+- [PP-OCRv3 C++ deployment](../cpp)
+- [PP-OCRv3 C deployment](../c)
diff --git a/deploy/fastdeploy/cpu-gpu/csharp/infer.cs b/deploy/fastdeploy/cpu-gpu/csharp/infer.cs
new file mode 100644
index 0000000000000000000000000000000000000000..962500e08a1b1e8b8cd19350d0137c1243a88d79
--- /dev/null
+++ b/deploy/fastdeploy/cpu-gpu/csharp/infer.cs
@@ -0,0 +1,79 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+using System;
+using System.IO;
+using System.Runtime.InteropServices;
+using OpenCvSharp;
+using fastdeploy;
+
+namespace Test
+{
+ public class TestPPOCRv3
+ {
+ public static void Main(string[] args)
+ {
+ if (args.Length < 6) {
+ Console.WriteLine(
+ "Usage: infer_demo path/to/det_model path/to/cls_model " +
+ "path/to/rec_model path/to/rec_label_file path/to/image " +
+ "run_option, " +
+ "e.g ./infer_demo ./ch_PP-OCRv2_det_infer " +
+ "./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv2_rec_infer " +
+ "./ppocr_keys_v1.txt ./12.jpg 0"
+ );
+ Console.WriteLine( "The data type of run_option is int, 0: run with cpu; 1: run with gpu");
+ return;
+ }
+ string det_model_dir = args[0];
+ string cls_model_dir = args[1];
+ string rec_model_dir = args[2];
+ string rec_label_file = args[3];
+ string image_path = args[4];
+ RuntimeOption runtimeoption = new RuntimeOption();
+ int device_option = Int32.Parse(args[5]);
+ if(device_option==0){
+ runtimeoption.UseCpu();
+ }else{
+ runtimeoption.UseGpu();
+ }
+ string sep = "\\";
+ string det_model_file = det_model_dir + sep + "inference.pdmodel";
+ string det_params_file = det_model_dir + sep + "inference.pdiparams";
+
+ string cls_model_file = cls_model_dir + sep + "inference.pdmodel";
+ string cls_params_file = cls_model_dir + sep + "inference.pdiparams";
+
+ string rec_model_file = rec_model_dir + sep + "inference.pdmodel";
+ string rec_params_file = rec_model_dir + sep + "inference.pdiparams";
+
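+      // Build the detector, classifier and recognizer, then chain them into
+      // the PP-OCRv3 pipeline below.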
+ fastdeploy.vision.ocr.DBDetector dbdetector = new fastdeploy.vision.ocr.DBDetector(det_model_file, det_params_file, runtimeoption, ModelFormat.PADDLE);
+ fastdeploy.vision.ocr.Classifier classifier = new fastdeploy.vision.ocr.Classifier(cls_model_file, cls_params_file, runtimeoption, ModelFormat.PADDLE);
+ fastdeploy.vision.ocr.Recognizer recognizer = new fastdeploy.vision.ocr.Recognizer(rec_model_file, rec_params_file, rec_label_file, runtimeoption, ModelFormat.PADDLE);
+ fastdeploy.pipeline.PPOCRv3 model = new fastdeploy.pipeline.PPOCRv3(dbdetector, classifier, recognizer);
+      if(!model.Initialized()){
+        Console.WriteLine("Failed to initialize.\n");
+        return;
+      }
+ Mat image = Cv2.ImRead(image_path);
+ fastdeploy.vision.OCRResult res = model.Predict(image);
+ Console.WriteLine(res.ToString());
+ Mat res_img = fastdeploy.vision.Visualize.VisOcr(image, res);
+ Cv2.ImShow("result.png", res_img);
+ Cv2.ImWrite("result.png", res_img);
+ Cv2.WaitKey(0);
+
+ }
+
+ }
+}
\ No newline at end of file
diff --git a/deploy/fastdeploy/cpu-gpu/python/README.md b/deploy/fastdeploy/cpu-gpu/python/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9ddb940f196074836fea6df907ffce0ba96ef8fb
--- /dev/null
+++ b/deploy/fastdeploy/cpu-gpu/python/README.md
@@ -0,0 +1,147 @@
+[English](README.md) | 简体中文
+# PaddleOCR CPU-GPU Python Deployment Example
+This directory provides `infer.py` for quickly deploying PP-OCRv3 on CPU/GPU, as well as GPU deployment accelerated by Paddle-TensorRT.
+
+## 1. Overview
+PaddleOCR supports using FastDeploy to quickly deploy OCR models on NVIDIA GPU, X86 CPU, Phytium CPU, ARM CPU, and Intel GPU (discrete/integrated graphics) hardware.
+
+## 2. Environment Preparation
+Before deployment, confirm your hardware and software environment and download the prebuilt FastDeploy library; see the [FastDeploy installation docs](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install#FastDeploy预编译库安装).
+
+## 3. Model Preparation
+Before deployment, prepare the inference models you need; they can be downloaded from the [list of PaddleOCR models supported by FastDeploy](../README.md).
+
+## 4. Run the Deployment Example
+```bash
+# Install the FastDeploy python package (see "Environment Preparation" above for details)
+pip install fastdeploy-gpu-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html
+conda config --add channels conda-forge && conda install cudatoolkit=11.2 cudnn=8.2
+
+# Download the deployment example code
+git clone https://github.com/PaddlePaddle/PaddleOCR.git
+cd PaddleOCR/deploy/fastdeploy/cpu-gpu/python
+
+# Download the PP-OCRv3 text detection model
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar
+tar -xvf ch_PP-OCRv3_det_infer.tar
+# Download the text angle classifier model
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar
+tar -xvf ch_ppocr_mobile_v2.0_cls_infer.tar
+# Download the PP-OCRv3 text recognition model
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar
+tar -xvf ch_PP-OCRv3_rec_infer.tar
+
+# Download the test image and dictionary file
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt
+
+# Run the deployment example
+# Paddle Inference on CPU
+python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device cpu --backend paddle
+# OpenVINO on CPU
+python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device cpu --backend openvino
+# ONNX Runtime on CPU
+python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device cpu --backend ort
+# Paddle Lite on CPU
+python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device cpu --backend pplite
+# Paddle Inference on GPU
+python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device gpu --backend paddle
+# Paddle-TensorRT on GPU
+python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device gpu --backend pptrt
+# ONNX Runtime on GPU
+python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device gpu --backend ort
+# NVIDIA TensorRT on GPU
+python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device gpu --backend trt
+
+# FastDeploy can also run the text detection, classification and recognition models
+# individually; prepare suitable images and see infer.py to configure custom hardware and backends.
+
+# Text detection model only, on CPU
+python infer_det.py --det_model ch_PP-OCRv3_det_infer --image 12.jpg --device cpu
+
+# Text angle classification model only, on CPU
+python infer_cls.py --cls_model ch_ppocr_mobile_v2.0_cls_infer --image 12.jpg --device cpu
+
+# Text recognition model only, on CPU
+python infer_rec.py --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device cpu
+
+```
+
+After it finishes, the visualized result looks like the image below.
+
+
+
+
+## 5. Example Options
+
+|Parameter|Description|Default|
+|---|---|---|
+|--det_model|Path of the detection model directory|None|
+|--cls_model|Path of the classification model directory|None|
+|--rec_model|Path of the recognition model directory|None|
+|--rec_label_file|Path of the label file used by the recognition model|None|
+|--image|Path of the test image|None|
+|--device|Device to run on, one of `[cpu, gpu]`; `cpu` covers x86 CPU, ARM CPU, etc.|cpu|
+|--device_id|GPU device id when using gpu|0|
+|--backend|Backend used to deploy the model, one of `[paddle,pptrt,pplite,ort,openvino,trt]`|paddle|
+
+For how to use more inference backends and hardware with FastDeploy, see [How to switch the model inference backend](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/faq/how_to_change_backend.md).
+
+## 6. More Guides
+
+### 6.1 Deploying PP-OCRv2 models with Python
+`infer.py` in this directory takes PP-OCRv3 as its example; to use PP-OCRv2 instead, create the pipeline as shown below.
+
+```python
+# This line creates the PP-OCRv3 model
+ppocr_v3 = fd.vision.ocr.PPOCRv3(det_model=det_model, cls_model=cls_model, rec_model=rec_model)
+# Simply replace PPOCRv3 with PPOCRv2 to create a PP-OCRv2 model; later calls then go through ppocr_v2
+ppocr_v2 = fd.vision.ocr.PPOCRv2(det_model=det_model, cls_model=cls_model, rec_model=rec_model)
+
+# When deploying PP-OCRv2 with TensorRT inference, the rec model's TensorRT input shape
+# must also be changed: set the H dimension to 32 and adjust the W dimension as needed.
+rec_option.set_trt_input_shape("x", [1, 3, 32, 10],
+ [args.rec_bs, 3, 32, 320],
+ [args.rec_bs, 3, 32, 2304])
+```
+
+### 6.2 Disabling the text angle classifier in PP-OCRv2/v3 pipelines
+
+The text angle classifier is optional in PP-OCRv3/v2; decide whether to use it as follows.
+```python
+# With the Cls model
+ppocr_v3 = fd.vision.ocr.PPOCRv3(det_model=det_model, cls_model=cls_model, rec_model=rec_model)
+
+# Without the Cls model
+ppocr_v3 = fd.vision.ocr.PPOCRv3(det_model=det_model, cls_model=None, rec_model=rec_model)
+```
+### 6.3 Tuning pre/post-processing hyperparameters
+The example code shows the interfaces for changing the pre/post-processing hyperparameters, set to their default values. The hyperparameters exposed by FastDeploy have the same meaning as in [PaddleOCR inference model parameters](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/inference_args.md). For further customization, read the [PP-OCR Python API reference](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/python/html/ocr.html). The block below lists the knobs used in `infer.py` with their defaults.
+
+```python
+# Set the detection model's max_side_len
+det_model.preprocessor.max_side_len = 960
+# The other knobs set in infer.py, shown with their default values
+det_model.postprocessor.det_db_thresh = 0.3
+det_model.postprocessor.det_db_box_thresh = 0.6
+det_model.postprocessor.det_db_unclip_ratio = 1.5
+det_model.postprocessor.det_db_score_mode = "slow"
+det_model.postprocessor.use_dilation = False
+```
+
+### 6.4 Other guides
+- [Overview of deploying PaddleOCR models with FastDeploy](../../)
+- [PP-OCRv3 C++ deployment](../cpp)
+- [PP-OCRv3 C deployment](../c)
+- [PP-OCRv3 C# deployment](../csharp)
+
+## 7. FAQ
+- PaddleOCR can run on several backends supported by FastDeploy, as listed in the table below; for switching backends, see [How to switch the model inference backend](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/faq/how_to_change_backend.md) and the sketch after the table.
+
+|Hardware|Supported backends|
+|:---:|:---:|
+|X86 CPU| Paddle Inference, ONNX Runtime, OpenVINO |
+|ARM CPU| Paddle Lite |
+|Phytium CPU| ONNX Runtime |
+|NVIDIA GPU| Paddle Inference, ONNX Runtime, TensorRT |
+
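+The sketch below shows the general pattern for picking a device and a backend through `fd.RuntimeOption`; it mirrors what `build_option` in `infer.py` does and is a minimal sketch rather than a complete program.
+
+```python
+import fastdeploy as fd
+
+option = fd.RuntimeOption()
+# Pick the device first ...
+option.use_gpu(0)         # or option.use_cpu()
+# ... then a backend available on that device.
+option.use_trt_backend()  # e.g. option.use_openvino_backend() on CPU
+
+# Pass the option when creating a model, e.g. the PP-OCRv3 detector:
+det_model = fd.vision.ocr.DBDetector(
+    "ch_PP-OCRv3_det_infer/inference.pdmodel",
+    "ch_PP-OCRv3_det_infer/inference.pdiparams",
+    runtime_option=option)
+```
+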
+- [How to convert model prediction results to numpy format](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/faq/vision_result_related_problems.md)
+- [Using Intel GPU (discrete/integrated graphics)](https://github.com/PaddlePaddle/FastDeploy/blob/develop/tutorials/intel_gpu/README.md)
+- [Building the CPU deployment library](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install/cpu.md)
+- [Building the GPU deployment library](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install/gpu.md)
+- [Building the Jetson deployment library](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install/jetson.md)
diff --git a/deploy/fastdeploy/cpu-gpu/python/infer.py b/deploy/fastdeploy/cpu-gpu/python/infer.py
new file mode 100755
index 0000000000000000000000000000000000000000..cb5d21c4b944a683158905be217b8740ebcf97e7
--- /dev/null
+++ b/deploy/fastdeploy/cpu-gpu/python/infer.py
@@ -0,0 +1,218 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fastdeploy as fd
+import cv2
+import os
+
+
+def parse_arguments():
+ import argparse
+ import ast
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--det_model", required=True, help="Path of Detection model of PPOCR.")
+ parser.add_argument(
+ "--cls_model",
+ required=True,
+ help="Path of Classification model of PPOCR.")
+    parser.add_argument(
+        "--rec_model",
+        required=True,
+        help="Path of Recognition model of PPOCR.")
+    parser.add_argument(
+        "--rec_label_file",
+        required=True,
+        help="Path of the label file used by the Recognition model of PPOCR.")
+ parser.add_argument(
+ "--image", type=str, required=True, help="Path of test image file.")
+ parser.add_argument(
+ "--device",
+ type=str,
+ default='cpu',
+ help="Type of inference device, support 'cpu' or 'gpu'.")
+ parser.add_argument(
+ "--device_id",
+ type=int,
+ default=0,
+ help="Define which GPU card used to run model.")
+ parser.add_argument(
+ "--cls_bs",
+ type=int,
+ default=1,
+ help="Classification model inference batch size.")
+ parser.add_argument(
+ "--rec_bs",
+ type=int,
+ default=6,
+ help="Recognition model inference batch size")
+ parser.add_argument(
+ "--backend",
+ type=str,
+ default="default",
+ help="Type of inference backend, support ort/trt/paddle/openvino, default 'openvino' for cpu, 'tensorrt' for gpu"
+ )
+
+ return parser.parse_args()
+
+
+def build_option(args):
+
+ det_option = fd.RuntimeOption()
+ cls_option = fd.RuntimeOption()
+ rec_option = fd.RuntimeOption()
+
+ if args.device.lower() == "gpu":
+ det_option.use_gpu(args.device_id)
+ cls_option.use_gpu(args.device_id)
+ rec_option.use_gpu(args.device_id)
+
+ if args.backend.lower() == "trt":
+ assert args.device.lower(
+ ) == "gpu", "TensorRT backend require inference on device GPU."
+ det_option.use_trt_backend()
+ cls_option.use_trt_backend()
+ rec_option.use_trt_backend()
+
+        # When using the TRT backend, the dynamic shapes are set as follows.
+        # We recommend that the height and width of the detection model's input be multiples of 32.
+        # We also recommend setting the TRT input shapes as follows.
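+        # The three shapes passed to set_trt_input_shape are the minimum,
+        # optimum and maximum shapes the built TensorRT engine will accept.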
+ det_option.set_trt_input_shape("x", [1, 3, 64, 64], [1, 3, 640, 640],
+ [1, 3, 960, 960])
+ cls_option.set_trt_input_shape("x", [1, 3, 48, 10],
+ [args.cls_bs, 3, 48, 320],
+ [args.cls_bs, 3, 48, 1024])
+ rec_option.set_trt_input_shape("x", [1, 3, 48, 10],
+ [args.rec_bs, 3, 48, 320],
+ [args.rec_bs, 3, 48, 2304])
+
+        # Users can save the TRT cache file to disk as follows.
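+        # On later runs the cache is loaded from disk, so the TensorRT engine
+        # does not need to be rebuilt.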
+ det_option.set_trt_cache_file(args.det_model + "/det_trt_cache.trt")
+ cls_option.set_trt_cache_file(args.cls_model + "/cls_trt_cache.trt")
+ rec_option.set_trt_cache_file(args.rec_model + "/rec_trt_cache.trt")
+
+ elif args.backend.lower() == "pptrt":
+ assert args.device.lower(
+ ) == "gpu", "Paddle-TensorRT backend require inference on device GPU."
+ det_option.use_trt_backend()
+ det_option.enable_paddle_trt_collect_shape()
+ det_option.enable_paddle_to_trt()
+
+ cls_option.use_trt_backend()
+ cls_option.enable_paddle_trt_collect_shape()
+ cls_option.enable_paddle_to_trt()
+
+ rec_option.use_trt_backend()
+ rec_option.enable_paddle_trt_collect_shape()
+ rec_option.enable_paddle_to_trt()
+
+        # When using the TRT backend, the dynamic shapes are set as follows.
+        # We recommend that the height and width of the detection model's input be multiples of 32.
+        # We also recommend setting the TRT input shapes as follows.
+ det_option.set_trt_input_shape("x", [1, 3, 64, 64], [1, 3, 640, 640],
+ [1, 3, 960, 960])
+ cls_option.set_trt_input_shape("x", [1, 3, 48, 10],
+ [args.cls_bs, 3, 48, 320],
+ [args.cls_bs, 3, 48, 1024])
+ rec_option.set_trt_input_shape("x", [1, 3, 48, 10],
+ [args.rec_bs, 3, 48, 320],
+ [args.rec_bs, 3, 48, 2304])
+
+        # Users can save the TRT cache file to disk as follows.
+ det_option.set_trt_cache_file(args.det_model)
+ cls_option.set_trt_cache_file(args.cls_model)
+ rec_option.set_trt_cache_file(args.rec_model)
+
+ elif args.backend.lower() == "ort":
+ det_option.use_ort_backend()
+ cls_option.use_ort_backend()
+ rec_option.use_ort_backend()
+
+ elif args.backend.lower() == "paddle":
+ det_option.use_paddle_infer_backend()
+ cls_option.use_paddle_infer_backend()
+ rec_option.use_paddle_infer_backend()
+
+ elif args.backend.lower() == "openvino":
+ assert args.device.lower(
+ ) == "cpu", "OpenVINO backend require inference on device CPU."
+ det_option.use_openvino_backend()
+ cls_option.use_openvino_backend()
+ rec_option.use_openvino_backend()
+
+ elif args.backend.lower() == "pplite":
+ assert args.device.lower(
+ ) == "cpu", "Paddle Lite backend require inference on device CPU."
+ det_option.use_lite_backend()
+ cls_option.use_lite_backend()
+ rec_option.use_lite_backend()
+
+ return det_option, cls_option, rec_option
+
+
+args = parse_arguments()
+
+det_model_file = os.path.join(args.det_model, "inference.pdmodel")
+det_params_file = os.path.join(args.det_model, "inference.pdiparams")
+
+cls_model_file = os.path.join(args.cls_model, "inference.pdmodel")
+cls_params_file = os.path.join(args.cls_model, "inference.pdiparams")
+
+rec_model_file = os.path.join(args.rec_model, "inference.pdmodel")
+rec_params_file = os.path.join(args.rec_model, "inference.pdiparams")
+rec_label_file = args.rec_label_file
+
+det_option, cls_option, rec_option = build_option(args)
+
+det_model = fd.vision.ocr.DBDetector(
+ det_model_file, det_params_file, runtime_option=det_option)
+
+cls_model = fd.vision.ocr.Classifier(
+ cls_model_file, cls_params_file, runtime_option=cls_option)
+
+rec_model = fd.vision.ocr.Recognizer(
+ rec_model_file, rec_params_file, rec_label_file, runtime_option=rec_option)
+
+# Parameters settings for pre and post processing of Det/Cls/Rec Models.
+# All parameters are set to default values.
+det_model.preprocessor.max_side_len = 960
+det_model.postprocessor.det_db_thresh = 0.3
+det_model.postprocessor.det_db_box_thresh = 0.6
+det_model.postprocessor.det_db_unclip_ratio = 1.5
+det_model.postprocessor.det_db_score_mode = "slow"
+det_model.postprocessor.use_dilation = False
+cls_model.postprocessor.cls_thresh = 0.9
+
+# Create PP-OCRv3, if cls_model is not needed, just set cls_model=None .
+ppocr_v3 = fd.vision.ocr.PPOCRv3(
+ det_model=det_model, cls_model=cls_model, rec_model=rec_model)
+
+# Set the inference batch size for the cls and rec models; the value can be -1 or any positive integer.
+# When the batch size is set to -1, the inference batch size of the cls and rec
+# models equals the number of boxes detected by the det model.
+ppocr_v3.cls_batch_size = args.cls_bs
+ppocr_v3.rec_batch_size = args.rec_bs
+
+# Read the input image
+im = cv2.imread(args.image)
+
+# Predict and return the results
+result = ppocr_v3.predict(im)
+
+print(result)
+
+# Visualize the results.
+vis_im = fd.vision.vis_ppocr(im, result)
+cv2.imwrite("visualized_result.jpg", vis_im)
+print("Visualized result saved in ./visualized_result.jpg")
diff --git a/deploy/fastdeploy/cpu-gpu/python/infer_cls.py b/deploy/fastdeploy/cpu-gpu/python/infer_cls.py
new file mode 100755
index 0000000000000000000000000000000000000000..b34868daef9e46ae59b5cb60fce1ff66fdf1bfd2
--- /dev/null
+++ b/deploy/fastdeploy/cpu-gpu/python/infer_cls.py
@@ -0,0 +1,77 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fastdeploy as fd
+import cv2
+import os
+
+
+def parse_arguments():
+ import argparse
+ import ast
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--cls_model",
+ required=True,
+ help="Path of Classification model of PPOCR.")
+ parser.add_argument(
+ "--image", type=str, required=True, help="Path of test image file.")
+ parser.add_argument(
+ "--device",
+ type=str,
+ default='cpu',
+ help="Type of inference device, support 'cpu', 'kunlunxin' or 'gpu'.")
+ parser.add_argument(
+ "--device_id",
+ type=int,
+ default=0,
+ help="Define which GPU card used to run model.")
+ return parser.parse_args()
+
+
+def build_option(args):
+
+ cls_option = fd.RuntimeOption()
+
+ if args.device.lower() == "gpu":
+ cls_option.use_gpu(args.device_id)
+
+ return cls_option
+
+
+args = parse_arguments()
+
+cls_model_file = os.path.join(args.cls_model, "inference.pdmodel")
+cls_params_file = os.path.join(args.cls_model, "inference.pdiparams")
+
+# Set the runtime option
+cls_option = build_option(args)
+
+# Create the cls_model
+cls_model = fd.vision.ocr.Classifier(
+ cls_model_file, cls_params_file, runtime_option=cls_option)
+
+# Set the postprocessing parameters: predictions whose rotation score exceeds
+# cls_thresh are treated as rotated by 180 degrees.
+cls_model.postprocessor.cls_thresh = 0.9
+
+# Read the image
+im = cv2.imread(args.image)
+
+# Predict and return the results
+result = cls_model.predict(im)
+
+# Users can infer a batch of images with the following code.
+# result = cls_model.batch_predict([im])
+
+print(result)
diff --git a/deploy/fastdeploy/cpu-gpu/python/infer_det.py b/deploy/fastdeploy/cpu-gpu/python/infer_det.py
new file mode 100755
index 0000000000000000000000000000000000000000..7a7f5a07b7f57932ddc2aa33b4624f0399691bb0
--- /dev/null
+++ b/deploy/fastdeploy/cpu-gpu/python/infer_det.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fastdeploy as fd
+import cv2
+import os
+
+
+def parse_arguments():
+ import argparse
+ import ast
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--det_model", required=True, help="Path of Detection model of PPOCR.")
+ parser.add_argument(
+ "--image", type=str, required=True, help="Path of test image file.")
+ parser.add_argument(
+ "--device",
+ type=str,
+ default='cpu',
+ help="Type of inference device, support 'cpu', 'kunlunxin' or 'gpu'.")
+ parser.add_argument(
+ "--device_id",
+ type=int,
+ default=0,
+ help="Define which GPU card used to run model.")
+ return parser.parse_args()
+
+
+def build_option(args):
+
+ det_option = fd.RuntimeOption()
+
+ if args.device.lower() == "gpu":
+ det_option.use_gpu(args.device_id)
+
+ return det_option
+
+
+args = parse_arguments()
+
+det_model_file = os.path.join(args.det_model, "inference.pdmodel")
+det_params_file = os.path.join(args.det_model, "inference.pdiparams")
+
+# Set the runtime option
+det_option = build_option(args)
+
+# Create the det_model
+det_model = fd.vision.ocr.DBDetector(
+ det_model_file, det_params_file, runtime_option=det_option)
+
+# Set the preprocessing and postprocessing parameters
+det_model.preprocessor.max_side_len = 960
+det_model.postprocessor.det_db_thresh = 0.3
+det_model.postprocessor.det_db_box_thresh = 0.6
+det_model.postprocessor.det_db_unclip_ratio = 1.5
+det_model.postprocessor.det_db_score_mode = "slow"
+det_model.postprocessor.use_dilation = False
+
+# Read the image
+im = cv2.imread(args.image)
+
+# Predict and return the results
+result = det_model.predict(im)
+
+print(result)
+
+# Visualize the results
+vis_im = fd.vision.vis_ppocr(im, result)
+cv2.imwrite("visualized_result.jpg", vis_im)
+print("Visualized result save in ./visualized_result.jpg")
diff --git a/deploy/fastdeploy/cpu-gpu/python/infer_rec.py b/deploy/fastdeploy/cpu-gpu/python/infer_rec.py
new file mode 100755
index 0000000000000000000000000000000000000000..6f9e03b20ec3a3d4e382a7b237564cc496e09c25
--- /dev/null
+++ b/deploy/fastdeploy/cpu-gpu/python/infer_rec.py
@@ -0,0 +1,79 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fastdeploy as fd
+import cv2
+import os
+
+
+def parse_arguments():
+ import argparse
+ import ast
+ parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--rec_model",
+        required=True,
+        help="Path of Recognition model of PPOCR.")
+    parser.add_argument(
+        "--rec_label_file",
+        required=True,
+        help="Path of the label file used by the Recognition model of PPOCR.")
+ parser.add_argument(
+ "--image", type=str, required=True, help="Path of test image file.")
+ parser.add_argument(
+ "--device",
+ type=str,
+ default='cpu',
+ help="Type of inference device, support 'cpu', 'kunlunxin' or 'gpu'.")
+ parser.add_argument(
+ "--device_id",
+ type=int,
+ default=0,
+ help="Define which GPU card used to run model.")
+ return parser.parse_args()
+
+
+def build_option(args):
+
+ rec_option = fd.RuntimeOption()
+
+ if args.device.lower() == "gpu":
+ rec_option.use_gpu(args.device_id)
+
+ return rec_option
+
+
+args = parse_arguments()
+
+rec_model_file = os.path.join(args.rec_model, "inference.pdmodel")
+rec_params_file = os.path.join(args.rec_model, "inference.pdiparams")
+rec_label_file = args.rec_label_file
+
+# Set the runtime option
+rec_option = build_option(args)
+
+# Create the rec_model
+rec_model = fd.vision.ocr.Recognizer(
+ rec_model_file, rec_params_file, rec_label_file, runtime_option=rec_option)
+
+# Read the image
+im = cv2.imread(args.image)
+
+# Predict and return the result
+result = rec_model.predict(im)
+
+# Users can infer a batch of images with the following code.
+# result = rec_model.batch_predict([im])
+
+print(result)
diff --git a/deploy/fastdeploy/kunlun/README.md b/deploy/fastdeploy/kunlun/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..16487674c9d503ed5db3c6422938dfe1993074ef
--- /dev/null
+++ b/deploy/fastdeploy/kunlun/README.md
@@ -0,0 +1,32 @@
+[English](README.md) | 简体中文
+
+# Deploying PaddleOCR on KunlunXin XPU with FastDeploy
+
+## 1. Overview
+PaddleOCR models can be deployed on KunlunXin chips via FastDeploy.
+
+The following chips are supported:
+- KunlunXin 818-100 (inference chip)
+- KunlunXin 818-300 (training chip)
+
+The following devices are supported:
+- K100/K200 KunlunXin AI accelerator card
+- R200 KunlunXin AI accelerator card
+
+## 2. Supported PaddleOCR Inference Models
+
+The inference models in the table below have been tested with FastDeploy; the download links are provided by the PaddleOCR model zoo.
+For more models, see the [PP-OCR model list](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.6/doc/doc_ch/models_list.md); users are welcome to try them.
+
+| PaddleOCR Version | Text Box Detection | Angle Classification | Text Recognition |Dictionary| Notes |
+|:----|:----|:----|:----|:----|:--------|
+| ch_PP-OCRv3[recommended] |[ch_PP-OCRv3_det](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [ch_PP-OCRv3_rec](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar) | [ppocr_keys_v1.txt](https://bj.bcebos.com/paddlehub/fastdeploy/ppocr_keys_v1.txt) | Original ultra-lightweight OCRv3 models; support Chinese, English and multilingual text detection |
+| en_PP-OCRv3[recommended] |[en_PP-OCRv3_det](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [en_PP-OCRv3_rec](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_infer.tar) | [en_dict.txt](https://bj.bcebos.com/paddlehub/fastdeploy/en_dict.txt) | Original ultra-lightweight OCRv3 models; support English and digit recognition; apart from the detection and recognition training data, identical to the Chinese models |
+| ch_PP-OCRv2 |[ch_PP-OCRv2_det](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [ch_PP-OCRv2_rec](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar) | [ppocr_keys_v1.txt](https://bj.bcebos.com/paddlehub/fastdeploy/ppocr_keys_v1.txt) | Original ultra-lightweight OCRv2 models; support Chinese, English and multilingual text detection |
+| ch_PP-OCRv2_mobile |[ch_ppocr_mobile_v2.0_det](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [ch_ppocr_mobile_v2.0_rec](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar) | [ppocr_keys_v1.txt](https://bj.bcebos.com/paddlehub/fastdeploy/ppocr_keys_v1.txt) | Original ultra-lightweight OCRv2 mobile models; support Chinese, English and multilingual text detection; lighter than PP-OCRv2 |
+| ch_PP-OCRv2_server |[ch_ppocr_server_v2.0_det](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [ch_ppocr_server_v2.0_rec](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar) |[ppocr_keys_v1.txt](https://bj.bcebos.com/paddlehub/fastdeploy/ppocr_keys_v1.txt) | OCRv2 server-grade models; support Chinese, English and multilingual text detection; larger than the ultra-lightweight models but more accurate|
+
+
+## 3. Detailed Deployment Examples
+- [Python deployment](python)
+- [C++ deployment](cpp)
diff --git a/deploy/fastdeploy/kunlun/cpp/CMakeLists.txt b/deploy/fastdeploy/kunlun/cpp/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e9b33c3937c468a2889e087123539e30544443c0
--- /dev/null
+++ b/deploy/fastdeploy/kunlun/cpp/CMakeLists.txt
@@ -0,0 +1,15 @@
+PROJECT(infer_demo C CXX)
+CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
+
+# Path of the downloaded and extracted FastDeploy SDK
+option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
+
+include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
+
+# Add FastDeploy header dependencies
+include_directories(${FASTDEPLOY_INCS})
+
+add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
+# Link against the FastDeploy libraries
+target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})
+
diff --git a/deploy/fastdeploy/kunlun/cpp/README.md b/deploy/fastdeploy/kunlun/cpp/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..c7a74aa6e146814c580be52e5d5965fe38fdcfa6
--- /dev/null
+++ b/deploy/fastdeploy/kunlun/cpp/README.md
@@ -0,0 +1,52 @@
+[English](README.md) | 简体中文
+# PP-OCRv3 KunlunXin XPU C++ Deployment Example
+
+This directory provides `infer.cc` for deploying PP-OCRv3 on KunlunXin XPU.
+
+## 1. Environment Preparation
+Before deployment, build the KunlunXin XPU prediction library yourself; see [building the KunlunXin XPU deployment environment](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install#自行编译安装)
+
+## 2. Model Preparation
+Before deployment, prepare the inference models you need; they can be downloaded from the [list of PaddleOCR models supported by FastDeploy](../README.md).
+
+## 3. Run the Deployment Example
+```
+# Download the deployment example code
+git clone https://github.com/PaddlePaddle/PaddleOCR.git
+cd PaddleOCR/deploy/fastdeploy/kunlun/cpp
+
+mkdir build
+cd build
+# Build infer_demo with the compiled FastDeploy library
+cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-kunlunxin
+make -j
+
+# Download the PP-OCRv3 text detection model
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar
+tar -xvf ch_PP-OCRv3_det_infer.tar
+# Download the text angle classifier model
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar
+tar -xvf ch_ppocr_mobile_v2.0_cls_infer.tar
+# Download the PP-OCRv3 text recognition model
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar
+tar -xvf ch_PP-OCRv3_rec_infer.tar
+
+# Download the test image and dictionary file
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt
+
+./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg
+
+```
+
+After it finishes, the visualized result looks like the image below.
+
+
+
+
+
+## 4. More Guides
+- [PP-OCR C++ API reference](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/cpp/html/namespacefastdeploy_1_1vision_1_1ocr.html)
+- [Overview of deploying PaddleOCR models with FastDeploy](../../)
+- [PP-OCRv3 Python deployment](../python)
+- To tune the pre/post-processing hyperparameters, use the text detection/recognition models individually, or use other models, see [PP-OCR deployment on CPU/GPU](../../cpu-gpu/cpp/README.md) for more documentation.
diff --git a/deploy/fastdeploy/kunlun/cpp/infer.cc b/deploy/fastdeploy/kunlun/cpp/infer.cc
new file mode 100644
index 0000000000000000000000000000000000000000..3342b53d16382c683d913d172423f790351ddd3b
--- /dev/null
+++ b/deploy/fastdeploy/kunlun/cpp/infer.cc
@@ -0,0 +1,115 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+#ifdef WIN32
+const char sep = '\\';
+#else
+const char sep = '/';
+#endif
+
+void KunlunXinInfer(const std::string &det_model_dir,
+ const std::string &cls_model_dir,
+ const std::string &rec_model_dir,
+ const std::string &rec_label_file,
+ const std::string &image_file) {
+ auto det_model_file = det_model_dir + sep + "inference.pdmodel";
+ auto det_params_file = det_model_dir + sep + "inference.pdiparams";
+
+ auto cls_model_file = cls_model_dir + sep + "inference.pdmodel";
+ auto cls_params_file = cls_model_dir + sep + "inference.pdiparams";
+
+ auto rec_model_file = rec_model_dir + sep + "inference.pdmodel";
+ auto rec_params_file = rec_model_dir + sep + "inference.pdiparams";
+
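+  // All three models share one RuntimeOption that targets the KunlunXin XPU.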
+ auto option = fastdeploy::RuntimeOption();
+ option.UseKunlunXin();
+
+ auto det_option = option;
+ auto cls_option = option;
+ auto rec_option = option;
+
+  // The cls and rec models can run inference on a batch of images.
+  // Users can choose the inference batch sizes and set them after creating
+  // the PP-OCR pipeline.
+ int cls_batch_size = 1;
+ int rec_batch_size = 6;
+
+ auto det_model = fastdeploy::vision::ocr::DBDetector(
+ det_model_file, det_params_file, det_option);
+ auto cls_model = fastdeploy::vision::ocr::Classifier(
+ cls_model_file, cls_params_file, cls_option);
+ auto rec_model = fastdeploy::vision::ocr::Recognizer(
+ rec_model_file, rec_params_file, rec_label_file, rec_option);
+
+ assert(det_model.Initialized());
+ assert(cls_model.Initialized());
+ assert(rec_model.Initialized());
+
+ // The classification model is optional, so the PP-OCR can also be connected
+ // in series as follows
+ // auto ppocr_v3 = fastdeploy::pipeline::PPOCRv3(&det_model, &rec_model);
+ auto ppocr_v3 =
+ fastdeploy::pipeline::PPOCRv3(&det_model, &cls_model, &rec_model);
+
+  // Set the inference batch size for the cls and rec models; the value can be
+  // -1 or any positive integer.
+  // When the batch size is set to -1, the inference batch size of the cls and
+  // rec models equals the number of boxes detected by the det model.
+ ppocr_v3.SetClsBatchSize(cls_batch_size);
+ ppocr_v3.SetRecBatchSize(rec_batch_size);
+
+ if (!ppocr_v3.Initialized()) {
+ std::cerr << "Failed to initialize PP-OCR." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+ auto im_bak = im.clone();
+
+ fastdeploy::vision::OCRResult result;
+ if (!ppocr_v3.Predict(&im, &result)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+
+ std::cout << result.Str() << std::endl;
+
+ auto vis_im = fastdeploy::vision::VisOcr(im_bak, result);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+ if (argc < 6) {
+ std::cout << "Usage: infer_demo path/to/det_model path/to/cls_model "
+ "path/to/rec_model path/to/rec_label_file path/to/image "
+ "e.g ./infer_demo ./ch_PP-OCRv3_det_infer "
+ "./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer "
+ "./ppocr_keys_v1.txt ./12.jpg"
+ << std::endl;
+ return -1;
+ }
+
+ std::string det_model_dir = argv[1];
+ std::string cls_model_dir = argv[2];
+ std::string rec_model_dir = argv[3];
+ std::string rec_label_file = argv[4];
+ std::string test_image = argv[5];
+ KunlunXinInfer(det_model_dir, cls_model_dir, rec_model_dir, rec_label_file,
+ test_image);
+ return 0;
+}
diff --git a/deploy/fastdeploy/kunlun/python/README.md b/deploy/fastdeploy/kunlun/python/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..4bc86f0e8b55d40418b71ad078c50a72343181b0
--- /dev/null
+++ b/deploy/fastdeploy/kunlun/python/README.md
@@ -0,0 +1,48 @@
+[English](README.md) | 简体中文
+# PP-OCRv3 KunlunXin XPU Python Deployment Example
+
+This directory provides `infer.py` for deploying PP-OCRv3 on KunlunXin XPU.
+
+## 1. Environment Preparation
+Before deployment, build and install the FastDeploy Python wheel for KunlunXin XPU yourself; see [KunlunXin XPU deployment environment](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install#自行编译安装)
+
+## 2. Model Preparation
+Before deployment, prepare the inference models you need; they can be downloaded from the [list of PaddleOCR models supported by FastDeploy](../README.md).
+
+## 3. Run the Deployment Example
+```
+# Download the deployment example code
+git clone https://github.com/PaddlePaddle/PaddleOCR.git
+cd PaddleOCR/deploy/fastdeploy/kunlun/python
+
+# Download the PP-OCRv3 text detection model
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar
+tar -xvf ch_PP-OCRv3_det_infer.tar
+# Download the text angle classifier model
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar
+tar -xvf ch_ppocr_mobile_v2.0_cls_infer.tar
+# Download the PP-OCRv3 text recognition model
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar
+tar -xvf ch_PP-OCRv3_rec_infer.tar
+
+# Download the test image and dictionary file
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt
+
+python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg
+```
+
+After it finishes, the visualized result looks like the image below.
+
+
+
+
+
+## 4. More Guides
+- [PP-OCR Python API reference](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/python/html/ocr.html)
+- [Overview of deploying PaddleOCR models with FastDeploy](../../)
+- [PP-OCRv3 C++ deployment](../cpp)
+- To tune the pre/post-processing hyperparameters, use the text detection/recognition models individually, or use other models, see [PP-OCR deployment on CPU/GPU](../../cpu-gpu/python/README.md) for more documentation.
+
+## 5. FAQ
+- [How to convert vision model prediction results to numpy format](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/faq/vision_result_related_problems.md)
diff --git a/deploy/fastdeploy/kunlun/python/infer.py b/deploy/fastdeploy/kunlun/python/infer.py
new file mode 100755
index 0000000000000000000000000000000000000000..b4d32f6661dcc644615bf895ec6f66cc769fd19e
--- /dev/null
+++ b/deploy/fastdeploy/kunlun/python/infer.py
@@ -0,0 +1,111 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fastdeploy as fd
+import cv2
+import os
+
+
+def parse_arguments():
+ import argparse
+ import ast
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--det_model", required=True, help="Path of Detection model of PPOCR.")
+ parser.add_argument(
+ "--cls_model",
+ required=True,
+ help="Path of Classification model of PPOCR.")
+ parser.add_argument(
+ "--rec_model",
+ required=True,
+ help="Path of Recognization model of PPOCR.")
+ parser.add_argument(
+ "--rec_label_file",
+ required=True,
+ help="Path of Recognization model of PPOCR.")
+ parser.add_argument(
+ "--image", type=str, required=True, help="Path of test image file.")
+ parser.add_argument(
+ "--cls_bs",
+ type=int,
+ default=1,
+ help="Classification model inference batch size.")
+ parser.add_argument(
+ "--rec_bs",
+ type=int,
+ default=6,
+ help="Recognition model inference batch size")
+ return parser.parse_args()
+
+
+def build_option(args):
+
+ det_option = fd.RuntimeOption()
+ cls_option = fd.RuntimeOption()
+ rec_option = fd.RuntimeOption()
+
+ det_option.use_kunlunxin()
+ cls_option.use_kunlunxin()
+ rec_option.use_kunlunxin()
+
+ return det_option, cls_option, rec_option
+
+
+args = parse_arguments()
+
+det_model_file = os.path.join(args.det_model, "inference.pdmodel")
+det_params_file = os.path.join(args.det_model, "inference.pdiparams")
+
+cls_model_file = os.path.join(args.cls_model, "inference.pdmodel")
+cls_params_file = os.path.join(args.cls_model, "inference.pdiparams")
+
+rec_model_file = os.path.join(args.rec_model, "inference.pdmodel")
+rec_params_file = os.path.join(args.rec_model, "inference.pdiparams")
+rec_label_file = args.rec_label_file
+
+det_option, cls_option, rec_option = build_option(args)
+
+det_model = fd.vision.ocr.DBDetector(
+ det_model_file, det_params_file, runtime_option=det_option)
+
+cls_model = fd.vision.ocr.Classifier(
+ cls_model_file, cls_params_file, runtime_option=cls_option)
+
+rec_model = fd.vision.ocr.Recognizer(
+ rec_model_file, rec_params_file, rec_label_file, runtime_option=rec_option)
+
+# Create PP-OCRv3, if cls_model is not needed,
+# just set cls_model=None .
+ppocr_v3 = fd.vision.ocr.PPOCRv3(
+ det_model=det_model, cls_model=cls_model, rec_model=rec_model)
+
+# Set the inference batch size for the cls and rec models; the value can be -1 or any positive integer.
+# When the batch size is set to -1, the inference batch size of the cls and rec
+# models equals the number of boxes detected by the det model.
+ppocr_v3.cls_batch_size = args.cls_bs
+ppocr_v3.rec_batch_size = args.rec_bs
+
+# Prepare image.
+im = cv2.imread(args.image)
+
+# Predict and return the results.
+result = ppocr_v3.predict(im)
+
+print(result)
+
+# Visualize the output.
+vis_im = fd.vision.vis_ppocr(im, result)
+cv2.imwrite("visualized_result.jpg", vis_im)
+print("Visualized result saved in ./visualized_result.jpg")
diff --git a/deploy/fastdeploy/rockchip/README.md b/deploy/fastdeploy/rockchip/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..b38f7f89631c4903751e879e8d3c434ff0e47051
--- /dev/null
+++ b/deploy/fastdeploy/rockchip/README.md
@@ -0,0 +1,23 @@
+[English](README.md) | 简体中文
+
+# Deploying PaddleOCR Models on RKNPU2 with FastDeploy
+
+## 1. Overview
+PaddleOCR models can be deployed on RKNPU2 via FastDeploy.
+
+## 2. Supported Models
+
+The model download links in the table below are provided by the PaddleOCR model zoo; see the [PP-OCR model list](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.6/doc/doc_ch/models_list.md)
+
+| PaddleOCR Version | Text Box Detection | Angle Classification | Text Recognition |Dictionary| Notes |
+|:----|:----|:----|:----|:----|:--------|
+| ch_PP-OCRv3[recommended] |[ch_PP-OCRv3_det](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [ch_PP-OCRv3_rec](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar) | [ppocr_keys_v1.txt](https://bj.bcebos.com/paddlehub/fastdeploy/ppocr_keys_v1.txt) | Original ultra-lightweight OCRv3 models; support Chinese, English and multilingual text detection |
+| en_PP-OCRv3[recommended] |[en_PP-OCRv3_det](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [en_PP-OCRv3_rec](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_infer.tar) | [en_dict.txt](https://bj.bcebos.com/paddlehub/fastdeploy/en_dict.txt) | Original ultra-lightweight OCRv3 models; support English and digit recognition; apart from the detection and recognition training data, identical to the Chinese models |
+| ch_PP-OCRv2 |[ch_PP-OCRv2_det](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [ch_PP-OCRv2_rec](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar) | [ppocr_keys_v1.txt](https://bj.bcebos.com/paddlehub/fastdeploy/ppocr_keys_v1.txt) | Original ultra-lightweight OCRv2 models; support Chinese, English and multilingual text detection |
+| ch_PP-OCRv2_mobile |[ch_ppocr_mobile_v2.0_det](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [ch_ppocr_mobile_v2.0_rec](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar) | [ppocr_keys_v1.txt](https://bj.bcebos.com/paddlehub/fastdeploy/ppocr_keys_v1.txt) | Original ultra-lightweight OCRv2 mobile models; support Chinese, English and multilingual text detection; lighter than PP-OCRv2 |
+| ch_PP-OCRv2_server |[ch_ppocr_server_v2.0_det](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [ch_ppocr_server_v2.0_rec](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar) |[ppocr_keys_v1.txt](https://bj.bcebos.com/paddlehub/fastdeploy/ppocr_keys_v1.txt) | OCRv2 server-grade models; support Chinese, English and multilingual text detection; larger than the ultra-lightweight models but more accurate|
+
+
+## 3. Detailed Deployment Examples
+- [Python deployment](python)
+- [C++ deployment](cpp)
diff --git a/deploy/fastdeploy/rockchip/cpp/CMakeLists.txt b/deploy/fastdeploy/rockchip/cpp/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..93540a7e83e05228bcb38042a91166c858c95137
--- /dev/null
+++ b/deploy/fastdeploy/rockchip/cpp/CMakeLists.txt
@@ -0,0 +1,14 @@
+PROJECT(infer_demo C CXX)
+CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
+
+# Path of the downloaded and extracted FastDeploy SDK
+option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
+
+include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
+
+# Add FastDeploy header dependencies
+include_directories(${FASTDEPLOY_INCS})
+
+add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
+# Link against the FastDeploy libraries
+target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})
diff --git a/deploy/fastdeploy/rockchip/cpp/README.md b/deploy/fastdeploy/rockchip/cpp/README.md
new file mode 100755
index 0000000000000000000000000000000000000000..de2d6355ab9d02ab2e4f8b2a3a1798d16f94aa2b
--- /dev/null
+++ b/deploy/fastdeploy/rockchip/cpp/README.md
@@ -0,0 +1,122 @@
+[English](README.md) | 简体中文
+# PP-OCRv3 RKNPU2 C++ Deployment Example
+
+This directory provides `infer.cc` for deploying PP-OCRv3 on RKNPU2.
+
+
+## 1. Environment Preparation
+Before deployment, confirm the following two steps:
+- 1. Build the RKNPU2 prediction library yourself; see [building the RKNPU2 deployment environment](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install#自行编译安装)
+- 2. Also see the [FastDeploy RKNPU2 resource guide](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install/rknpu2.md)
+
+## 2. Model Preparation
+Before deployment, prepare the inference models you need; they can be downloaded from the [list of PaddleOCR models supported by FastDeploy](../README.md).
+To deploy PP-OCR models on RKNPU2, the Paddle inference models also have to be converted to RKNN models.
+Since the rknn_toolkit2 tool cannot yet convert Paddle models directly to RKNN, first convert the Paddle inference models to ONNX and then to RKNN, as shown below.
+
+```bash
+# Download the PP-OCRv3 text detection model
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar
+tar -xvf ch_PP-OCRv3_det_infer.tar
+# Download the text angle classifier model
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar
+tar -xvf ch_ppocr_mobile_v2.0_cls_infer.tar
+# Download the PP-OCRv3 text recognition model
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar
+tar -xvf ch_PP-OCRv3_rec_infer.tar
+
+# Install the latest released paddle2onnx yourself, then convert the models to ONNX
+paddle2onnx --model_dir ch_PP-OCRv3_det_infer \
+ --model_filename inference.pdmodel \
+ --params_filename inference.pdiparams \
+ --save_file ch_PP-OCRv3_det_infer/ch_PP-OCRv3_det_infer.onnx \
+ --enable_dev_version True
+paddle2onnx --model_dir ch_ppocr_mobile_v2.0_cls_infer \
+ --model_filename inference.pdmodel \
+ --params_filename inference.pdiparams \
+ --save_file ch_ppocr_mobile_v2.0_cls_infer/ch_ppocr_mobile_v2.0_cls_infer.onnx \
+ --enable_dev_version True
+paddle2onnx --model_dir ch_PP-OCRv3_rec_infer \
+ --model_filename inference.pdmodel \
+ --params_filename inference.pdiparams \
+ --save_file ch_PP-OCRv3_rec_infer/ch_PP-OCRv3_rec_infer.onnx \
+ --enable_dev_version True
+
+# Fix the input shape of each model
+python -m paddle2onnx.optimize --input_model ch_PP-OCRv3_det_infer/ch_PP-OCRv3_det_infer.onnx \
+ --output_model ch_PP-OCRv3_det_infer/ch_PP-OCRv3_det_infer.onnx \
+ --input_shape_dict "{'x':[1,3,960,960]}"
+python -m paddle2onnx.optimize --input_model ch_ppocr_mobile_v2.0_cls_infer/ch_ppocr_mobile_v2.0_cls_infer.onnx \
+ --output_model ch_ppocr_mobile_v2.0_cls_infer/ch_ppocr_mobile_v2.0_cls_infer.onnx \
+ --input_shape_dict "{'x':[1,3,48,192]}"
+python -m paddle2onnx.optimize --input_model ch_PP-OCRv3_rec_infer/ch_PP-OCRv3_rec_infer.onnx \
+ --output_model ch_PP-OCRv3_rec_infer/ch_PP-OCRv3_rec_infer.onnx \
+ --input_shape_dict "{'x':[1,3,48,320]}"
+
+# The rockchip/rknpu2_tools/ directory provides a tool for converting ONNX models to RKNN models
+python rockchip/rknpu2_tools/export.py --config_path rockchip/rknpu2_tools/config/ppocrv3_det.yaml \
+                                       --target_platform rk3588
+python rockchip/rknpu2_tools/export.py --config_path rockchip/rknpu2_tools/config/ppocrv3_rec.yaml \
+                                       --target_platform rk3588
+python rockchip/rknpu2_tools/export.py --config_path rockchip/rknpu2_tools/config/ppocrv3_cls.yaml \
+                                       --target_platform rk3588
+```
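+
+Note that the `mean`/`std` values in the rknpu2_tools config files are baked into the RKNN graph at conversion time, so FastDeploy's host-side normalization must be skipped for RKNN models. This is what `infer.cc` in this directory does for each of the three models:
+
+```cpp
+// Normalization and HWC->CHW permutation already happen inside the RKNN
+// graph, so disable FastDeploy's host-side preprocessing.
+det_model.GetPreprocessor().DisableNormalize();
+det_model.GetPreprocessor().DisablePermute();
+```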
+
+## 3. Run the Deployment Example
+Run the following commands in this directory to build and run the test. This model requires FastDeploy above version 1.0.3 (x.x.x > 1.0.3) and RKNN version 1.4.1b22 or later.
+
+```bash
+# Download the deployment example code
+git clone https://github.com/PaddlePaddle/PaddleOCR.git
+cd PaddleOCR/deploy/fastdeploy/rockchip/cpp
+
+mkdir build
+cd build
+# Build infer_demo against the compiled FastDeploy library
+cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-rockchip
+make -j
+
+# Download the test image and the dictionary file
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt
+
+# Copy the RKNN models into the build directory
+
+# CPU inference
+./infer_demo ./ch_PP-OCRv3_det_infer/ch_PP-OCRv3_det_infer.onnx \
+ ./ch_ppocr_mobile_v2.0_cls_infer/ch_ppocr_mobile_v2.0_cls_infer.onnx \
+ ./ch_PP-OCRv3_rec_infer/ch_PP-OCRv3_rec_infer.onnx \
+ ./ppocr_keys_v1.txt \
+ ./12.jpg \
+ 0
+# RKNPU2 inference
+./infer_demo ./ch_PP-OCRv3_det_infer/ch_PP-OCRv3_det_infer_rk3588_unquantized.rknn \
+ ./ch_ppocr_mobile_v2.0_cls_infer/ch_ppocr_mobile_v20_cls_infer_rk3588_unquantized.rknn \
+ ./ch_PP-OCRv3_rec_infer/ch_PP-OCRv3_rec_infer_rk3588_unquantized.rknn \
+ ./ppocr_keys_v1.txt \
+ ./12.jpg \
+ 1
+```
+
+After running, the visualized result is shown below:
+
+
+
+The output is as follows:
+
+```text
+det boxes: [[276,174],[285,173],[285,178],[276,179]]rec text: rec score:0.000000 cls label: 1 cls score: 0.766602
+det boxes: [[43,408],[483,390],[483,431],[44,449]]rec text: 上海斯格威铂尔曼大酒店 rec score:0.888450 cls label: 0 cls score: 1.000000
+det boxes: [[186,456],[399,448],[399,480],[186,488]]rec text: 打浦路15号 rec score:0.988769 cls label: 0 cls score: 1.000000
+det boxes: [[18,501],[513,485],[514,537],[18,554]]rec text: 绿洲仕格维花园公寓 rec score:0.992730 cls label: 0 cls score: 1.000000
+det boxes: [[78,553],[404,541],[404,573],[78,585]]rec text: 打浦路252935号 rec score:0.983545 cls label: 0 cls score: 1.000000
+Visualized result saved in ./vis_result.jpg
+```
+
+## 4. More Guides
+
+- [PP-OCR series C++ API reference](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/cpp/html/namespacefastdeploy_1_1vision_1_1ocr.html)
+- [Overview of deploying PaddleOCR models with FastDeploy](../../)
+- [PP-OCRv3 Python deployment](../python)
+- [FastDeploy RKNPU2 resource guide](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install/rknpu2.md)
+- To tune pre/post-processing hyperparameters, use the text detection or recognition model on its own, or deploy other models, see [PP-OCR deployment on CPU/GPU](../../cpu-gpu/cpp/README.md)
diff --git a/deploy/fastdeploy/rockchip/cpp/infer.cc b/deploy/fastdeploy/rockchip/cpp/infer.cc
new file mode 100644
index 0000000000000000000000000000000000000000..7add35688a0f3d2c6f18b3c848be79f6d02db431
--- /dev/null
+++ b/deploy/fastdeploy/rockchip/cpp/infer.cc
@@ -0,0 +1,126 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+
+void InitAndInfer(const std::string &det_model_file,
+ const std::string &cls_model_file,
+ const std::string &rec_model_file,
+ const std::string &rec_label_file,
+ const std::string &image_file,
+ const fastdeploy::RuntimeOption &option,
+ const fastdeploy::ModelFormat &format) {
+ auto det_params_file = "";
+ auto cls_params_file = "";
+ auto rec_params_file = "";
+
+ auto det_option = option;
+ auto cls_option = option;
+ auto rec_option = option;
+
+ if (format == fastdeploy::ONNX) {
+ std::cout << "ONNX Model" << std::endl;
+ }
+
+ auto det_model = fastdeploy::vision::ocr::DBDetector(
+ det_model_file, det_params_file, det_option, format);
+ auto cls_model = fastdeploy::vision::ocr::Classifier(
+ cls_model_file, cls_params_file, cls_option, format);
+ auto rec_model = fastdeploy::vision::ocr::Recognizer(
+ rec_model_file, rec_params_file, rec_label_file, rec_option, format);
+
+ if (format == fastdeploy::RKNN) {
+ cls_model.GetPreprocessor().DisableNormalize();
+ cls_model.GetPreprocessor().DisablePermute();
+
+ det_model.GetPreprocessor().DisableNormalize();
+ det_model.GetPreprocessor().DisablePermute();
+
+ rec_model.GetPreprocessor().DisableNormalize();
+ rec_model.GetPreprocessor().DisablePermute();
+ }
+ det_model.GetPreprocessor().SetStaticShapeInfer(true);
+ rec_model.GetPreprocessor().SetStaticShapeInfer(true);
+
+ assert(det_model.Initialized());
+ assert(cls_model.Initialized());
+ assert(rec_model.Initialized());
+
+  // The classification model is optional, so the PP-OCR pipeline can also be
+  // built without it:
+  //   auto ppocr_v3 = fastdeploy::pipeline::PPOCRv3(&det_model, &rec_model);
+ auto ppocr_v3 =
+ fastdeploy::pipeline::PPOCRv3(&det_model, &cls_model, &rec_model);
+
+  // When static shape infer is enabled for the rec model, the batch size of
+  // the cls and rec models must be set to 1.
+ ppocr_v3.SetClsBatchSize(1);
+ ppocr_v3.SetRecBatchSize(1);
+
+ if (!ppocr_v3.Initialized()) {
+ std::cerr << "Failed to initialize PP-OCR." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+
+ fastdeploy::vision::OCRResult result;
+ if (!ppocr_v3.Predict(im, &result)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+
+ std::cout << result.Str() << std::endl;
+
+ auto vis_im = fastdeploy::vision::VisOcr(im, result);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+ if (argc < 7) {
+ std::cout << "Usage: infer_demo path/to/det_model path/to/cls_model "
+ "path/to/rec_model path/to/rec_label_file path/to/image "
+ "run_option, "
+ "e.g ./infer_demo ./ch_PP-OCRv3_det_infer "
+ "./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer "
+ "./ppocr_keys_v1.txt ./12.jpg 0"
+ << std::endl;
+ std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
+ "with ascend."
+ << std::endl;
+ return -1;
+ }
+
+ fastdeploy::RuntimeOption option;
+ fastdeploy::ModelFormat format;
+ int flag = std::atoi(argv[6]);
+
+ if (flag == 0) {
+ option.UseCpu();
+ format = fastdeploy::ONNX;
+  } else if (flag == 1) {
+    option.UseRKNPU2();
+    format = fastdeploy::RKNN;
+  } else {
+    std::cerr << "Invalid run_option: " << flag
+              << ", expected 0 (CPU) or 1 (RKNPU2)." << std::endl;
+    return -1;
+  }
+
+ std::string det_model_dir = argv[1];
+ std::string cls_model_dir = argv[2];
+ std::string rec_model_dir = argv[3];
+ std::string rec_label_file = argv[4];
+ std::string test_image = argv[5];
+ InitAndInfer(det_model_dir, cls_model_dir, rec_model_dir, rec_label_file,
+ test_image, option, format);
+ return 0;
+}
diff --git a/deploy/fastdeploy/rockchip/python/README.md b/deploy/fastdeploy/rockchip/python/README.md
new file mode 100755
index 0000000000000000000000000000000000000000..cc0396337f6f8d91646ce082c5c61be9da27b436
--- /dev/null
+++ b/deploy/fastdeploy/rockchip/python/README.md
@@ -0,0 +1,105 @@
+English | [简体中文](README_CN.md)
+# PP-OCRv3 RKNPU2 Python Deployment Example
+This directory provides `infer.py` for deploying PP-OCRv3 on RKNPU2.
+
+
+## 1. Deployment Environment Preparation
+Before deploying, confirm the following two steps:
+- 1. Build the RKNPU2-based FastDeploy Python prediction library yourself; see [Building the RKNPU2 deployment environment](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install#自行编译安装)
+- 2. Also refer to the [FastDeploy RKNPU2 resource guide](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install/rknpu2.md)
+
+## 2. Deployment Model Preparation
+Before deploying, prepare the inference models you need; they can be downloaded from the [list of PaddleOCR models supported by FastDeploy](../README.md).
+To deploy PP-OCR models on RKNPU2, the Paddle inference models must first be converted to RKNN models.
+Since the rknn_toolkit2 tool cannot convert Paddle models to RKNN directly, first convert the Paddle inference models to ONNX and then to RKNN, as shown below.
+
+```bash
+# Download the PP-OCRv3 text detection model
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar
+tar -xvf ch_PP-OCRv3_det_infer.tar
+# Download the text direction classifier model
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar
+tar -xvf ch_ppocr_mobile_v2.0_cls_infer.tar
+# Download the PP-OCRv3 text recognition model
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar
+tar -xvf ch_PP-OCRv3_rec_infer.tar
+
+# Install the latest released paddle2onnx yourself, then convert the models to ONNX format
+paddle2onnx --model_dir ch_PP-OCRv3_det_infer \
+ --model_filename inference.pdmodel \
+ --params_filename inference.pdiparams \
+ --save_file ch_PP-OCRv3_det_infer/ch_PP-OCRv3_det_infer.onnx \
+ --enable_dev_version True
+paddle2onnx --model_dir ch_ppocr_mobile_v2.0_cls_infer \
+ --model_filename inference.pdmodel \
+ --params_filename inference.pdiparams \
+ --save_file ch_ppocr_mobile_v2.0_cls_infer/ch_ppocr_mobile_v2.0_cls_infer.onnx \
+ --enable_dev_version True
+paddle2onnx --model_dir ch_PP-OCRv3_rec_infer \
+ --model_filename inference.pdmodel \
+ --params_filename inference.pdiparams \
+ --save_file ch_PP-OCRv3_rec_infer/ch_PP-OCRv3_rec_infer.onnx \
+ --enable_dev_version True
+
+# Fix the input shape of each model
+python -m paddle2onnx.optimize --input_model ch_PP-OCRv3_det_infer/ch_PP-OCRv3_det_infer.onnx \
+ --output_model ch_PP-OCRv3_det_infer/ch_PP-OCRv3_det_infer.onnx \
+ --input_shape_dict "{'x':[1,3,960,960]}"
+python -m paddle2onnx.optimize --input_model ch_ppocr_mobile_v2.0_cls_infer/ch_ppocr_mobile_v2.0_cls_infer.onnx \
+ --output_model ch_ppocr_mobile_v2.0_cls_infer/ch_ppocr_mobile_v2.0_cls_infer.onnx \
+ --input_shape_dict "{'x':[1,3,48,192]}"
+python -m paddle2onnx.optimize --input_model ch_PP-OCRv3_rec_infer/ch_PP-OCRv3_rec_infer.onnx \
+ --output_model ch_PP-OCRv3_rec_infer/ch_PP-OCRv3_rec_infer.onnx \
+ --input_shape_dict "{'x':[1,3,48,320]}"
+
+# The rockchip/rknpu2_tools/ directory provides a tool for converting ONNX models to RKNN models
+python rockchip/rknpu2_tools/export.py --config_path rockchip/rknpu2_tools/config/ppocrv3_det.yaml \
+                                       --target_platform rk3588
+python rockchip/rknpu2_tools/export.py --config_path rockchip/rknpu2_tools/config/ppocrv3_rec.yaml \
+                                       --target_platform rk3588
+python rockchip/rknpu2_tools/export.py --config_path rockchip/rknpu2_tools/config/ppocrv3_cls.yaml \
+                                       --target_platform rk3588
+```
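+
+Note that the `mean`/`std` values in the rknpu2_tools config files are baked into the RKNN graph at conversion time, so FastDeploy's host-side normalization must be skipped for RKNN models. This is what `infer.py` in this directory does for each of the three models:
+
+```python
+# Normalization and HWC->CHW permutation already happen inside the RKNN
+# graph, so disable FastDeploy's host-side preprocessing.
+det_model.preprocessor.disable_normalize()
+det_model.preprocessor.disable_permute()
+```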
+
+
+## 3. Run the Deployment Example
+Run the following commands in this directory to run the test. This model requires FastDeploy above version 1.0.3 (x.x.x > 1.0.3) and RKNN version 1.4.1b22 or later.
+
+```bash
+# Download the test image and the dictionary file
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt
+
+# Download the deployment example code
+git clone https://github.com/PaddlePaddle/PaddleOCR.git
+cd PaddleOCR/deploy/fastdeploy/rockchip/python
+
+
+# CPU inference
+python3 infer.py \
+ --det_model ./ch_PP-OCRv3_det_infer/ch_PP-OCRv3_det_infer.onnx \
+ --cls_model ./ch_ppocr_mobile_v2.0_cls_infer/ch_ppocr_mobile_v2.0_cls_infer.onnx \
+ --rec_model ./ch_PP-OCRv3_rec_infer/ch_PP-OCRv3_rec_infer.onnx \
+ --rec_label_file ./ppocr_keys_v1.txt \
+ --image 12.jpg \
+ --device cpu
+
+# NPU inference
+python3 infer.py \
+ --det_model ./ch_PP-OCRv3_det_infer/ch_PP-OCRv3_det_infer_rk3588_unquantized.rknn \
+ --cls_model ./ch_ppocr_mobile_v2.0_cls_infer/ch_ppocr_mobile_v20_cls_infer_rk3588_unquantized.rknn \
+ --rec_model ./ch_PP-OCRv3_rec_infer/ch_PP-OCRv3_rec_infer_rk3588_unquantized.rknn \
+ --rec_label_file ppocr_keys_v1.txt \
+ --image 12.jpg \
+ --device npu
+```
+
+After running, the visualized result is shown below.
+
+
+## 4. More Guides
+- [PP-OCR series Python API reference](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/python/html/ocr.html)
+- [Overview of deploying PaddleOCR models with FastDeploy](../../)
+- [PP-OCRv3 C++ deployment](../cpp)
+- [FastDeploy RKNPU2 resource guide](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install/rknpu2.md)
+- To tune pre/post-processing hyperparameters, use the text detection or recognition model on its own, or deploy other models, see [PP-OCR deployment on CPU/GPU](../../cpu-gpu/python/README.md)
diff --git a/deploy/fastdeploy/rockchip/python/infer.py b/deploy/fastdeploy/rockchip/python/infer.py
new file mode 100755
index 0000000000000000000000000000000000000000..7aa1382179b41c4ec3d1e634a75645fccd346256
--- /dev/null
+++ b/deploy/fastdeploy/rockchip/python/infer.py
@@ -0,0 +1,144 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fastdeploy as fd
+import cv2
+import os
+
+
+def parse_arguments():
+ import argparse
+ import ast
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--det_model", required=True, help="Path of Detection model of PPOCR.")
+ parser.add_argument(
+ "--cls_model",
+ required=True,
+ help="Path of Classification model of PPOCR.")
+    parser.add_argument(
+        "--rec_model",
+        required=True,
+        help="Path of Recognition model of PPOCR.")
+    parser.add_argument(
+        "--rec_label_file",
+        required=True,
+        help="Path of recognition label file of PPOCR.")
+ parser.add_argument(
+ "--image", type=str, required=True, help="Path of test image file.")
+ parser.add_argument(
+ "--device",
+ type=str,
+ default='cpu',
+ help="Type of inference device, support 'cpu', 'kunlunxin' or 'gpu'.")
+ parser.add_argument(
+ "--cpu_thread_num",
+ type=int,
+ default=9,
+ help="Number of threads while inference on CPU.")
+ return parser.parse_args()
+
+
+def build_option(args):
+
+ det_option = fd.RuntimeOption()
+ cls_option = fd.RuntimeOption()
+ rec_option = fd.RuntimeOption()
+ if args.device == "npu":
+ det_option.use_rknpu2()
+ cls_option.use_rknpu2()
+ rec_option.use_rknpu2()
+
+ return det_option, cls_option, rec_option
+
+
+def build_format(args):
+ det_format = fd.ModelFormat.ONNX
+ cls_format = fd.ModelFormat.ONNX
+ rec_format = fd.ModelFormat.ONNX
+ if args.device == "npu":
+ det_format = fd.ModelFormat.RKNN
+ cls_format = fd.ModelFormat.RKNN
+ rec_format = fd.ModelFormat.RKNN
+
+ return det_format, cls_format, rec_format
+
+
+args = parse_arguments()
+
+# Detection model: detects text boxes
+det_model_file = args.det_model
+det_params_file = ""
+# Classification model: text direction classification (optional)
+cls_model_file = args.cls_model
+cls_params_file = ""
+# Recognition model: recognizes text content
+rec_model_file = args.rec_model
+rec_params_file = ""
+rec_label_file = args.rec_label_file
+
+det_option, cls_option, rec_option = build_option(args)
+det_format, cls_format, rec_format = build_format(args)
+
+det_model = fd.vision.ocr.DBDetector(
+ det_model_file,
+ det_params_file,
+ runtime_option=det_option,
+ model_format=det_format)
+
+cls_model = fd.vision.ocr.Classifier(
+ cls_model_file,
+ cls_params_file,
+ runtime_option=cls_option,
+ model_format=cls_format)
+
+rec_model = fd.vision.ocr.Recognizer(
+ rec_model_file,
+ rec_params_file,
+ rec_label_file,
+ runtime_option=rec_option,
+ model_format=rec_format)
+
+# Enable static shape inference for the Det and Rec models
+det_model.preprocessor.static_shape_infer = True
+rec_model.preprocessor.static_shape_infer = True
+
+if args.device == "npu":
+ det_model.preprocessor.disable_normalize()
+ det_model.preprocessor.disable_permute()
+ cls_model.preprocessor.disable_normalize()
+ cls_model.preprocessor.disable_permute()
+ rec_model.preprocessor.disable_normalize()
+ rec_model.preprocessor.disable_permute()
+
+# Create the PP-OCR pipeline chaining the 3 models; cls_model is optional and can be set to None if not needed
+ppocr_v3 = fd.vision.ocr.PPOCRv3(
+ det_model=det_model, cls_model=cls_model, rec_model=rec_model)
+
+# The batch size of the Cls and Rec models must be set to 1 when static shape inference is enabled
+ppocr_v3.cls_batch_size = 1
+ppocr_v3.rec_batch_size = 1
+
+# Prepare the image for prediction
+im = cv2.imread(args.image)
+
+# Predict and print the result
+result = ppocr_v3.predict(im)
+
+print(result)
+
+# Visualize the result
+vis_im = fd.vision.vis_ppocr(im, result)
+cv2.imwrite("visualized_result.jpg", vis_im)
+print("Visualized result saved in ./visualized_result.jpg")
diff --git a/deploy/fastdeploy/rockchip/rknpu2_tools/config/ppocrv3_cls.yaml b/deploy/fastdeploy/rockchip/rknpu2_tools/config/ppocrv3_cls.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..197becc2f25dd40e1b7cb1b7bebeb8527401c355
--- /dev/null
+++ b/deploy/fastdeploy/rockchip/rknpu2_tools/config/ppocrv3_cls.yaml
@@ -0,0 +1,15 @@
+mean:
+ -
+ - 127.5
+ - 127.5
+ - 127.5
+std:
+ -
+ - 127.5
+ - 127.5
+ - 127.5
+model_path: ./ch_ppocr_mobile_v2.0_cls_infer/ch_ppocr_mobile_v2.0_cls_infer.onnx
+outputs_nodes:
+do_quantization: False
+dataset:
+output_folder: "./ch_ppocr_mobile_v2.0_cls_infer"
diff --git a/deploy/fastdeploy/rockchip/rknpu2_tools/config/ppocrv3_det.yaml b/deploy/fastdeploy/rockchip/rknpu2_tools/config/ppocrv3_det.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2897c5f74b2c92713b2d936794e5242a6ff48514
--- /dev/null
+++ b/deploy/fastdeploy/rockchip/rknpu2_tools/config/ppocrv3_det.yaml
@@ -0,0 +1,15 @@
+mean:
+ -
+ - 123.675
+ - 116.28
+ - 103.53
+std:
+ -
+ - 58.395
+ - 57.12
+ - 57.375
+model_path: ./ch_PP-OCRv3_det_infer/ch_PP-OCRv3_det_infer.onnx
+outputs_nodes:
+do_quantization: False
+dataset:
+output_folder: "./ch_PP-OCRv3_det_infer"
diff --git a/deploy/fastdeploy/rockchip/rknpu2_tools/config/ppocrv3_rec.yaml b/deploy/fastdeploy/rockchip/rknpu2_tools/config/ppocrv3_rec.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8a22a39a2eee1b24f6fe1d99e71bf3d4b82195e8
--- /dev/null
+++ b/deploy/fastdeploy/rockchip/rknpu2_tools/config/ppocrv3_rec.yaml
@@ -0,0 +1,15 @@
+mean:
+ -
+ - 127.5
+ - 127.5
+ - 127.5
+std:
+ -
+ - 127.5
+ - 127.5
+ - 127.5
+model_path: ./ch_PP-OCRv3_rec_infer/ch_PP-OCRv3_rec_infer.onnx
+outputs_nodes:
+do_quantization: False
+dataset:
+output_folder: "./ch_PP-OCRv3_rec_infer"
diff --git a/deploy/fastdeploy/rockchip/rknpu2_tools/export.py b/deploy/fastdeploy/rockchip/rknpu2_tools/export.py
new file mode 100644
index 0000000000000000000000000000000000000000..a94b348859cc87999c3944e53884dea5d11638af
--- /dev/null
+++ b/deploy/fastdeploy/rockchip/rknpu2_tools/export.py
@@ -0,0 +1,80 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import yaml
+import argparse
+from rknn.api import RKNN
+
+
+def get_config():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--verbose", default=True, help="rknntoolkit verbose")
+ parser.add_argument("--config_path")
+ parser.add_argument("--target_platform")
+ args = parser.parse_args()
+ return args
+
+
+if __name__ == "__main__":
+ config = get_config()
+ with open(config.config_path) as file:
+ file_data = file.read()
+ yaml_config = yaml.safe_load(file_data)
+ print(yaml_config)
+ model = RKNN(config.verbose)
+
+ # Config
+ mean_values = yaml_config["mean"]
+ std_values = yaml_config["std"]
+ model.config(
+ mean_values=mean_values,
+ std_values=std_values,
+ target_platform=config.target_platform)
+
+ # Load ONNX model
+ if yaml_config["outputs_nodes"] is None:
+ ret = model.load_onnx(model=yaml_config["model_path"])
+ else:
+ ret = model.load_onnx(
+ model=yaml_config["model_path"],
+ outputs=yaml_config["outputs_nodes"])
+ assert ret == 0, "Load model failed!"
+
+ # Build model
+ ret = model.build(
+ do_quantization=yaml_config["do_quantization"],
+ dataset=yaml_config["dataset"])
+ assert ret == 0, "Build model failed!"
+
+ # Init Runtime
+ ret = model.init_runtime()
+ assert ret == 0, "Init runtime environment failed!"
+
+ # Export
+ if not os.path.exists(yaml_config["output_folder"]):
+ os.mkdir(yaml_config["output_folder"])
+
+    # Note: splitting the basename on "." drops the dots, so e.g.
+    # "ch_ppocr_mobile_v2.0_cls_infer.onnx" yields "ch_ppocr_mobile_v20_cls_infer".
+    name_list = os.path.basename(yaml_config["model_path"]).split(".")
+    model_base_name = ""
+    for name in name_list[0:-1]:
+        model_base_name += name
+ model_device_name = config.target_platform.lower()
+ if yaml_config["do_quantization"]:
+ model_save_name = model_base_name + "_" + model_device_name + "_quantized" + ".rknn"
+ else:
+ model_save_name = model_base_name + "_" + model_device_name + "_unquantized" + ".rknn"
+ ret = model.export_rknn(
+ os.path.join(yaml_config["output_folder"], model_save_name))
+ assert ret == 0, "Export rknn model failed!"
+ print("Export OK!")
diff --git a/deploy/fastdeploy/serving/README.md b/deploy/fastdeploy/serving/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1d52fec45e52a253139bf61d5d555cd3f474722b
--- /dev/null
+++ b/deploy/fastdeploy/serving/README.md
@@ -0,0 +1,24 @@
+English | [简体中文](README.md)
+# Serving Deployment of PP-OCR Models with FastDeploy
+## 1. Introduction to FastDeploy Serving
+Online inference is the final step when companies or individuals put models into production, and the serving framework is its most important piece. FastDeploy currently offers two serving modes: simple_serving and fastdeploy_serving.
+- simple_serving: for scenarios that only need to invoke AI inference over HTTP and have no high-concurrency requirements. Built on Flask, it is simple and efficient and lets you quickly validate that a model can be served online; a sketch of a typical client call follows below.
+- fastdeploy_serving: for high-concurrency, high-throughput workloads. Built on Triton Inference Server, it is a complete, production-ready serving framework with excellent performance.
+
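+As a taste of the simple_serving style, a client call is plain HTTP (an illustrative sketch; the actual URL and JSON schema are defined by the simple_serving app you run):
+
+```python
+import base64
+import json
+
+import requests
+
+# Base64-encode the test image; this payload layout mirrors the one used by
+# FastDeploy simple_serving examples.
+with open("12.jpg", "rb") as f:
+    payload = {"data": {"image": base64.b64encode(f.read()).decode()},
+               "parameters": {}}
+
+# The endpoint name is illustrative; match it to the registered task name.
+resp = requests.post("http://127.0.0.1:8000/fd/ppocrv3", json=payload)
+print(json.loads(resp.text))
+```
+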
+## 2. Supported PaddleOCR Inference Models
+
+The inference models in the table below have been tested by FastDeploy; the download links are provided by the PaddleOCR model zoo.
+For more models, see the [PP-OCR series model list](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.6/doc/doc_ch/models_list.md); users are welcome to try them.
+
+| PaddleOCR Version | Text Box Detection | Direction Classification | Text Recognition | Dictionary File | Notes |
+|:----|:----|:----|:----|:----|:--------|
+| ch_PP-OCRv3 [recommended] |[ch_PP-OCRv3_det](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [ch_PP-OCRv3_rec](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar) | [ppocr_keys_v1.txt](https://bj.bcebos.com/paddlehub/fastdeploy/ppocr_keys_v1.txt) | Original ultra-lightweight OCRv3-series models; support Chinese, English, and multilingual text detection |
+| en_PP-OCRv3 [recommended] |[en_PP-OCRv3_det](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [en_PP-OCRv3_rec](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_infer.tar) | [en_dict.txt](https://bj.bcebos.com/paddlehub/fastdeploy/en_dict.txt) | Original ultra-lightweight OCRv3-series models; support English and digit recognition; identical to the Chinese models apart from the training data of the detection and recognition models |
+| ch_PP-OCRv2 |[ch_PP-OCRv2_det](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [ch_PP-OCRv2_rec](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar) | [ppocr_keys_v1.txt](https://bj.bcebos.com/paddlehub/fastdeploy/ppocr_keys_v1.txt) | Original ultra-lightweight OCRv2-series models; support Chinese, English, and multilingual text detection |
+| ch_PP-OCRv2_mobile |[ch_ppocr_mobile_v2.0_det](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [ch_ppocr_mobile_v2.0_rec](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar) | [ppocr_keys_v1.txt](https://bj.bcebos.com/paddlehub/fastdeploy/ppocr_keys_v1.txt) | Original ultra-lightweight OCRv2-series models; support Chinese, English, and multilingual text detection; even lighter than PP-OCRv2 |
+| ch_PP-OCRv2_server |[ch_ppocr_server_v2.0_det](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [ch_ppocr_server_v2.0_rec](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar) |[ppocr_keys_v1.txt](https://bj.bcebos.com/paddlehub/fastdeploy/ppocr_keys_v1.txt) | OCRv2 server-series models; support Chinese, English, and multilingual text detection; larger than the ultra-lightweight models but more accurate |
+
+## 3. Detailed Deployment Examples
+
+- [fastdeploy serving](fastdeploy_serving)
+- [simple serving](simple_serving)
diff --git a/deploy/fastdeploy/serving/fastdeploy_serving/README.md b/deploy/fastdeploy/serving/fastdeploy_serving/README.md
new file mode 100755
index 0000000000000000000000000000000000000000..5dbfff12d4448807cdfe42f60b493f1e98de129e
--- /dev/null
+++ b/deploy/fastdeploy/serving/fastdeploy_serving/README.md
@@ -0,0 +1,120 @@
+English | [简体中文](README.md)
+# PaddleOCR Serving Deployment Example
+
+This PaddleOCR serving deployment example is built with FastDeploy Serving, a serving framework wrapped around Triton Inference Server for high-concurrency, high-throughput requests; it is a complete, production-ready solution. If you have no high-concurrency or high-throughput needs and only want to quickly verify that a model can be deployed online, see [simple_serving](../simple_serving/)
+
+## 1. Deployment Environment Preparation
+Before serving deployment, check the software and hardware requirements of the serving image and the image pull command; see [FastDeploy serving deployment](https://github.com/PaddlePaddle/FastDeploy/blob/develop/serving/README_CN.md)
+
+## 2. Introduction to PP-OCRv3 Serving Deployment
+This document describes how to build a PP-OCRv3 model service with FastDeploy.
+The server must be started inside Docker, while the client does not have to run in a Docker container.
+
+**The `models` directory under this path ($PWD) contains the model configurations and code (the server loads both to start the service), and it must be mapped into the Docker container.**
+
+PP-OCRv3 consists of three models: det (detection), cls (classification), and rec (recognition).
+
+The serving ensemble is illustrated below: `pp_ocr` chains `det_preprocess`, `det_runtime`, and `det_postprocess`; `cls_pp` chains `cls_runtime` and `cls_postprocess`; `rec_pp` chains `rec_runtime` and `rec_postprocess`.
+
+In particular, `det_postprocess` calls the `cls_pp` and `rec_pp` services multiple times to classify and recognize the detected boxes, and finally returns the recognition result to the user; a minimal sketch of this nested call is shown below.
+
+
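+The following is a minimal sketch (for illustration only; not one of this example's files) of how `det_postprocess` issues such a nested call with Triton's BLS API. `cls_input_tensor` stands for a `pb_utils.Tensor` built from the cropped boxes, and the tensor/model names follow the configs in this directory:
+
+```python
+import triton_python_backend_utils as pb_utils
+
+# "x" must match the input name declared in models/cls_pp/config.pbtxt.
+cls_request = pb_utils.InferenceRequest(
+    model_name="cls_pp",
+    requested_output_names=["cls_labels", "cls_scores"],
+    inputs=[cls_input_tensor])
+response = cls_request.exec()
+if response.has_error():
+    raise pb_utils.TritonModelException(response.error().message())
+cls_labels = pb_utils.get_output_tensor_by_name(response, "cls_labels").as_numpy()
+```
+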
+## 3. Using the Server
+
+### 3.1 Download Models and Start the Serving Docker Image
+```bash
+# Download the repository code
+git clone https://github.com/PaddlePaddle/PaddleOCR.git
+cd PaddleOCR/deploy/fastdeploy/serving/fastdeploy_serving
+
+# Download the models, test image, and dictionary file
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar
+tar xvf ch_PP-OCRv3_det_infer.tar && mv ch_PP-OCRv3_det_infer 1
+mv 1/inference.pdiparams 1/model.pdiparams && mv 1/inference.pdmodel 1/model.pdmodel
+mv 1 models/det_runtime/ && rm -rf ch_PP-OCRv3_det_infer.tar
+
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar
+tar xvf ch_ppocr_mobile_v2.0_cls_infer.tar && mv ch_ppocr_mobile_v2.0_cls_infer 1
+mv 1/inference.pdiparams 1/model.pdiparams && mv 1/inference.pdmodel 1/model.pdmodel
+mv 1 models/cls_runtime/ && rm -rf ch_ppocr_mobile_v2.0_cls_infer.tar
+
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar
+tar xvf ch_PP-OCRv3_rec_infer.tar && mv ch_PP-OCRv3_rec_infer 1
+mv 1/inference.pdiparams 1/model.pdiparams && mv 1/inference.pdmodel 1/model.pdmodel
+mv 1 models/rec_runtime/ && rm -rf ch_PP-OCRv3_rec_infer.tar
+
+mkdir models/pp_ocr/1 && mkdir models/rec_pp/1 && mkdir models/cls_pp/1
+
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt
+mv ppocr_keys_v1.txt models/rec_postprocess/1/
+
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg
+
+# x.y.z is the image version number; replace it with concrete digits per the serving documentation
+docker pull registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10
+docker run -dit --net=host --name fastdeploy --shm-size="1g" -v $PWD:/ocr_serving registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10 bash
+docker exec -it -u root fastdeploy bash
+```
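+
+After these steps, `models/` follows Triton's model-repository layout. Roughly (a sketch showing only the runtime models; each version directory is named `1`):
+
+```text
+models/
+├── det_runtime/
+│   ├── config.pbtxt
+│   └── 1/
+│       ├── model.pdmodel
+│       └── model.pdiparams
+├── cls_runtime/   (same layout)
+└── rec_runtime/   (same layout)
+```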
+
+### 3.2 Installation (inside Docker)
+```bash
+ldconfig
+apt-get install libgl1
+```
+
+### 3.3 Start the Server (inside Docker)
+```bash
+fastdeployserver --model-repository=/ocr_serving/models
+```
+
+Parameters:
+  - `model-repository` (required): path of the model repository to load.
+  - `http-port` (optional): port of the HTTP service. Default: `8000`. Not used in this example.
+  - `grpc-port` (optional): port of the GRPC service. Default: `8001`.
+  - `metrics-port` (optional): port of the server metrics. Default: `8002`. Not used in this example.
+
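+For example, a hypothetical invocation that moves the GRPC endpoint used by `client.py` to another port (update the `url` in `client.py` to match):
+
+```bash
+fastdeployserver --model-repository=/ocr_serving/models --grpc-port=9001
+```
+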
+
+## 4. Using the Client
+### 4.1 Installation
+```bash
+pip3 install tritonclient[all]
+```
+
+### 4.2 Send a Request
+```bash
+python3 client.py
+```
+
+## 5. Configuration Changes
+The default configuration runs on GPU. To run on CPU or with another inference engine, modify the `config.pbtxt` under `models/det_runtime/`, `models/cls_runtime/`, and `models/rec_runtime/`; see the [configuration documentation](../../../../../serving/docs/zh_CN/model_configuration.md) for details
+
+## 6. Other Guides
+
+- To serve PP-OCRv2 instead, besides preparing the PP-OCRv2 models yourself, only one line of code needs to be added.
+Add the following code at **line 109** of [model.py](./models/det_postprocess/1/model.py#L109):
+```
+self.rec_preprocessor.cls_image_shape[1] = 32
+```
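+(The override is needed because PP-OCRv2's recognition model expects an input height of 32, while PP-OCRv3 uses 48.)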
+
+- [Managing serving deployments visually with VisualDL](https://github.com/PaddlePaddle/FastDeploy/blob/develop/serving/docs/zh_CN/vdl_management.md)
+Deploying PP-OCRv3 as a service through VisualDL's web UI takes only three steps:
+```text
+1. Load the model repository: ./vision/ocr/PP-OCRv3/serving
+2. Download the model resources: click the det_runtime model, click version 1 to add a pretrained model, and download the text detection model ch_PP-OCRv3_det. Click the cls_runtime model, click version 1, and download the direction classification model ch_ppocr_mobile_v2.0_cls. Click the rec_runtime model, click version 1, and download the text recognition model ch_PP-OCRv3_rec. Click the rec_postprocess model, click version 1, and download the text recognition model ch_PP-OCRv3_rec.
+3. Start the service: click the Start Service button and enter the launch parameters.
+```
+
+
+
+
+## 7. FAQ
+- [How to write client HTTP/GRPC requests](https://github.com/PaddlePaddle/FastDeploy/blob/develop/serving/docs/zh_CN/client.md)
+- [How to build the serving deployment image](https://github.com/PaddlePaddle/FastDeploy/blob/develop/serving/docs/zh_CN/compile.md)
+- [Serving deployment internals and dynamic batching](https://github.com/PaddlePaddle/FastDeploy/blob/develop/serving/docs/zh_CN/demo.md)
+- [Introduction to the model repository](https://github.com/PaddlePaddle/FastDeploy/blob/develop/serving/docs/zh_CN/model_repository.md)
diff --git a/deploy/fastdeploy/serving/fastdeploy_serving/client.py b/deploy/fastdeploy/serving/fastdeploy_serving/client.py
new file mode 100755
index 0000000000000000000000000000000000000000..6b758c5e39ac0fada03e4e7a561e4a4d0192c6e0
--- /dev/null
+++ b/deploy/fastdeploy/serving/fastdeploy_serving/client.py
@@ -0,0 +1,109 @@
+import logging
+from typing import Optional
+
+import cv2
+import numpy as np
+
+from tritonclient.grpc import InferenceServerClient, InferInput, InferRequestedOutput
+
+LOGGER = logging.getLogger("run_inference_on_triton")
+
+
+class SyncGRPCTritonRunner:
+ DEFAULT_MAX_RESP_WAIT_S = 120
+
+ def __init__(
+ self,
+ server_url: str,
+ model_name: str,
+ model_version: str,
+ *,
+ verbose=False,
+ resp_wait_s: Optional[float]=None, ):
+ self._server_url = server_url
+ self._model_name = model_name
+ self._model_version = model_version
+ self._verbose = verbose
+ self._response_wait_t = self.DEFAULT_MAX_RESP_WAIT_S if resp_wait_s is None else resp_wait_s
+
+ self._client = InferenceServerClient(
+ self._server_url, verbose=self._verbose)
+ error = self._verify_triton_state(self._client)
+ if error:
+ raise RuntimeError(
+ f"Could not communicate to Triton Server: {error}")
+
+ LOGGER.debug(
+ f"Triton server {self._server_url} and model {self._model_name}:{self._model_version} "
+ f"are up and ready!")
+
+ model_config = self._client.get_model_config(self._model_name,
+ self._model_version)
+ model_metadata = self._client.get_model_metadata(self._model_name,
+ self._model_version)
+ LOGGER.info(f"Model config {model_config}")
+ LOGGER.info(f"Model metadata {model_metadata}")
+
+ self._inputs = {tm.name: tm for tm in model_metadata.inputs}
+ self._input_names = list(self._inputs)
+ self._outputs = {tm.name: tm for tm in model_metadata.outputs}
+ self._output_names = list(self._outputs)
+ self._outputs_req = [
+ InferRequestedOutput(name) for name in self._outputs
+ ]
+
+ def Run(self, inputs):
+ """
+ Args:
+ inputs: list, Each value corresponds to an input name of self._input_names
+ Returns:
+ results: dict, {name : numpy.array}
+ """
+ infer_inputs = []
+ for idx, data in enumerate(inputs):
+ infer_input = InferInput(self._input_names[idx], data.shape,
+ "UINT8")
+ infer_input.set_data_from_numpy(data)
+ infer_inputs.append(infer_input)
+
+ results = self._client.infer(
+ model_name=self._model_name,
+ model_version=self._model_version,
+ inputs=infer_inputs,
+ outputs=self._outputs_req,
+ client_timeout=self._response_wait_t, )
+ results = {name: results.as_numpy(name) for name in self._output_names}
+ return results
+
+ def _verify_triton_state(self, triton_client):
+ if not triton_client.is_server_live():
+ return f"Triton server {self._server_url} is not live"
+ elif not triton_client.is_server_ready():
+ return f"Triton server {self._server_url} is not ready"
+ elif not triton_client.is_model_ready(self._model_name,
+ self._model_version):
+ return f"Model {self._model_name}:{self._model_version} is not ready"
+ return None
+
+
+if __name__ == "__main__":
+ model_name = "pp_ocr"
+ model_version = "1"
+ url = "localhost:8001"
+ runner = SyncGRPCTritonRunner(url, model_name, model_version)
+ im = cv2.imread("12.jpg")
+ im = np.array([im, ])
+    # Increase the range to send repeated requests, e.g. for benchmarking.
+    for i in range(1):
+ result = runner.Run([im, ])
+ batch_texts = result['rec_texts']
+ batch_scores = result['rec_scores']
+ batch_bboxes = result['det_bboxes']
+ for i_batch in range(len(batch_texts)):
+ texts = batch_texts[i_batch]
+ scores = batch_scores[i_batch]
+ bboxes = batch_bboxes[i_batch]
+ for i_box in range(len(texts)):
+ print('text=', texts[i_box].decode('utf-8'), ' score=',
+ scores[i_box], ' bbox=', bboxes[i_box])
diff --git a/deploy/fastdeploy/serving/fastdeploy_serving/models/cls_postprocess/1/model.py b/deploy/fastdeploy/serving/fastdeploy_serving/models/cls_postprocess/1/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..891db5f24b8f117c6d499e258dd5e16ee7a7f356
--- /dev/null
+++ b/deploy/fastdeploy/serving/fastdeploy_serving/models/cls_postprocess/1/model.py
@@ -0,0 +1,105 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import numpy as np
+import time
+
+import fastdeploy as fd
+
+# triton_python_backend_utils is available in every Triton Python model. You
+# need to use this module to create inference requests and responses. It also
+# contains some utility functions for extracting information from model_config
+# and converting Triton input/output types to numpy types.
+import triton_python_backend_utils as pb_utils
+
+
+class TritonPythonModel:
+ """Your Python model must use the same class name. Every Python model
+ that is created must have "TritonPythonModel" as the class name.
+ """
+
+ def initialize(self, args):
+ """`initialize` is called only once when the model is being loaded.
+ Implementing `initialize` function is optional. This function allows
+        the model to initialize any state associated with this model.
+ Parameters
+ ----------
+ args : dict
+ Both keys and values are strings. The dictionary keys and values are:
+ * model_config: A JSON string containing the model configuration
+ * model_instance_kind: A string containing model instance kind
+ * model_instance_device_id: A string containing model instance device ID
+ * model_repository: Model repository path
+ * model_version: Model version
+ * model_name: Model name
+ """
+ # You must parse model_config. JSON string is not parsed here
+ self.model_config = json.loads(args['model_config'])
+ print("model_config:", self.model_config)
+
+ self.input_names = []
+ for input_config in self.model_config["input"]:
+ self.input_names.append(input_config["name"])
+ print("postprocess input names:", self.input_names)
+
+ self.output_names = []
+ self.output_dtype = []
+ for output_config in self.model_config["output"]:
+ self.output_names.append(output_config["name"])
+ dtype = pb_utils.triton_string_to_numpy(output_config["data_type"])
+ self.output_dtype.append(dtype)
+ print("postprocess output names:", self.output_names)
+ self.postprocessor = fd.vision.ocr.ClassifierPostprocessor()
+
+ def execute(self, requests):
+ """`execute` must be implemented in every Python model. `execute`
+ function receives a list of pb_utils.InferenceRequest as the only
+ argument. This function is called when an inference is requested
+ for this model. Depending on the batching configuration (e.g. Dynamic
+ Batching) used, `requests` may contain multiple requests. Every
+        Python model must create one pb_utils.InferenceResponse for every
+ pb_utils.InferenceRequest in `requests`. If there is an error, you can
+ set the error argument when creating a pb_utils.InferenceResponse.
+ Parameters
+ ----------
+ requests : list
+ A list of pb_utils.InferenceRequest
+ Returns
+ -------
+ list
+ A list of pb_utils.InferenceResponse. The length of this list must
+ be the same as `requests`
+ """
+ responses = []
+ for request in requests:
+ infer_outputs = pb_utils.get_input_tensor_by_name(
+ request, self.input_names[0])
+ infer_outputs = infer_outputs.as_numpy()
+ results = self.postprocessor.run([infer_outputs])
+ out_tensor_0 = pb_utils.Tensor(self.output_names[0],
+ np.array(results[0]))
+ out_tensor_1 = pb_utils.Tensor(self.output_names[1],
+ np.array(results[1]))
+ inference_response = pb_utils.InferenceResponse(
+ output_tensors=[out_tensor_0, out_tensor_1])
+ responses.append(inference_response)
+ return responses
+
+ def finalize(self):
+ """`finalize` is called only once when the model is being unloaded.
+ Implementing `finalize` function is optional. This function allows
+ the model to perform any necessary clean ups before exit.
+ """
+ print('Cleaning up...')
diff --git a/deploy/fastdeploy/serving/fastdeploy_serving/models/cls_postprocess/config.pbtxt b/deploy/fastdeploy/serving/fastdeploy_serving/models/cls_postprocess/config.pbtxt
new file mode 100644
index 0000000000000000000000000000000000000000..18ab2facc6389217da7b16fc91804b1a52b0ce30
--- /dev/null
+++ b/deploy/fastdeploy/serving/fastdeploy_serving/models/cls_postprocess/config.pbtxt
@@ -0,0 +1,30 @@
+name: "cls_postprocess"
+backend: "python"
+max_batch_size: 128
+input [
+ {
+ name: "POST_INPUT_0"
+ data_type: TYPE_FP32
+ dims: [ 2 ]
+ }
+]
+
+output [
+ {
+ name: "POST_OUTPUT_0"
+ data_type: TYPE_INT32
+ dims: [ 1 ]
+ },
+ {
+ name: "POST_OUTPUT_1"
+ data_type: TYPE_FP32
+ dims: [ 1 ]
+ }
+]
+
+instance_group [
+ {
+ count: 1
+ kind: KIND_CPU
+ }
+]
diff --git a/deploy/fastdeploy/serving/fastdeploy_serving/models/cls_pp/config.pbtxt b/deploy/fastdeploy/serving/fastdeploy_serving/models/cls_pp/config.pbtxt
new file mode 100644
index 0000000000000000000000000000000000000000..068b1e7d87954eb66b59b99a74b7693a98060e33
--- /dev/null
+++ b/deploy/fastdeploy/serving/fastdeploy_serving/models/cls_pp/config.pbtxt
@@ -0,0 +1,54 @@
+name: "cls_pp"
+platform: "ensemble"
+max_batch_size: 128
+input [
+ {
+ name: "x"
+ data_type: TYPE_FP32
+ dims: [ 3, -1, -1 ]
+ }
+]
+output [
+ {
+ name: "cls_labels"
+ data_type: TYPE_INT32
+ dims: [ 1 ]
+ },
+ {
+ name: "cls_scores"
+ data_type: TYPE_FP32
+ dims: [ 1 ]
+ }
+]
+ensemble_scheduling {
+ step [
+ {
+ model_name: "cls_runtime"
+ model_version: 1
+ input_map {
+ key: "x"
+ value: "x"
+ }
+ output_map {
+ key: "softmax_0.tmp_0"
+ value: "infer_output"
+ }
+ },
+ {
+ model_name: "cls_postprocess"
+ model_version: 1
+ input_map {
+ key: "POST_INPUT_0"
+ value: "infer_output"
+ }
+ output_map {
+ key: "POST_OUTPUT_0"
+ value: "cls_labels"
+ }
+ output_map {
+ key: "POST_OUTPUT_1"
+ value: "cls_scores"
+ }
+ }
+ ]
+}
diff --git a/deploy/fastdeploy/serving/fastdeploy_serving/models/cls_runtime/config.pbtxt b/deploy/fastdeploy/serving/fastdeploy_serving/models/cls_runtime/config.pbtxt
new file mode 100755
index 0000000000000000000000000000000000000000..eb7b2550366a9c69cc90e002d5390eee99e31abb
--- /dev/null
+++ b/deploy/fastdeploy/serving/fastdeploy_serving/models/cls_runtime/config.pbtxt
@@ -0,0 +1,52 @@
+# optional; if name is specified, it must match the name of the model repository directory containing the model.
+name: "cls_runtime"
+backend: "fastdeploy"
+max_batch_size: 128
+
+# Input configuration of the model
+input [
+ {
+ # input name
+ name: "x"
+    # input type, such as TYPE_FP32, TYPE_UINT8, TYPE_INT8, TYPE_INT16, TYPE_INT32, TYPE_INT64, TYPE_FP16, TYPE_STRING
+    data_type: TYPE_FP32
+    # input shape; the batch dimension is omitted, so the actual shape is [batch, c, h, w]
+ dims: [ 3, -1, -1 ]
+ }
+]
+
+# The output of the model is configured in the same format as the input
+output [
+ {
+ name: "softmax_0.tmp_0"
+ data_type: TYPE_FP32
+ dims: [ 2 ]
+ }
+]
+
+# Number of instances of the model
+instance_group [
+ {
+ # The number of instances is 1
+ count: 1
+    # Use GPU; for CPU inference use KIND_CPU instead
+    kind: KIND_GPU
+    # The instance is deployed on GPU 0
+ gpus: [0]
+ }
+]
+
+optimization {
+ execution_accelerators {
+    # GPU inference configuration, used together with KIND_GPU
+    gpu_execution_accelerator : [
+      {
+        name : "paddle"
+        # Set the number of parallel inference threads to 4
+        parameters { key: "cpu_threads" value: "4" }
+        # Enable MKL-DNN acceleration; set to 0 to disable MKL-DNN
+        parameters { key: "use_mkldnn" value: "1" }
+      }
+ }
+ ]
+ }
+}
diff --git a/deploy/fastdeploy/serving/fastdeploy_serving/models/det_postprocess/1/model.py b/deploy/fastdeploy/serving/fastdeploy_serving/models/det_postprocess/1/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..87115c2d949762adfe3796487e93bc6e94483a60
--- /dev/null
+++ b/deploy/fastdeploy/serving/fastdeploy_serving/models/det_postprocess/1/model.py
@@ -0,0 +1,238 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import numpy as np
+import time
+import math
+import cv2
+import fastdeploy as fd
+
+# triton_python_backend_utils is available in every Triton Python model. You
+# need to use this module to create inference requests and responses. It also
+# contains some utility functions for extracting information from model_config
+# and converting Triton input/output types to numpy types.
+import triton_python_backend_utils as pb_utils
+
+
+def get_rotate_crop_image(img, box):
+    """Crop the quadrilateral region given by `box` (8 values encoding 4
+    corner points) out of `img` with a perspective transform, and rotate the
+    crop by 90 degrees when its height is at least 1.5x its width.
+    """
+ points = []
+ for i in range(4):
+ points.append([box[2 * i], box[2 * i + 1]])
+ points = np.array(points, dtype=np.float32)
+ img = img.astype(np.float32)
+ assert len(points) == 4, "shape of points must be 4*2"
+ img_crop_width = int(
+ max(
+ np.linalg.norm(points[0] - points[1]),
+ np.linalg.norm(points[2] - points[3])))
+ img_crop_height = int(
+ max(
+ np.linalg.norm(points[0] - points[3]),
+ np.linalg.norm(points[1] - points[2])))
+ pts_std = np.float32([[0, 0], [img_crop_width, 0],
+ [img_crop_width, img_crop_height],
+ [0, img_crop_height]])
+ M = cv2.getPerspectiveTransform(points, pts_std)
+ dst_img = cv2.warpPerspective(
+ img,
+ M, (img_crop_width, img_crop_height),
+ borderMode=cv2.BORDER_REPLICATE,
+ flags=cv2.INTER_CUBIC)
+ dst_img_height, dst_img_width = dst_img.shape[0:2]
+ if dst_img_height * 1.0 / dst_img_width >= 1.5:
+ dst_img = np.rot90(dst_img)
+ return dst_img
+
+
+class TritonPythonModel:
+ """Your Python model must use the same class name. Every Python model
+ that is created must have "TritonPythonModel" as the class name.
+ """
+
+ def initialize(self, args):
+ """`initialize` is called only once when the model is being loaded.
+ Implementing `initialize` function is optional. This function allows
+        the model to initialize any state associated with this model.
+ Parameters
+ ----------
+ args : dict
+ Both keys and values are strings. The dictionary keys and values are:
+ * model_config: A JSON string containing the model configuration
+ * model_instance_kind: A string containing model instance kind
+ * model_instance_device_id: A string containing model instance device ID
+ * model_repository: Model repository path
+ * model_version: Model version
+ * model_name: Model name
+ """
+ # You must parse model_config. JSON string is not parsed here
+ self.model_config = json.loads(args['model_config'])
+ print("model_config:", self.model_config)
+
+ self.input_names = []
+ for input_config in self.model_config["input"]:
+ self.input_names.append(input_config["name"])
+ print("postprocess input names:", self.input_names)
+
+ self.output_names = []
+ self.output_dtype = []
+ for output_config in self.model_config["output"]:
+ self.output_names.append(output_config["name"])
+ dtype = pb_utils.triton_string_to_numpy(output_config["data_type"])
+ self.output_dtype.append(dtype)
+ print("postprocess output names:", self.output_names)
+ self.postprocessor = fd.vision.ocr.DBDetectorPostprocessor()
+ self.cls_preprocessor = fd.vision.ocr.ClassifierPreprocessor()
+ self.rec_preprocessor = fd.vision.ocr.RecognizerPreprocessor()
+ self.cls_threshold = 0.9
+
+ def execute(self, requests):
+ """`execute` must be implemented in every Python model. `execute`
+ function receives a list of pb_utils.InferenceRequest as the only
+ argument. This function is called when an inference is requested
+ for this model. Depending on the batching configuration (e.g. Dynamic
+ Batching) used, `requests` may contain multiple requests. Every
+        Python model must create one pb_utils.InferenceResponse for every
+ pb_utils.InferenceRequest in `requests`. If there is an error, you can
+ set the error argument when creating a pb_utils.InferenceResponse.
+ Parameters
+ ----------
+ requests : list
+ A list of pb_utils.InferenceRequest
+ Returns
+ -------
+ list
+ A list of pb_utils.InferenceResponse. The length of this list must
+ be the same as `requests`
+ """
+ responses = []
+ for request in requests:
+ infer_outputs = pb_utils.get_input_tensor_by_name(
+ request, self.input_names[0])
+ im_infos = pb_utils.get_input_tensor_by_name(request,
+ self.input_names[1])
+ ori_imgs = pb_utils.get_input_tensor_by_name(request,
+ self.input_names[2])
+
+ infer_outputs = infer_outputs.as_numpy()
+ im_infos = im_infos.as_numpy()
+ ori_imgs = ori_imgs.as_numpy()
+
+ results = self.postprocessor.run([infer_outputs], im_infos)
+ batch_rec_texts = []
+ batch_rec_scores = []
+ batch_box_list = []
+ for i_batch in range(len(results)):
+
+ cls_labels = []
+ cls_scores = []
+ rec_texts = []
+ rec_scores = []
+
+ box_list = fd.vision.ocr.sort_boxes(results[i_batch])
+ image_list = []
+ if len(box_list) == 0:
+ image_list.append(ori_imgs[i_batch])
+ else:
+ for box in box_list:
+ crop_img = get_rotate_crop_image(ori_imgs[i_batch], box)
+ image_list.append(crop_img)
+
+ batch_box_list.append(box_list)
+
+ cls_pre_tensors = self.cls_preprocessor.run(image_list)
+ cls_dlpack_tensor = cls_pre_tensors[0].to_dlpack()
+ cls_input_tensor = pb_utils.Tensor.from_dlpack(
+ "x", cls_dlpack_tensor)
+
+ inference_request = pb_utils.InferenceRequest(
+ model_name='cls_pp',
+ requested_output_names=['cls_labels', 'cls_scores'],
+ inputs=[cls_input_tensor])
+ inference_response = inference_request.exec()
+ if inference_response.has_error():
+ raise pb_utils.TritonModelException(
+ inference_response.error().message())
+ else:
+ # Extract the output tensors from the inference response.
+ cls_labels = pb_utils.get_output_tensor_by_name(
+ inference_response, 'cls_labels')
+ cls_labels = cls_labels.as_numpy()
+
+ cls_scores = pb_utils.get_output_tensor_by_name(
+ inference_response, 'cls_scores')
+ cls_scores = cls_scores.as_numpy()
+
+ for index in range(len(image_list)):
+ if cls_labels[index] == 1 and cls_scores[
+ index] > self.cls_threshold:
+ image_list[index] = cv2.rotate(
+ image_list[index].astype(np.float32), 1)
+                        image_list[index] = image_list[index].astype(np.uint8)
+
+ rec_pre_tensors = self.rec_preprocessor.run(image_list)
+ rec_dlpack_tensor = rec_pre_tensors[0].to_dlpack()
+ rec_input_tensor = pb_utils.Tensor.from_dlpack(
+ "x", rec_dlpack_tensor)
+
+ inference_request = pb_utils.InferenceRequest(
+ model_name='rec_pp',
+ requested_output_names=['rec_texts', 'rec_scores'],
+ inputs=[rec_input_tensor])
+ inference_response = inference_request.exec()
+ if inference_response.has_error():
+ raise pb_utils.TritonModelException(
+ inference_response.error().message())
+ else:
+ # Extract the output tensors from the inference response.
+ rec_texts = pb_utils.get_output_tensor_by_name(
+ inference_response, 'rec_texts')
+ rec_texts = rec_texts.as_numpy()
+
+ rec_scores = pb_utils.get_output_tensor_by_name(
+ inference_response, 'rec_scores')
+ rec_scores = rec_scores.as_numpy()
+
+ batch_rec_texts.append(rec_texts)
+ batch_rec_scores.append(rec_scores)
+
+ out_tensor_0 = pb_utils.Tensor(
+ self.output_names[0],
+ np.array(
+ batch_rec_texts, dtype=np.object_))
+ out_tensor_1 = pb_utils.Tensor(self.output_names[1],
+ np.array(batch_rec_scores))
+ out_tensor_2 = pb_utils.Tensor(self.output_names[2],
+ np.array(batch_box_list))
+ inference_response = pb_utils.InferenceResponse(
+ output_tensors=[out_tensor_0, out_tensor_1, out_tensor_2])
+ responses.append(inference_response)
+ return responses
+
+ def finalize(self):
+ """`finalize` is called only once when the model is being unloaded.
+ Implementing `finalize` function is optional. This function allows
+ the model to perform any necessary clean ups before exit.
+ """
+ print('Cleaning up...')
diff --git a/deploy/fastdeploy/serving/fastdeploy_serving/models/det_postprocess/config.pbtxt b/deploy/fastdeploy/serving/fastdeploy_serving/models/det_postprocess/config.pbtxt
new file mode 100644
index 0000000000000000000000000000000000000000..378b7bab64f76a71163177f071f776b104c00df3
--- /dev/null
+++ b/deploy/fastdeploy/serving/fastdeploy_serving/models/det_postprocess/config.pbtxt
@@ -0,0 +1,45 @@
+name: "det_postprocess"
+backend: "python"
+max_batch_size: 128
+input [
+ {
+ name: "POST_INPUT_0"
+ data_type: TYPE_FP32
+ dims: [ 1, -1, -1]
+ },
+ {
+ name: "POST_INPUT_1"
+ data_type: TYPE_INT32
+ dims: [ 4 ]
+ },
+ {
+ name: "ORI_IMG"
+ data_type: TYPE_UINT8
+ dims: [ -1, -1, 3 ]
+ }
+]
+
+output [
+ {
+ name: "POST_OUTPUT_0"
+ data_type: TYPE_STRING
+ dims: [ -1, 1 ]
+ },
+ {
+ name: "POST_OUTPUT_1"
+ data_type: TYPE_FP32
+ dims: [ -1, 1 ]
+ },
+ {
+ name: "POST_OUTPUT_2"
+ data_type: TYPE_FP32
+ dims: [ -1, -1, 1 ]
+ }
+]
+
+instance_group [
+ {
+ count: 1
+ kind: KIND_CPU
+ }
+]
diff --git a/deploy/fastdeploy/serving/fastdeploy_serving/models/det_preprocess/1/model.py b/deploy/fastdeploy/serving/fastdeploy_serving/models/det_preprocess/1/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..28e838da5b6394b7ae14d0ad5f99bded996b14d8
--- /dev/null
+++ b/deploy/fastdeploy/serving/fastdeploy_serving/models/det_preprocess/1/model.py
@@ -0,0 +1,107 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import numpy as np
+import time
+
+import fastdeploy as fd
+
+# triton_python_backend_utils is available in every Triton Python model. You
+# need to use this module to create inference requests and responses. It also
+# contains some utility functions for extracting information from model_config
+# and converting Triton input/output types to numpy types.
+import triton_python_backend_utils as pb_utils
+
+
+class TritonPythonModel:
+ """Your Python model must use the same class name. Every Python model
+ that is created must have "TritonPythonModel" as the class name.
+ """
+
+ def initialize(self, args):
+ """`initialize` is called only once when the model is being loaded.
+ Implementing `initialize` function is optional. This function allows
+        the model to initialize any state associated with this model.
+ Parameters
+ ----------
+ args : dict
+ Both keys and values are strings. The dictionary keys and values are:
+ * model_config: A JSON string containing the model configuration
+ * model_instance_kind: A string containing model instance kind
+ * model_instance_device_id: A string containing model instance device ID
+ * model_repository: Model repository path
+ * model_version: Model version
+ * model_name: Model name
+ """
+ # You must parse model_config. JSON string is not parsed here
+ self.model_config = json.loads(args['model_config'])
+ print("model_config:", self.model_config)
+
+ self.input_names = []
+ for input_config in self.model_config["input"]:
+ self.input_names.append(input_config["name"])
+ print("preprocess input names:", self.input_names)
+
+ self.output_names = []
+ self.output_dtype = []
+ for output_config in self.model_config["output"]:
+ self.output_names.append(output_config["name"])
+ dtype = pb_utils.triton_string_to_numpy(output_config["data_type"])
+ self.output_dtype.append(dtype)
+ print("preprocess output names:", self.output_names)
+ self.preprocessor = fd.vision.ocr.DBDetectorPreprocessor()
+
+ def execute(self, requests):
+ """`execute` must be implemented in every Python model. `execute`
+ function receives a list of pb_utils.InferenceRequest as the only
+ argument. This function is called when an inference is requested
+ for this model. Depending on the batching configuration (e.g. Dynamic
+ Batching) used, `requests` may contain multiple requests. Every
+        Python model must create one pb_utils.InferenceResponse for every
+ pb_utils.InferenceRequest in `requests`. If there is an error, you can
+ set the error argument when creating a pb_utils.InferenceResponse.
+ Parameters
+ ----------
+ requests : list
+ A list of pb_utils.InferenceRequest
+ Returns
+ -------
+ list
+ A list of pb_utils.InferenceResponse. The length of this list must
+ be the same as `requests`
+ """
+ responses = []
+ for request in requests:
+ data = pb_utils.get_input_tensor_by_name(request,
+ self.input_names[0])
+ data = data.as_numpy()
+ outputs, im_infos = self.preprocessor.run(data)
+ dlpack_tensor = outputs[0].to_dlpack()
+ output_tensor_0 = pb_utils.Tensor.from_dlpack(self.output_names[0],
+ dlpack_tensor)
+ output_tensor_1 = pb_utils.Tensor(
+ self.output_names[1], np.array(
+ im_infos, dtype=np.int32))
+ inference_response = pb_utils.InferenceResponse(
+ output_tensors=[output_tensor_0, output_tensor_1])
+ responses.append(inference_response)
+ return responses
+
+ def finalize(self):
+ """`finalize` is called only once when the model is being unloaded.
+ Implementing `finalize` function is optional. This function allows
+ the model to perform any necessary clean ups before exit.
+ """
+ print('Cleaning up...')
diff --git a/deploy/fastdeploy/serving/fastdeploy_serving/models/det_preprocess/config.pbtxt b/deploy/fastdeploy/serving/fastdeploy_serving/models/det_preprocess/config.pbtxt
new file mode 100644
index 0000000000000000000000000000000000000000..93aa1d062ebb440429e588f0cefe9bb6235a2932
--- /dev/null
+++ b/deploy/fastdeploy/serving/fastdeploy_serving/models/det_preprocess/config.pbtxt
@@ -0,0 +1,37 @@
+# optional; if name is specified, it must match the name of the model repository directory containing the model.
+name: "det_preprocess"
+backend: "python"
+max_batch_size: 1
+
+# Input configuration of the model
+input [
+ {
+ # input name
+ name: "INPUT_0"
+ # input type, such as TYPE_FP32, TYPE_UINT8, TYPE_INT8, TYPE_INT16, TYPE_INT32, TYPE_INT64, TYPE_FP16 or TYPE_STRING
+ data_type: TYPE_UINT8
+ # input shape; the batch dimension is omitted here, so the actual shape is [batch, h, w, c]
+ dims: [ -1, -1, 3 ]
+ }
+]
+
+# The output of the model is configured in the same format as the input
+output [
+ {
+ name: "OUTPUT_0"
+ data_type: TYPE_FP32
+ dims: [ 3, -1, -1 ]
+ },
+ {
+ name: "OUTPUT_1"
+ data_type: TYPE_INT32
+ dims: [ 4 ]
+ }
+]
+
+instance_group [
+ {
+ count: 1
+ kind: KIND_CPU
+ }
+]
diff --git a/deploy/fastdeploy/serving/fastdeploy_serving/models/det_runtime/config.pbtxt b/deploy/fastdeploy/serving/fastdeploy_serving/models/det_runtime/config.pbtxt
new file mode 100755
index 0000000000000000000000000000000000000000..96d85e3e1941293b049242b1c2b1cf207bb108bc
--- /dev/null
+++ b/deploy/fastdeploy/serving/fastdeploy_serving/models/det_runtime/config.pbtxt
@@ -0,0 +1,52 @@
+# Optional. If a name is specified, it must match the name of the model repository directory containing the model.
+name: "det_runtime"
+backend: "fastdeploy"
+max_batch_size: 1
+
+# Input configuration of the model
+input [
+ {
+ # input name
+ name: "x"
+ # input type, such as TYPE_FP32, TYPE_UINT8, TYPE_INT8, TYPE_INT16, TYPE_INT32, TYPE_INT64, TYPE_FP16 or TYPE_STRING
+ data_type: TYPE_FP32
+ # input shape; the batch dimension is omitted here, so the actual shape is [batch, c, h, w]
+ dims: [ 3, -1, -1 ]
+ }
+]
+
+# The output of the model is configured in the same format as the input
+output [
+ {
+ name: "sigmoid_0.tmp_0"
+ data_type: TYPE_FP32
+ dims: [ 1, -1, -1 ]
+ }
+]
+
+# Number of instances of the model
+instance_group [
+ {
+ # The number of instances is 1
+ count: 1
+ # Use the GPU; for CPU inference, use KIND_CPU
+ kind: KIND_GPU
+ # The instance is deployed on GPU card 0
+ gpus: [0]
+ }
+]
+
+optimization {
+ execution_accelerators {
+ # GPU inference configuration, used together with KIND_GPU
+ gpu_execution_accelerator : [
+ {
+ name : "paddle"
+ # Set the number of parallel inference threads to 4
+ parameters { key: "cpu_threads" value: "4" }
+ # Enable MKL-DNN acceleration; set the value to 0 to disable it
+ parameters { key: "use_mkldnn" value: "1" }
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/deploy/fastdeploy/serving/fastdeploy_serving/models/pp_ocr/config.pbtxt b/deploy/fastdeploy/serving/fastdeploy_serving/models/pp_ocr/config.pbtxt
new file mode 100644
index 0000000000000000000000000000000000000000..5ef951107e4f36696a46ce7396ddedc5c9316cee
--- /dev/null
+++ b/deploy/fastdeploy/serving/fastdeploy_serving/models/pp_ocr/config.pbtxt
@@ -0,0 +1,87 @@
+name: "pp_ocr"
+platform: "ensemble"
+max_batch_size: 1
+input [
+ {
+ name: "INPUT"
+ data_type: TYPE_UINT8
+ dims: [ -1, -1, 3 ]
+ }
+]
+output [
+ {
+ name: "rec_texts"
+ data_type: TYPE_STRING
+ dims: [ -1, 1 ]
+ },
+ {
+ name: "rec_scores"
+ data_type: TYPE_FP32
+ dims: [ -1, 1 ]
+ },
+ {
+ name: "det_bboxes"
+ data_type: TYPE_FP32
+ dims: [ -1, -1, 1 ]
+ }
+]
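+# The ensemble chains det_preprocess -> det_runtime -> det_postprocess:
+# INPUT is preprocessed into "infer_input" plus the resize metadata "infos",
+# det_runtime produces "infer_output", and det_postprocess combines these with
+# the original image to produce rec_texts, rec_scores and det_bboxes.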
+ensemble_scheduling {
+ step [
+ {
+ model_name: "det_preprocess"
+ model_version: 1
+ input_map {
+ key: "INPUT_0"
+ value: "INPUT"
+ }
+ output_map {
+ key: "OUTPUT_0"
+ value: "infer_input"
+ }
+ output_map {
+ key: "OUTPUT_1"
+ value: "infos"
+ }
+ },
+ {
+ model_name: "det_runtime"
+ model_version: 1
+ input_map {
+ key: "x"
+ value: "infer_input"
+ }
+ output_map {
+ key: "sigmoid_0.tmp_0"
+ value: "infer_output"
+ }
+ },
+ {
+ model_name: "det_postprocess"
+ model_version: 1
+ input_map {
+ key: "POST_INPUT_0"
+ value: "infer_output"
+ }
+ input_map {
+ key: "POST_INPUT_1"
+ value: "infos"
+ }
+ input_map {
+ key: "ORI_IMG"
+ value: "INPUT"
+ }
+ output_map {
+ key: "POST_OUTPUT_0"
+ value: "rec_texts"
+ }
+ output_map {
+ key: "POST_OUTPUT_1"
+ value: "rec_scores"
+ }
+ output_map {
+ key: "POST_OUTPUT_2"
+ value: "det_bboxes"
+ }
+ }
+ ]
+}
diff --git a/deploy/fastdeploy/serving/fastdeploy_serving/models/rec_postprocess/1/model.py b/deploy/fastdeploy/serving/fastdeploy_serving/models/rec_postprocess/1/model.py
new file mode 100755
index 0000000000000000000000000000000000000000..c046cd929b75175bcbeceea80f14a8fb04c733ca
--- /dev/null
+++ b/deploy/fastdeploy/serving/fastdeploy_serving/models/rec_postprocess/1/model.py
@@ -0,0 +1,112 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import numpy as np
+import time
+import os
+import sys
+import codecs
+import fastdeploy as fd
+
+# triton_python_backend_utils is available in every Triton Python model. You
+# need to use this module to create inference requests and responses. It also
+# contains some utility functions for extracting information from model_config
+# and converting Triton input/output types to numpy types.
+import triton_python_backend_utils as pb_utils
+
+
+class TritonPythonModel:
+ """Your Python model must use the same class name. Every Python model
+ that is created must have "TritonPythonModel" as the class name.
+ """
+
+ def initialize(self, args):
+ """`initialize` is called only once when the model is being loaded.
+ Implementing the `initialize` function is optional. This function allows
+ the model to initialize any state associated with this model.
+ Parameters
+ ----------
+ args : dict
+ Both keys and values are strings. The dictionary keys and values are:
+ * model_config: A JSON string containing the model configuration
+ * model_instance_kind: A string containing model instance kind
+ * model_instance_device_id: A string containing model instance device ID
+ * model_repository: Model repository path
+ * model_version: Model version
+ * model_name: Model name
+ """
+ # model_config is passed in as a raw JSON string and must be parsed manually
+ self.model_config = json.loads(args['model_config'])
+ print("model_config:", self.model_config)
+
+ self.input_names = []
+ for input_config in self.model_config["input"]:
+ self.input_names.append(input_config["name"])
+ print("postprocess input names:", self.input_names)
+
+ self.output_names = []
+ self.output_dtype = []
+ for output_config in self.model_config["output"]:
+ self.output_names.append(output_config["name"])
+ dtype = pb_utils.triton_string_to_numpy(output_config["data_type"])
+ self.output_dtype.append(dtype)
+ print("postprocess output names:", self.output_names)
+
+ dir_name = os.path.dirname(os.path.realpath(__file__)) + "/"
+ file_name = dir_name + "ppocr_keys_v1.txt"
+ self.postprocessor = fd.vision.ocr.RecognizerPostprocessor(file_name)
+
+ def execute(self, requests):
+ """`execute` must be implemented in every Python model. `execute`
+ function receives a list of pb_utils.InferenceRequest as the only
+ argument. This function is called when an inference is requested
+ for this model. Depending on the batching configuration (e.g. Dynamic
+ Batching) used, `requests` may contain multiple requests. Every
+ Python model must create one pb_utils.InferenceResponse for every
+ pb_utils.InferenceRequest in `requests`. If there is an error, you can
+ set the error argument when creating a pb_utils.InferenceResponse.
+ Parameters
+ ----------
+ requests : list
+ A list of pb_utils.InferenceRequest
+ Returns
+ -------
+ list
+ A list of pb_utils.InferenceResponse. The length of this list must
+ be the same as `requests`
+ """
+ responses = []
+ for request in requests:
+ infer_outputs = pb_utils.get_input_tensor_by_name(
+ request, self.input_names[0])
+ infer_outputs = infer_outputs.as_numpy()
+ results = self.postprocessor.run([infer_outputs])
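+ # results[0] holds the decoded text strings and results[1] their
+ # confidence scores, matching the STRING and FP32 outputs declared
+ # in config.pbtxt.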
+ out_tensor_0 = pb_utils.Tensor(
+ self.output_names[0], np.array(
+ results[0], dtype=np.object_))
+ out_tensor_1 = pb_utils.Tensor(self.output_names[1],
+ np.array(results[1]))
+ inference_response = pb_utils.InferenceResponse(
+ output_tensors=[out_tensor_0, out_tensor_1])
+ responses.append(inference_response)
+ return responses
+
+ def finalize(self):
+ """`finalize` is called only once when the model is being unloaded.
+ Implementing the `finalize` function is optional. This function allows
+ the model to perform any necessary cleanup before exit.
+ """
+ print('Cleaning up...')
diff --git a/deploy/fastdeploy/serving/fastdeploy_serving/models/rec_postprocess/config.pbtxt b/deploy/fastdeploy/serving/fastdeploy_serving/models/rec_postprocess/config.pbtxt
new file mode 100644
index 0000000000000000000000000000000000000000..c125140c8b15f8d090ed7ef72ee855454059aa42
--- /dev/null
+++ b/deploy/fastdeploy/serving/fastdeploy_serving/models/rec_postprocess/config.pbtxt
@@ -0,0 +1,30 @@
+name: "rec_postprocess"
+backend: "python"
+max_batch_size: 128
+input [
+ {
+ name: "POST_INPUT_0"
+ data_type: TYPE_FP32
+ dims: [ -1, 6625 ]
+ }
+]
+
+output [
+ {
+ name: "POST_OUTPUT_0"
+ data_type: TYPE_STRING
+ dims: [ 1 ]
+ },
+ {
+ name: "POST_OUTPUT_1"
+ data_type: TYPE_FP32
+ dims: [ 1 ]
+ }
+]
+
+instance_group [
+ {
+ count: 1
+ kind: KIND_CPU
+ }
+]
diff --git a/deploy/fastdeploy/serving/fastdeploy_serving/models/rec_pp/config.pbtxt b/deploy/fastdeploy/serving/fastdeploy_serving/models/rec_pp/config.pbtxt
new file mode 100644
index 0000000000000000000000000000000000000000..bb79f90012ba70fc1eac7779218395c3135be8f4
--- /dev/null
+++ b/deploy/fastdeploy/serving/fastdeploy_serving/models/rec_pp/config.pbtxt
@@ -0,0 +1,54 @@
+name: "rec_pp"
+platform: "ensemble"
+max_batch_size: 128
+input [
+ {
+ name: "x"
+ data_type: TYPE_FP32
+ dims: [ 3, 48, -1 ]
+ }
+]
+output [
+ {
+ name: "rec_texts"
+ data_type: TYPE_STRING
+ dims: [ 1 ]
+ },
+ {
+ name: "rec_scores"
+ data_type: TYPE_FP32
+ dims: [ 1 ]
+ }
+]
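+# The ensemble chains rec_runtime -> rec_postprocess: the softmax output
+# "softmax_5.tmp_0" is routed as "infer_output" into rec_postprocess, which
+# decodes it into rec_texts and rec_scores.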
+ensemble_scheduling {
+ step [
+ {
+ model_name: "rec_runtime"
+ model_version: 1
+ input_map {
+ key: "x"
+ value: "x"
+ }
+ output_map {
+ key: "softmax_5.tmp_0"
+ value: "infer_output"
+ }
+ },
+ {
+ model_name: "rec_postprocess"
+ model_version: 1
+ input_map {
+ key: "POST_INPUT_0"
+ value: "infer_output"
+ }
+ output_map {
+ key: "POST_OUTPUT_0"
+ value: "rec_texts"
+ }
+ output_map {
+ key: "POST_OUTPUT_1"
+ value: "rec_scores"
+ }
+ }
+ ]
+}
diff --git a/deploy/fastdeploy/serving/fastdeploy_serving/models/rec_runtime/config.pbtxt b/deploy/fastdeploy/serving/fastdeploy_serving/models/rec_runtime/config.pbtxt
new file mode 100755
index 0000000000000000000000000000000000000000..037d7a9f285550c8946bcf3f3cb9191c667a792c
--- /dev/null
+++ b/deploy/fastdeploy/serving/fastdeploy_serving/models/rec_runtime/config.pbtxt
@@ -0,0 +1,52 @@
+# Optional. If a name is specified, it must match the name of the model repository directory containing the model.
+name: "rec_runtime"
+backend: "fastdeploy"
+max_batch_size: 128
+
+# Input configuration of the model
+input [
+ {
+ # input name
+ name: "x"
+ # input type, such as TYPE_FP32, TYPE_UINT8, TYPE_INT8, TYPE_INT16, TYPE_INT32, TYPE_INT64, TYPE_FP16 or TYPE_STRING
+ data_type: TYPE_FP32
+ # input shape; the batch dimension is omitted here, so the actual shape is [batch, c, h, w]
+ dims: [ 3, 48, -1 ]
+ }
+]
+
+# The output of the model is configured in the same format as the input
+output [
+ {
+ name: "softmax_5.tmp_0"
+ data_type: TYPE_FP32
+ dims: [ -1, 6625 ]
+ }
+]
+
+# Number of instances of the model
+instance_group [
+ {
+ # The number of instances is 1
+ count: 1
+ # Use the GPU; for CPU inference, use KIND_CPU
+ kind: KIND_GPU
+ # The instance is deployed on GPU card 0
+ gpus: [0]
+ }
+]
+
+optimization {
+ execution_accelerators {
+ # GPU inference configuration, used together with KIND_GPU
+ gpu_execution_accelerator : [
+ {
+ name : "paddle"
+ # Set the number of parallel inference threads to 4
+ parameters { key: "cpu_threads" value: "4" }
+ # Enable MKL-DNN acceleration; set the value to 0 to disable it
+ parameters { key: "use_mkldnn" value: "1" }
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/deploy/fastdeploy/serving/fastdeploy_serving/ppocr.png b/deploy/fastdeploy/serving/fastdeploy_serving/ppocr.png
new file mode 100644
index 0000000000000000000000000000000000000000..db12eddc49c9afe0d2d6ea661633abd8eff50c1b
Binary files /dev/null and b/deploy/fastdeploy/serving/fastdeploy_serving/ppocr.png differ
diff --git a/deploy/fastdeploy/serving/simple_serving/README.md b/deploy/fastdeploy/serving/simple_serving/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..82f856678d34ed1c12e7ecfc51918725bef34665
--- /dev/null
+++ b/deploy/fastdeploy/serving/simple_serving/README.md
@@ -0,0 +1,48 @@
+Simplified Chinese | [English](README.md)
+
+
+# PaddleOCR Python Lightweight Serving Example
+
+This lightweight serving example is built by FastDeploy on the Flask framework to quickly verify that an online model deployment is feasible. Inference requests are served over plain HTTP, which suits simple scenarios without concurrent inference; for high-concurrency, high-throughput workloads, please refer to [fastdeploy_serving](../fastdeploy_serving/) instead.
+
+
+## 1. Prepare the Deployment Environment
+
+Before deploying, confirm your hardware and software environment and download the prebuilt Python wheel package; see [FastDeploy prebuilt library installation](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install#FastDeploy预编译库安装)
+
+
+## 2. Start the Service
+```bash
+# Download the deployment example code
+git clone https://github.com/PaddlePaddle/PaddleOCR.git
+cd PaddleOCR/deploy/fastdeploy/serving/simple_serving
+
+# Download the models and the dictionary file
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar
+tar xvf ch_PP-OCRv3_det_infer.tar
+
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar
+tar -xvf ch_ppocr_mobile_v2.0_cls_infer.tar
+
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar
+tar xvf ch_PP-OCRv3_rec_infer.tar
+
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt
+
+# Start the service; edit the configuration items in server.py to select hardware, backend, etc.
+# The IP address and port can be specified with --host and --port
+fastdeploy simple_serving --app server:app
+```
+
+## 3. Client Request
+```bash
+# Download the deployment example code
+git clone https://github.com/PaddlePaddle/PaddleOCR.git
+cd PaddleOCR/deploy/fastdeploy/serving/simple_serving
+
+# Download a test image
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg
+
+# Request the service and fetch the inference result (adjust the IP address and port in the script if necessary)
+python client.py
+```
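+
+For reference, the request that `client.py` sends is plain JSON over HTTP, so the service can also be called without the helper script. The sketch below mirrors `client.py` and assumes the server is running on the default `127.0.0.1:8000` with `fastdeploy`, `opencv-python` and `requests` installed.
+
+```python
+import json
+
+import cv2
+import requests
+from fastdeploy.serving.utils import cv2_to_base64
+
+# Encode the test image as base64 and wrap it in the expected JSON schema
+im = cv2.imread("12.jpg")
+payload = {"data": {"image": cv2_to_base64(im)}, "parameters": {}}
+
+resp = requests.post(
+    "http://127.0.0.1:8000/fd/ppocrv3",
+    headers={"Content-Type": "application/json"},
+    data=json.dumps(payload))
+# A 200 response carries the serialized OCR result under the "result" key
+print(resp.json()["result"] if resp.status_code == 200 else resp.text)
+```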
diff --git a/deploy/fastdeploy/serving/simple_serving/client.py b/deploy/fastdeploy/serving/simple_serving/client.py
new file mode 100644
index 0000000000000000000000000000000000000000..6849c22046e67492c6a7d9db15fcf3f6b5a40d5d
--- /dev/null
+++ b/deploy/fastdeploy/serving/simple_serving/client.py
@@ -0,0 +1,24 @@
+import requests
+import json
+import cv2
+import fastdeploy as fd
+from fastdeploy.serving.utils import cv2_to_base64
+
+if __name__ == '__main__':
+ url = "http://127.0.0.1:8000/fd/ppocrv3"
+ headers = {"Content-Type": "application/json"}
+
+ im = cv2.imread("12.jpg")
+ data = {"data": {"image": cv2_to_base64(im)}, "parameters": {}}
+
+ resp = requests.post(url=url, headers=headers, data=json.dumps(data))
+ if resp.status_code == 200:
+ r_json = json.loads(resp.json()["result"])
+ print(r_json)
+ ocr_result = fd.vision.utils.json_to_ocr(r_json)
+ vis_im = fd.vision.vis_ppocr(im, ocr_result)
+ cv2.imwrite("visualized_result.jpg", vis_im)
+ print("Visualized result save in ./visualized_result.jpg")
+ else:
+ print("Error code:", resp.status_code)
+ print(resp.text)
diff --git a/deploy/fastdeploy/serving/simple_serving/server.py b/deploy/fastdeploy/serving/simple_serving/server.py
new file mode 100644
index 0000000000000000000000000000000000000000..0078b7112f91004926ced6623253589cdc68cab2
--- /dev/null
+++ b/deploy/fastdeploy/serving/simple_serving/server.py
@@ -0,0 +1,80 @@
+import fastdeploy as fd
+from fastdeploy.serving.server import SimpleServer
+import os
+import logging
+
+logging.getLogger().setLevel(logging.INFO)
+
+# Configurations
+det_model_dir = 'ch_PP-OCRv3_det_infer'
+cls_model_dir = 'ch_ppocr_mobile_v2.0_cls_infer'
+rec_model_dir = 'ch_PP-OCRv3_rec_infer'
+rec_label_file = 'ppocr_keys_v1.txt'
+device = 'cpu'
+# backend: 'paddle' or 'trt'; other backends can also be used, but the
+# runtime option below must be adjusted accordingly
+backend = 'paddle'
+
+# Prepare models
+# Detection model
+det_model_file = os.path.join(det_model_dir, "inference.pdmodel")
+det_params_file = os.path.join(det_model_dir, "inference.pdiparams")
+# Classification model
+cls_model_file = os.path.join(cls_model_dir, "inference.pdmodel")
+cls_params_file = os.path.join(cls_model_dir, "inference.pdiparams")
+# Recognition model
+rec_model_file = os.path.join(rec_model_dir, "inference.pdmodel")
+rec_params_file = os.path.join(rec_model_dir, "inference.pdiparams")
+
+# Setup runtime option to select hardware, backend, etc.
+option = fd.RuntimeOption()
+if device.lower() == 'gpu':
+ option.use_gpu()
+if backend == 'trt':
+ option.use_trt_backend()
+else:
+ option.use_paddle_infer_backend()
+
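+# Note: the assignments below (det_option = option, etc.) bind the same
+# RuntimeOption object rather than copying it, and the TRT shape settings
+# only take effect when backend == 'trt'.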
+det_option = option
+det_option.set_trt_input_shape("x", [1, 3, 64, 64], [1, 3, 640, 640],
+ [1, 3, 960, 960])
+
+# det_option.set_trt_cache_file("det_trt_cache.trt")
+print(det_model_file, det_params_file)
+det_model = fd.vision.ocr.DBDetector(
+ det_model_file, det_params_file, runtime_option=det_option)
+
+cls_batch_size = 1
+rec_batch_size = 6
+
+cls_option = option
+cls_option.set_trt_input_shape("x", [1, 3, 48, 10],
+ [cls_batch_size, 3, 48, 320],
+ [cls_batch_size, 3, 48, 1024])
+
+# cls_option.set_trt_cache_file("cls_trt_cache.trt")
+cls_model = fd.vision.ocr.Classifier(
+ cls_model_file, cls_params_file, runtime_option=cls_option)
+
+rec_option = option
+rec_option.set_trt_input_shape("x", [1, 3, 48, 10],
+ [rec_batch_size, 3, 48, 320],
+ [rec_batch_size, 3, 48, 2304])
+
+# rec_option.set_trt_cache_file("rec_trt_cache.trt")
+rec_model = fd.vision.ocr.Recognizer(
+ rec_model_file, rec_params_file, rec_label_file, runtime_option=rec_option)
+
+# Create PPOCRv3 pipeline
+ppocr_v3 = fd.vision.ocr.PPOCRv3(
+ det_model=det_model, cls_model=cls_model, rec_model=rec_model)
+
+ppocr_v3.cls_batch_size = cls_batch_size
+ppocr_v3.rec_batch_size = rec_batch_size
+
+# Create server, setup REST API
+app = SimpleServer()
+app.register(
+ task_name="fd/ppocrv3",
+ model_handler=fd.serving.handler.VisionModelHandler,
+ predictor=ppocr_v3)
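+
+# The service is now exposed at http://<host>:<port>/fd/ppocrv3, which matches
+# the URL used by client.py (http://127.0.0.1:8000/fd/ppocrv3 by default)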
diff --git a/deploy/fastdeploy/sophgo/README.md b/deploy/fastdeploy/sophgo/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9fd2e9563f48263cda37e98a6b4cf49ec5d53b4f
--- /dev/null
+++ b/deploy/fastdeploy/sophgo/README.md
@@ -0,0 +1,102 @@
+[English](README.md) | Simplified Chinese
+
+# Deploying PaddleOCR Models on SOPHGO with FastDeploy
+
+## 1. Introduction
+PaddleOCR models can be deployed on SOPHGO hardware through FastDeploy.
+
+## 2. Supported Model List
+
+The download links in the table below are provided by the PaddleOCR model zoo; see the [PP-OCR series model list](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.6/doc/doc_ch/models_list.md)
+
+| PaddleOCR version | Text detection | Direction classification | Text recognition | Dictionary file | Notes |
+|:----|:----|:----|:----|:----|:--------|
+| ch_PP-OCRv3 [recommended] |[ch_PP-OCRv3_det](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [ch_PP-OCRv3_rec](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar) | [ppocr_keys_v1.txt](https://bj.bcebos.com/paddlehub/fastdeploy/ppocr_keys_v1.txt) | Original ultra-lightweight OCRv3 models; support Chinese, English and multilingual text detection |
+| en_PP-OCRv3 [recommended] |[en_PP-OCRv3_det](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [en_PP-OCRv3_rec](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_rec_infer.tar) | [en_dict.txt](https://bj.bcebos.com/paddlehub/fastdeploy/en_dict.txt) | Original ultra-lightweight OCRv3 models; support English and digit recognition; apart from the training data of the detection and recognition models, identical to the Chinese models |
+| ch_PP-OCRv2 |[ch_PP-OCRv2_det](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [ch_PP-OCRv2_rec](https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar) | [ppocr_keys_v1.txt](https://bj.bcebos.com/paddlehub/fastdeploy/ppocr_keys_v1.txt) | Original ultra-lightweight OCRv2 models; support Chinese, English and multilingual text detection |
+| ch_PP-OCRv2_mobile |[ch_ppocr_mobile_v2.0_det](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [ch_ppocr_mobile_v2.0_rec](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar) | [ppocr_keys_v1.txt](https://bj.bcebos.com/paddlehub/fastdeploy/ppocr_keys_v1.txt) | Original ultra-lightweight OCRv2 models; support Chinese, English and multilingual text detection, and are lighter than PP-OCRv2 |
+| ch_PP-OCRv2_server |[ch_ppocr_server_v2.0_det](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar) | [ch_ppocr_mobile_v2.0_cls](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar) | [ch_ppocr_server_v2.0_rec](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar) |[ppocr_keys_v1.txt](https://bj.bcebos.com/paddlehub/fastdeploy/ppocr_keys_v1.txt) | OCRv2 server series models; support Chinese, English and multilingual text detection; larger than the ultra-lightweight models but more accurate |
+
+## 3. Prepare and Convert the PP-OCR Inference Models
+
+PP-OCRv3 consists of a text detection model (ch_PP-OCRv3_det), a direction classification model (ch_ppocr_mobile_v2.0_cls) and a text recognition model (ch_PP-OCRv3_rec).
+Before deployment on SOPHGO-TPU, the Paddle models above must be converted to bmodel format. Taking ch_PP-OCRv3_det as an example, the steps are:
+- Download the Paddle model [ch_PP-OCRv3_det](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar)
+- Convert the Paddle model to ONNX; see [Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX)
+- Convert the ONNX model to a bmodel; see [TPU-MLIR](https://github.com/sophgo/tpu-mlir)
+The example below walks through the conversion for reference.
+
+### 3.1 Download the ch_PP-OCRv3_det model and convert it to ONNX
+```shell
+wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar
+tar xvf ch_PP-OCRv3_det_infer.tar
+
+# Fix the input shape of the ch_PP-OCRv3_det model, turning its dynamic input into a static one
+python paddle_infer_shape.py --model_dir ch_PP-OCRv3_det_infer \
+ --model_filename inference.pdmodel \
+ --params_filename inference.pdiparams \
+ --save_dir ch_PP-OCRv3_det_infer_fix \
+ --input_shape_dict="{'x':[1,3,960,608]}"
+
+# Install the latest released version of paddle2onnx yourself, then convert the model to ONNX format
+paddle2onnx --model_dir ch_PP-OCRv3_det_infer_fix \
+ --model_filename inference.pdmodel \
+ --params_filename inference.pdiparams \
+ --save_file ch_PP-OCRv3_det_infer_fix.onnx \
+ --enable_dev_version True
+```
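+
+Optionally, the exported fixed-shape ONNX model can be sanity-checked before the bmodel conversion. The sketch below is only an illustration and assumes `onnxruntime` and `numpy` are installed; the input name `x` and the fixed shape `[1, 3, 960, 608]` come from the conversion commands above.
+
+```python
+import numpy as np
+import onnxruntime as ort
+
+# Load the fixed-shape ONNX export and run one dummy forward pass
+sess = ort.InferenceSession("ch_PP-OCRv3_det_infer_fix.onnx")
+dummy = np.random.rand(1, 3, 960, 608).astype(np.float32)
+outputs = sess.run(None, {"x": dummy})
+# Expect a single probability-map output (sigmoid_0.tmp_0)
+print([o.shape for o in outputs])
+```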
+
+### 3.2 Export the bmodel
+
+Taking the conversion of a BM1684x bmodel as an example, download the [TPU-MLIR](https://github.com/sophgo/tpu-mlir) project; see the [TPU-MLIR documentation](https://github.com/sophgo/tpu-mlir/blob/master/README.md) for the installation procedure.
+#### 3.2.1 Installation
+``` shell
+docker pull sophgo/tpuc_dev:latest
+
+# myname1234 is just an example; any other name works
+docker run --privileged --name myname1234 -v $PWD:/workspace -it sophgo/tpuc_dev:latest
+
+source ./envsetup.sh
+./build.sh
+```
+
+#### 3.2.2 Convert the ONNX model to a bmodel
+``` shell
+mkdir ch_PP-OCRv3_det && cd ch_PP-OCRv3_det
+
+# Put test images into this folder, together with the ch_PP-OCRv3_det_infer_fix.onnx converted in the previous step
+cp -rf ${REGRESSION_PATH}/dataset/COCO2017 .
+cp -rf ${REGRESSION_PATH}/image .
+# Place the ONNX model file ch_PP-OCRv3_det_infer_fix.onnx here
+
+mkdir workspace && cd workspace
+
+# Convert the ONNX model to an mlir model; the --output_names argument can be looked up with NETRON
+model_transform.py \
+ --model_name ch_PP-OCRv3_det \
+ --model_def ../ch_PP-OCRv3_det_infer_fix.onnx \
+ --input_shapes [[1,3,960,608]] \
+ --mean 0.0,0.0,0.0 \
+ --scale 0.0039216,0.0039216,0.0039216 \
+ --keep_aspect_ratio \
+ --pixel_format rgb \
+ --output_names sigmoid_0.tmp_0 \
+ --test_input ../image/dog.jpg \
+ --test_result ch_PP-OCRv3_det_top_outputs.npz \
+ --mlir ch_PP-OCRv3_det.mlir
+
+# Convert the mlir model to an F32 bmodel for BM1684x
+model_deploy.py \
+ --mlir ch_PP-OCRv3_det.mlir \
+ --quantize F32 \
+ --chip bm1684x \
+ --test_input ch_PP-OCRv3_det_in_f32.npz \
+ --test_reference ch_PP-OCRv3_det_top_outputs.npz \
+ --model ch_PP-OCRv3_det_1684x_f32.bmodel
+```
+The result is ch_PP-OCRv3_det_1684x_f32.bmodel, a model that can run on BM1684x. Following the same steps, ch_ppocr_mobile_v2.0_cls and ch_PP-OCRv3_rec can be converted to bmodel format as well. To accelerate the models further, the ONNX models can be converted to INT8 bmodels; see the [TPU-MLIR documentation](https://github.com/sophgo/tpu-mlir/blob/master/README.md) for the detailed steps.
+
+
+## 4. Detailed Deployment Examples
+- [Python deployment](python)
+- [C++ deployment](cpp)
diff --git a/deploy/fastdeploy/sophgo/cpp/CMakeLists.txt b/deploy/fastdeploy/sophgo/cpp/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b32846afedc66b290e53721191493645b04fb707
--- /dev/null
+++ b/deploy/fastdeploy/sophgo/cpp/CMakeLists.txt
@@ -0,0 +1,13 @@
+PROJECT(infer_demo C CXX)
+CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
+# Path of the downloaded and extracted FastDeploy SDK
+option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
+
+include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
+
+# Add the FastDeploy dependency headers
+include_directories(${FASTDEPLOY_INCS})
+
+add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
+# Link against the FastDeploy libraries
+target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})
diff --git a/deploy/fastdeploy/sophgo/cpp/README.md b/deploy/fastdeploy/sophgo/cpp/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..450b207cda2f9ee23ff222975c60065e00b6215b
--- /dev/null
+++ b/deploy/fastdeploy/sophgo/cpp/README.md
@@ -0,0 +1,60 @@
+[English](README_CN.md) | Simplified Chinese
+# PP-OCRv3 SOPHGO C++ Deployment Example
+This directory provides `infer.cc` as an example of quickly deploying the PP-OCRv3 model on a SOPHGO BM1684x board.
+
+## 1. Prepare the Deployment Environment
+Before deploying, compile the prediction library for SOPHGO hardware yourself; see [SOPHGO hardware deployment environment](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install#算能硬件部署环境)
+
+## 2. Create the Basic Directory Layout
+
+This example consists of the following parts
+```text
+.
+├── CMakeLists.txt
+├── fastdeploy-sophgo # the compiled SDK folder
+├── image # folder holding the test images
+├── infer.cc
+└── model # folder holding the model files
+```
+
+## 3. Deployment Example
+
+### 3.1 Download the deployment example code
+```bash
+# Download the deployment example code
+git clone https://github.com/PaddlePaddle/PaddleOCR.git
+cd PaddleOCR/deploy/fastdeploy/sophgo/cpp
+```
+
+### 3.2 Copy the bmodel files into the model folder
+Convert the Paddle models to SOPHGO bmodel models following [this document](../README.md), then copy the converted bmodel files into `model`.
+
+### 3.3 Put a test image into the image folder and prepare the dictionary file
+```bash
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg
+cp 12.jpg image/
+
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt
+```
+
+### 3.4 Build the example
+
+```bash
+mkdir build && cd build
+cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/../fastdeploy-sophgo
+make
+```
+
+### 3.5 Run the example
+
+```bash
+./infer_demo model ./ppocr_keys_v1.txt image/12.jpg
+```
+
+
+## 4. More Guides
+
+- [PP-OCR series C++ API reference](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/cpp/html/namespacefastdeploy_1_1vision_1_1ocr.html)
+- [Overview of deploying PaddleOCR models with FastDeploy](../../)
+- [PP-OCRv3 Python deployment](../python)
+- To tune the pre/post-processing hyperparameters, use the text detection or recognition models on their own, or switch to other models, see [PP-OCR series deployment on CPU/GPU](../../cpu-gpu/cpp/README.md) for more detailed documentation
diff --git a/deploy/fastdeploy/sophgo/cpp/infer.cc b/deploy/fastdeploy/sophgo/cpp/infer.cc
new file mode 100644
index 0000000000000000000000000000000000000000..181561b39e94c6e242502de24c17aadcda2d34c7
--- /dev/null
+++ b/deploy/fastdeploy/sophgo/cpp/infer.cc
@@ -0,0 +1,136 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+#ifdef WIN32
+const char sep = '\\';
+#else
+const char sep = '/';
+#endif
+
+void InitAndInfer(const std::string &det_model_dir,
+ const std::string &rec_label_file,
+ const std::string &image_file,
+ const fastdeploy::RuntimeOption &option) {
+ auto det_model_file =
+ det_model_dir + sep + "ch_PP-OCRv3_det_1684x_f32.bmodel";
+ auto det_params_file = det_model_dir + sep + "";
+
+ auto cls_model_file =
+ det_model_dir + sep + "ch_ppocr_mobile_v2.0_cls_1684x_f32.bmodel";
+ auto cls_params_file = det_model_dir + sep + "";
+
+ auto rec_model_file =
+ det_model_dir + sep + "ch_PP-OCRv3_rec_1684x_f32.bmodel";
+ auto rec_params_file = det_model_dir + sep + "";
+
+ auto format = fastdeploy::ModelFormat::SOPHGO;
+
+ auto det_option = option;
+ auto cls_option = option;
+ auto rec_option = option;
+
+ // The cls and rec models can now run inference on a batch of images.
+ // The inference batch sizes are declared here and applied after the
+ // PP-OCR pipeline is created.
+ int cls_batch_size = 1;
+ int rec_batch_size = 1;
+
+ // If the TRT backend is used, the dynamic shapes are set as follows.
+ // We recommend setting the width and height of the detection model input to
+ // multiples of 32, and setting the TRT input shapes as shown below.
+ det_option.SetTrtInputShape("x", {1, 3, 64, 64}, {1, 3, 640, 640},
+ {1, 3, 960, 960});
+ cls_option.SetTrtInputShape("x", {1, 3, 48, 10}, {cls_batch_size, 3, 48, 320},
+ {cls_batch_size, 3, 48, 1024});
+ rec_option.SetTrtInputShape("x", {1, 3, 48, 10}, {rec_batch_size, 3, 48, 320},
+ {rec_batch_size, 3, 48, 2304});
+
+ // The TRT cache file can be saved to disk as follows.
+ // det_option.SetTrtCacheFile(det_model_dir + sep + "det_trt_cache.trt");
+ // cls_option.SetTrtCacheFile(cls_model_dir + sep + "cls_trt_cache.trt");
+ // rec_option.SetTrtCacheFile(rec_model_dir + sep + "rec_trt_cache.trt");
+
+ auto det_model = fastdeploy::vision::ocr::DBDetector(
+ det_model_file, det_params_file, det_option, format);
+ auto cls_model = fastdeploy::vision::ocr::Classifier(
+ cls_model_file, cls_params_file, cls_option, format);
+ auto rec_model = fastdeploy::vision::ocr::Recognizer(
+ rec_model_file, rec_params_file, rec_label_file, rec_option, format);
+
+ // Static shape inference can be enabled for the rec model when deploying
+ // PP-OCR on hardware that does not handle dynamic shape inference well,
+ // such as the Huawei Ascend series.
+ rec_model.GetPreprocessor().SetStaticShapeInfer(true);
+ rec_model.GetPreprocessor().SetRecImageShape({3, 48, 584});
+
+ assert(det_model.Initialized());
+ assert(cls_model.Initialized());
+ assert(rec_model.Initialized());
+
+ // The classification model is optional, so the PP-OCR pipeline can also be
+ // built without it:
+ // auto ppocr_v3 = fastdeploy::pipeline::PPOCRv3(&det_model, &rec_model);
+ auto ppocr_v3 =
+ fastdeploy::pipeline::PPOCRv3(&det_model, &cls_model, &rec_model);
+
+ // Set the inference batch size for the cls and rec models; the value can be
+ // -1 or any positive integer. When it is set to -1, the batch size of the
+ // cls and rec models defaults to the number of boxes detected by the det
+ // model.
+ ppocr_v3.SetClsBatchSize(cls_batch_size);
+ ppocr_v3.SetRecBatchSize(rec_batch_size);
+
+ if (!ppocr_v3.Initialized()) {
+ std::cerr << "Failed to initialize PP-OCR." << std::endl;
+ return;
+ }
+
+ auto im = cv::imread(image_file);
+ auto im_bak = im.clone();
+
+ fastdeploy::vision::OCRResult result;
+ if (!ppocr_v3.Predict(&im, &result)) {
+ std::cerr << "Failed to predict." << std::endl;
+ return;
+ }
+
+ std::cout << result.Str() << std::endl;
+
+ auto vis_im = fastdeploy::vision::VisOcr(im_bak, result);
+ cv::imwrite("vis_result.jpg", vis_im);
+ std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
+int main(int argc, char *argv[]) {
+ if (argc < 4) {
+ std::cout << "Usage: infer_demo path/to/model "
+ "path/to/rec_label_file path/to/image "
+ "e.g ./infer_demo ./ocr_bmodel "
+ "./ppocr_keys_v1.txt ./12.jpg"
+ << std::endl;
+ return -1;
+ }
+
+ fastdeploy::RuntimeOption option;
+ option.UseSophgo();
+ option.UseSophgoBackend();
+
+ std::string model_dir = argv[1];
+ std::string rec_label_file = argv[2];
+ std::string test_image = argv[3];
+ InitAndInfer(model_dir, rec_label_file, test_image, option);
+ return 0;
+}
diff --git a/deploy/fastdeploy/sophgo/python/README.md b/deploy/fastdeploy/sophgo/python/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..0926d889ad94350ae8173afc8588628fa9311c13
--- /dev/null
+++ b/deploy/fastdeploy/sophgo/python/README.md
@@ -0,0 +1,46 @@
+[English](README.md) | Simplified Chinese
+# PP-OCRv3 SOPHGO Python Deployment Example
+This directory provides `infer.py` as an example of quickly deploying PP-OCRv3 on a SOPHGO TPU.
+
+## 1. Prepare the Deployment Environment
+
+Before deploying, compile and install the FastDeploy Python wheel for SOPHGO hardware yourself; see [SOPHGO hardware deployment environment](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install#算能硬件部署环境)
+
+
+## 2. Run the Deployment Example
+
+### 2.1 Prepare the models
+Convert the Paddle models to SOPHGO bmodel models following [this document](../README.md)
+
+### 2.2 Deploy
+```bash
+# Download the deployment example code
+git clone https://github.com/PaddlePaddle/PaddleOCR.git
+cd PaddleOCR/deploy/fastdeploy/sophgo/python
+
+# Download a test image
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg
+
+# Download the dictionary file
+wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt
+
+# Run inference
+python3 infer.py --det_model ocr_bmodel/ch_PP-OCRv3_det_1684x_f32.bmodel \
+                 --cls_model ocr_bmodel/ch_ppocr_mobile_v2.0_cls_1684x_f32.bmodel \
+                 --rec_model ocr_bmodel/ch_PP-OCRv3_rec_1684x_f32.bmodel \
+                 --rec_label_file ../ppocr_keys_v1.txt \
+                 --image ../12.jpg
+
+# After the run finishes, the output looks like this
+det boxes: [[42,413],[483,391],[484,428],[43,450]]rec text: 上海斯格威铂尔大酒店 rec score:0.952958 cls label: 0 cls score: 1.000000
+det boxes: [[187,456],[399,448],[400,480],[188,488]]rec text: 打浦路15号 rec score:0.897335 cls label: 0 cls score: 1.000000
+det boxes: [[23,507],[513,488],[515,529],[24,548]]rec text: 绿洲仕格维花园公寓 rec score:0.994589 cls label: 0 cls score: 1.000000
+det boxes: [[74,553],[427,542],[428,571],[75,582]]rec text: 打浦路252935号 rec score:0.900663 cls label: 0 cls score: 1.000000
+
+# The visualized result is saved in sophgo_result.jpg
+```
+
+## 3. Other Documents
+- [PP-OCRv3 C++ deployment](../cpp)
+- [Converting PP-OCRv3 models for SOPHGO](../README.md)
+- To tune the pre/post-processing hyperparameters, use the text detection or recognition models on their own, or switch to other models, see [PP-OCR series deployment on CPU/GPU](../../cpu-gpu/cpp/README.md) for more detailed documentation
diff --git a/deploy/fastdeploy/sophgo/python/infer.py b/deploy/fastdeploy/sophgo/python/infer.py
new file mode 100644
index 0000000000000000000000000000000000000000..356317099f4072bad3830c403c14913c74f573cd
--- /dev/null
+++ b/deploy/fastdeploy/sophgo/python/infer.py
@@ -0,0 +1,116 @@
+import fastdeploy as fd
+import cv2
+import os
+
+
+def parse_arguments():
+ import argparse
+ import ast
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--det_model", required=True, help="Path of Detection model of PPOCR.")
+ parser.add_argument(
+ "--cls_model",
+ required=True,
+ help="Path of Classification model of PPOCR.")
+ parser.add_argument(
+ "--rec_model",
+ required=True,
+ help="Path of Recognization model of PPOCR.")
+ parser.add_argument(
+ "--rec_label_file",
+ required=True,
+ help="Path of Recognization label of PPOCR.")
+ parser.add_argument(
+ "--image", type=str, required=True, help="Path of test image file.")
+
+ return parser.parse_args()
+
+
+args = parse_arguments()
+
+# Configure the runtime and load the models
+runtime_option = fd.RuntimeOption()
+runtime_option.use_sophgo()
+
+# Detection model, detects the text boxes
+det_model_file = args.det_model
+det_params_file = ""
+# Classification model for text direction, optional
+cls_model_file = args.cls_model
+cls_params_file = ""
+# Recognition model, recognizes the text
+rec_model_file = args.rec_model
+rec_params_file = ""
+rec_label_file = args.rec_label_file
+
+# The cls and rec models of PP-OCR now support batch inference.
+# The two variables below are used to set the TRT input shapes and, once the
+# PP-OCR pipeline has been created, to configure batch inference.
+cls_batch_size = 1
+rec_batch_size = 1
+
+# When using TRT, set the dynamic shape on the runtime of each of the three models, then create the models.
+# Note: set the dynamic input of the cls model and create it only after the det model has been created; the same applies to the rec model.
+# If you change the input shape of the det model yourself, we recommend setting its width and height to multiples of 32.
+det_option = runtime_option
+det_option.set_trt_input_shape("x", [1, 3, 64, 64], [1, 3, 640, 640],
+ [1, 3, 960, 960])
+# The TRT engine file can be cached to local disk
+# det_option.set_trt_cache_file(args.det_model + "/det_trt_cache.trt")
+det_model = fd.vision.ocr.DBDetector(
+ det_model_file,
+ det_params_file,
+ runtime_option=det_option,
+ model_format=fd.ModelFormat.SOPHGO)
+
+cls_option = runtime_option
+cls_option.set_trt_input_shape("x", [1, 3, 48, 10],
+ [cls_batch_size, 3, 48, 320],
+ [cls_batch_size, 3, 48, 1024])
+# The TRT engine file can be cached to local disk
+# cls_option.set_trt_cache_file(args.cls_model + "/cls_trt_cache.trt")
+cls_model = fd.vision.ocr.Classifier(
+ cls_model_file,
+ cls_params_file,
+ runtime_option=cls_option,
+ model_format=fd.ModelFormat.SOPHGO)
+
+rec_option = runtime_option
+rec_option.set_trt_input_shape("x", [1, 3, 48, 10],
+ [rec_batch_size, 3, 48, 320],
+ [rec_batch_size, 3, 48, 2304])
+# The TRT engine file can be cached to local disk
+# rec_option.set_trt_cache_file(args.rec_model + "/rec_trt_cache.trt")
+rec_model = fd.vision.ocr.Recognizer(
+ rec_model_file,
+ rec_params_file,
+ rec_label_file,
+ runtime_option=rec_option,
+ model_format=fd.ModelFormat.SOPHGO)
+
+# Create the PP-OCR pipeline, chaining the 3 models; cls_model is optional and can be set to None if not needed
+ppocr_v3 = fd.vision.ocr.PPOCRv3(
+ det_model=det_model, cls_model=cls_model, rec_model=rec_model)
+
+# The lines below enable static shape inference for the rec model; here its static input shape is [3, 48, 584]
+rec_model.preprocessor.static_shape_infer = True
+rec_model.preprocessor.rec_image_shape = [3, 48, 584]
+
+# Set the inference batch size for the cls and rec models.
+# The value can be -1 or any positive integer.
+# When set to -1, the batch size of the cls and rec models defaults to the number of boxes detected by the det model.
+ppocr_v3.cls_batch_size = cls_batch_size
+ppocr_v3.rec_batch_size = rec_batch_size
+
+# Load the test image
+im = cv2.imread(args.image)
+
+#预测并打印结果
+result = ppocr_v3.predict(im)
+
+print(result)
+
+# Visualize the result
+vis_im = fd.vision.vis_ppocr(im, result)
+cv2.imwrite("sophgo_result.jpg", vis_im)
+print("Visualized result save in ./sophgo_result.jpg")
diff --git a/deploy/fastdeploy/web/README.md b/deploy/fastdeploy/web/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..5ca9628d30630d538479e6df90c744a1eea52261
--- /dev/null
+++ b/deploy/fastdeploy/web/README.md
@@ -0,0 +1,33 @@
+[English](README.md) | Simplified Chinese
+# PP-OCRv3 Web Frontend Deployment Example
+
+This section describes how to run the PaddleOCR PP-OCRv3 model in the browser, and the JS interface of the @paddle-js-models/ocr npm package.
+
+
+## 1. Deploy PP-OCRv3 in the Frontend
+For the PP-OCRv3 web demo, see the [**reference documentation**](https://github.com/PaddlePaddle/FastDeploy/tree/develop/examples/application/js/web_demo)
+
+## 2. PP-OCRv3 JS Interface
+
+```js
+import * as ocr from "@paddle-js-models/ocr";
+await ocr.init(detConfig, recConfig);
+const res = await ocr.recognize(img, option, postConfig);
+```
+This loads and initializes the OCR models, which are in Paddle.js model format; see the [documentation](https://github.com/PaddlePaddle/FastDeploy/tree/develop/examples/application/js/web_demo/README.md) for how to convert a model to the JS format.
+
+**init function parameters**
+
+> * **detConfig**(dict): text detection model configuration. Default: {modelPath: 'https://js-models.bj.bcebos.com/PaddleOCR/PP-OCRv3/ch_PP-OCRv3_det_infer_js_960/model.json', fill: '#fff', mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225]}; modelPath is the path of the text detection model, fill is the padding value used in image preprocessing, and mean and std are the preprocessing mean and standard deviation
+> * **recConfig**(dict): text recognition model configuration. Default: {modelPath: 'https://js-models.bj.bcebos.com/PaddleOCR/PP-OCRv3/ch_PP-OCRv3_rec_infer_js/model.json', fill: '#000', mean: [0.5, 0.5, 0.5], std: [0.5, 0.5, 0.5]}; modelPath is the path of the text recognition model, fill is the padding value used in image preprocessing, and mean and std are the preprocessing mean and standard deviation
+
+
+**recognize function parameters**
+
+> * **img**(HTMLImageElement): the input image, of type HTMLImageElement.
+> * **option**(dict): canvas parameters for visualizing the detected text boxes; optional.
+> * **postConfig**(dict): text detection postprocessing parameters. Default: {shape: 960, thresh: 0.3, box_thresh: 0.6, unclip_ratio: 1.5}; thresh is the binarization threshold for the predicted probability map; box_thresh is the score threshold for output boxes, and boxes below it are discarded; unclip_ratio is the expansion ratio of the output boxes.
+
+
+## Other Documents
+- [PP-OCRv3 WeChat mini-program deployment](https://github.com/PaddlePaddle/FastDeploy/tree/develop/examples/application/js/mini_program)