# app.py — Gradio demo for the PP-Human v2 pipeline.
import gradio as gr
import base64
from io import BytesIO
from PIL import Image
import numpy as np

from pipeline.pipeline import pp_humanv2


# UGC: Define the inference fn() for your models
def model_inference(input_date, avtivity_list):
    """Run the PP-Human v2 pipeline on the given input with the selected tasks.

    Args:
        input_date: Input image or video as provided by the Gradio widget.
        avtivity_list: List of task names selected in the checkbox group
            (may be ``None`` after the UI has been cleared).

    Returns:
        The pipeline result produced by ``pp_humanv2``.
    """
    # Copy so the caller's list (Gradio widget state) is never mutated,
    # and tolerate a cleared (None) checkbox group.
    tasks = list(avtivity_list) if avtivity_list else []

    # Entrance counting and trajectory drawing both require MOT tracking,
    # so enable it implicitly when either of them is requested.
    if 'do_entrance_counting' in tasks or 'draw_center_traj' in tasks:
        if 'MOT' not in tasks:
            tasks.append('MOT')

    return pp_humanv2(input_date, tasks)


def clear_all():
    """Reset the input, output and task-selection widgets to empty."""
    return (None,) * 3


# Build the two-tab (image / video) demo UI.
with gr.Blocks() as demo:
    gr.Markdown("PP-Human Pipeline")

    with gr.Tabs():

        with gr.TabItem("image"):
            img_in = gr.Image(
                value="https://paddledet.bj.bcebos.com/modelcenter/images/PP-Human/human_attr.jpg",
                label="Input")
            img_out = gr.Image(label="Output")

            # Image mode currently supports attribute recognition only.
            img_activity_list = gr.CheckboxGroup(["ATTR"])
            img_button1 = gr.Button("Submit")
            img_button2 = gr.Button("Clear")

        with gr.TabItem("video"):
            video_in = gr.Video(
                value="https://paddledet.bj.bcebos.com/modelcenter/images/PP-Human/human_attr.mp4",
                label="Input")
            video_out = gr.Video(label="Output")

            video_activity_list = gr.CheckboxGroup(
                ["MOT", "ATTR", "VIDEO_ACTION", "SKELETON_ACTION",
                 "ID_BASED_DETACTION", "ID_BASED_CLSACTION", "REID",
                 "do_entrance_counting", "draw_center_traj"],
                label="Task Choice (note: only one task should be checked)")
            video_button1 = gr.Button("Submit")
            video_button2 = gr.Button("Clear")

    # Wire the image tab: Submit runs inference, Clear resets all widgets.
    img_button1.click(
        fn=model_inference,
        inputs=[img_in, img_activity_list],
        outputs=img_out)
    img_button2.click(
        fn=clear_all,
        inputs=None,
        outputs=[img_in, img_out, img_activity_list])

    # Wire the video tab the same way.
    video_button1.click(
        fn=model_inference,
        inputs=[video_in, video_activity_list],
        outputs=video_out)
    video_button2.click(
        fn=clear_all,
        inputs=None,
        outputs=[video_in, video_out, video_activity_list])

demo.launch()