Unverified commit 96f4c9c2, authored by chenjian, committed by GitHub

Bump vdl to v2.5.2 (#1250)

* [New Feature] Add paddle2onnx component (#1228)

* add paddle2onnx component

* add comments

* fix

* supplement failure judgement

* fix format

* fix format

* [Frontend] Add model conversion frontend (#1232)

* add paddle2onnx component

* add comments

* fix

* onnx2paddle

* filetype

* filetype2

* filetype3

* filetype4

* filetype5

* filetype5

* filetype6

* filetype7

* filetype7

* filetype8

* filetype8

* filetype8

* filetype8

* filetype9

* filetype10

* filetype11

* filetype12

* filetype13

* filetype14

* filetype15

* filetype16

* filetype17

* filetype18

* add animation for converting

* add animation for downloading

* remove unnecessary file

* optimize logic

* add text

* update

---------
Co-authored-by: xiaoyixin-cmd <1634228212@qq.com>

* [Bug] Fix export graph bug in windows (#1244)

* fix export graph bug in windows

* fix format

* fix compatibility problem for Python 3.10 and gradio (#1249)

* bump visualdl to v2.5.2

---------
Co-authored-by: xiaoyixin-cmd <1634228212@qq.com>
Parent e41769d5
@@ -4,6 +4,29 @@ VisualDL的更新记录在此查看。
 This is the Changelog for the VisualDL 2.0 project.

+## v2.5.2 - 2023-05-04
+
+**ZH**
+
+- 功能新增
+    - **BE**: 增加Paddle2Onnx模型转换组件 (#1228)
+    - **FE**: 增加模型转换界面 (#1232)
+- 问题修复
+    - **BE**: 修复windows动态图导出bug (#1244)
+    - **BE**: 修复兼容性问题 (#1249)
+
+**EN**
+
+- Features
+    - **BE**: Add Paddle2Onnx component (#1228)
+    - **FE**: Add model conversion page (#1232)
+- Bug Fixes
+    - **BE**: Fix bug for exporting dynamic graph in Windows (#1244)
+    - **BE**: Fix compatibility problem (#1249)
+
 ## v2.5.1 - 2023-02-20

 **ZH**
......
@@ -11,7 +11,7 @@
     "error": "Error occurred",
     "graph": "Graphs",
     "dynamic_graph": "dynamic",
-    "ToggleGraph": "X2Paddle",
+    "x2paddle": "Model Conversion",
     "static_graph": "static",
     "high-dimensional": "High Dimensional",
     "profiler": "performance analysis",
......
@@ -8,5 +8,26 @@
     "warin-info4": "The model has been converted, please do not click again",
     "warin-info5": "Please upload the model file and convert it",
     "warin-info6": "Model file has been converted, please do not click again",
-    "warin-info7": "Please upload the model file"
+    "warin-info7": "Please upload the model file",
+    "Conversion": "Conversion",
+    "pdmodels": "pdmodels",
+    "pdiparams": "pdiparams",
+    "model": "model",
+    "opset_version": "opset_version",
+    "deploy_backend": "deploy_backend",
+    "lite_valid_places": "lite_valid_places",
+    "lite_model_type": "lite_model_type",
+    "convert_to_lite": "convert_to_lite",
+    "onnx_model": "onnx model",
+    "Download": "Download",
+    "Reload": "Reload",
+    "View": "View",
+    "Please": "Please select the file",
+    "isRequire": "This item is required",
+    "isYes": "Yes",
+    "isNo": "No",
+    "Paddle2OnnxTitle": "Paddle2Onnx model conversion configuration",
+    "Onnx2PaddleTitle": "Onnx2Paddle model conversion configuration",
+    "converting": "Converting now, please wait",
+    "downloading": "Downloading now, please wait"
 }
@@ -11,7 +11,7 @@
     "error": "发生错误",
     "graph": "网络结构",
     "dynamic_graph": "动态",
-    "ToggleGraph": "X2Paddle",
+    "x2paddle": "模型转换",
     "static_graph": "静态",
     "high-dimensional": "数据降维",
     "profiler": "性能分析",
......
@@ -8,5 +8,26 @@
     "warin-info4": "模型已转换,请勿再次点击",
     "warin-info5": "请上传模型文件并转换",
     "warin-info6": "模型文件已转换,请勿再次点击",
-    "warin-info7": "请上传模型文件"
+    "warin-info7": "请上传模型文件",
+    "Conversion": "转换",
+    "pdmodels": "模型结构文件",
+    "pdiparams": "模型参数文件",
+    "model": "模型",
+    "opset_version": "op集合版本",
+    "deploy_backend": "部署后端类型",
+    "lite_valid_places": "Lite后端类型",
+    "lite_model_type": "Lite模型类型",
+    "convert_to_lite": "是否转成Paddle-Lite支持格式",
+    "onnx_model": "onnx模型",
+    "Download": "下载",
+    "Reload": "重新载入",
+    "View": "视图",
+    "Please": "请上传模型文件",
+    "isRequire": "该项为必填项",
+    "isYes": "是",
+    "isNo": "否",
+    "Paddle2OnnxTitle": "Paddle2Onnx模型转换配置",
+    "Onnx2PaddleTitle": "Onnx2Paddle模型转换配置",
+    "converting": "转换中,请稍等片刻",
+    "downloading": "文件下载中,请稍等片刻"
 }
/**
* Copyright 2020 Baidu Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import React, {FunctionComponent} from 'react';
import {WithStyled, contentHeight, contentMargin, headerHeight, position, transitionProps} from '~/utils/style';
import BodyLoading from '~/components/BodyLoading';
import styled from 'styled-components';
const Section = styled.section`
display: flex;
font-family: PingFangSC-Regular;
aside {
position: static;
height: auto;
}
`;
const Article = styled.article`
flex: auto;
min-width: 0;
margin: ${contentMargin};
min-height: ${contentHeight};
`;
const Aside = styled.aside`
flex: none;
background-color: var(--background-color);
height: ${`calc(100vh - ${headerHeight})`};
${position('sticky', headerHeight, 0, null, null)}
width: 18.571428571428573rem;
overflow-x: hidden;
overflow-y: auto;
${transitionProps('background-color')}
`;
const ProfilerAside = styled.aside`
flex: none;
background-color: var(--background-color);
height: auto;
position: static;
overflow-x: hidden;
overflow-y: auto;
${transitionProps('background-color')}
`;
type ContentProps = {
aside?: React.ReactNode;
leftAside?: React.ReactNode;
loading?: boolean;
isProfiler?: boolean;
show?: boolean;
nodeShow?: boolean;
};
const Content: FunctionComponent<ContentProps & WithStyled> = ({
children,
aside,
leftAside,
loading,
className,
isProfiler,
show,
nodeShow
}) => (
<Section className={className}>
{leftAside && <Aside>{leftAside}</Aside>}
<Article>{children}</Article>
{aside && isProfiler ? (
<ProfilerAside>{aside}</ProfilerAside>
) : (
<Aside
style={{
display: aside ? 'inline-block' : 'none',
height: aside
? nodeShow
? 'auto'
: 'calc(100vh - 13.28571rem)'
: show
? nodeShow
? 'auto'
: 'calc(100vh - 13.28571rem)'
: '0px',
// split view with no node selected: keep the collapsed aside anchored
position: show ? 'relative' : 'absolute',
top: '0px'
}}
>
{aside}
</Aside>
)}
{loading && <BodyLoading />}
</Section>
);
export default Content;
@@ -71,6 +71,7 @@ const Content = styled.div`
     > iframe {
         ${size('100%', '100%')}
+        // ${size('50%', '100%')}
         border: none;
     }
......
@@ -23,6 +23,8 @@ import ChartToolbox from '~/components/ChartToolbox';
 import HashLoader from 'react-spinners/HashLoader';
 import logo from '~/assets/images/netron.png';
 import netron2 from '@visualdl/netron2';
+import netron from '@visualdl/netron';
 import styled from 'styled-components';
 import {toast} from 'react-toastify';
 import useTheme from '~/hooks/useTheme';
@@ -72,6 +74,7 @@ const Content = styled.div`
     height: calc(100% - ${toolboxHeight});
     > iframe {
+        // ${size('50%', '100%')}
         ${size('100%', '100%')}
         border: none;
     }
@@ -300,9 +303,17 @@ const Graph = React.forwardRef<GraphRef, GraphProps>(
                     tooltipPlacement="bottom"
                 />
                 <Content>
+                    {/* <iframe
+                        // ref={iframe}
+                        src={PUBLIC_PATH + netron2}
+                        frameBorder={0}
+                        scrolling="no"
+                        marginWidth={0}
+                        marginHeight={0}
+                    ></iframe> */}
                     <iframe
                         ref={iframe}
-                        src={PUBLIC_PATH + netron2}
+                        src={PUBLIC_PATH + netron}
                         frameBorder={0}
                         scrolling="no"
                         marginWidth={0}
......
/**
* Copyright 2020 Baidu Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import type {Documentation, OpenedResult, Properties, SearchItem, SearchResult} from '~/resource/graph/types';
import React, {useCallback, useEffect, useImperativeHandle, useMemo, useRef, useState} from 'react';
import {contentHeight, position, primaryColor, rem, size, transitionProps} from '~/utils/style';
import ChartToolbox from '~/components/ChartToolbox';
import HashLoader from 'react-spinners/HashLoader';
import logo from '~/assets/images/netron.png';
import netron from '@visualdl/netron';
import styled from 'styled-components';
import {toast} from 'react-toastify';
import useTheme from '~/hooks/useTheme';
import {useTranslation} from 'react-i18next';
const PUBLIC_PATH: string = import.meta.env.SNOWPACK_PUBLIC_PATH;
let IFRAME_HOST = `${window.location.protocol}//${window.location.host}`;
if (PUBLIC_PATH.startsWith('http')) {
const url = new URL(PUBLIC_PATH);
IFRAME_HOST = `${url.protocol}//${url.host}`;
}
const toolboxHeight = rem(40);
const Wrapper = styled.div`
position: relative;
height: ${contentHeight};
background-color: var(--background-color);
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
${transitionProps('background-color')}
`;
const RenderContent = styled.div<{show: boolean}>`
position: absolute;
top: 0;
left: 0;
${size('100%', '100%')}
opacity: ${props => (props.show ? 1 : 0)};
z-index: ${props => (props.show ? 0 : -1)};
pointer-events: ${props => (props.show ? 'auto' : 'none')};
`;
const Toolbox = styled(ChartToolbox)`
height: ${toolboxHeight};
border-bottom: 1px solid var(--border-color);
padding: 0 ${rem(20)};
${transitionProps('border-color')}
`;
const Content = styled.div`
position: relative;
height: calc(100% - ${toolboxHeight});
> iframe {
${size('100%', '100%')}
// ${size('50%', '100%')}
border: none;
}
> .powered-by {
display: block;
${position('absolute', null, null, rem(20), rem(30))}
color: var(--graph-copyright-color);
font-size: ${rem(14)};
user-select: none;
img {
height: 1em;
filter: var(--graph-copyright-logo-filter);
vertical-align: middle;
}
}
`;
const Loading = styled.div`
${size('100%', '100%')}
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
overscroll-behavior: none;
cursor: progress;
font-size: ${rem(16)};
line-height: ${rem(60)};
`;
export type GraphRef = {
export(type: 'svg' | 'png'): void;
changeGraph(name: string): void;
search(value: string): void;
select(item: SearchItem): void;
showModelProperties(): void;
showNodeDocumentation(data: Properties): void;
};
type GraphProps = {
files: FileList | File[] | null;
uploader: JSX.Element;
showAttributes: boolean;
showInitializers: boolean;
showNames: boolean;
horizontal: boolean;
onRendered?: (flag: boolean) => unknown;
onOpened?: (data: OpenedResult) => unknown;
onSearch?: (data: SearchResult) => unknown;
onShowModelProperties?: (data: Properties) => unknown;
onShowNodeProperties?: (data: Properties) => unknown;
onShowNodeDocumentation?: (data: Documentation) => unknown;
};
const Graph = React.forwardRef<GraphRef, GraphProps>(
(
{
files,
uploader,
showAttributes,
showInitializers,
showNames,
horizontal,
onRendered,
onOpened,
onSearch,
onShowModelProperties,
onShowNodeProperties,
onShowNodeDocumentation
},
ref
) => {
const {t} = useTranslation('graph');
const theme = useTheme();
const [ready, setReady] = useState(false);
const [loading, setLoading] = useState(false);
const [rendered, setRendered] = useState(false);
const iframe = useRef<HTMLIFrameElement>(null);
const handler = useCallback(
(event: MessageEvent) => {
if (event.data) {
const {type, data} = event.data;
switch (type) {
case 'status':
switch (data) {
case 'ready':
return setReady(true);
case 'loading':
return setLoading(true);
case 'rendered':
setLoading(false);
setRendered(true);
onRendered?.(true);
return;
}
return;
case 'opened':
return onOpened?.(data);
case 'search':
return onSearch?.(data);
case 'cancel':
return setLoading(false);
case 'error':
toast.error(data);
setLoading(false);
return;
case 'show-model-properties':
return onShowModelProperties?.(data);
case 'show-node-properties':
return onShowNodeProperties?.(data);
case 'show-node-documentation':
return onShowNodeDocumentation?.(data);
}
}
},
[onRendered, onOpened, onSearch, onShowModelProperties, onShowNodeProperties, onShowNodeDocumentation]
);
const dispatch = useCallback((type: string, data?: unknown) => {
iframe.current?.contentWindow?.postMessage(
{
type,
data
},
IFRAME_HOST
);
}, []);
useEffect(() => {
window.addEventListener('message', handler);
dispatch('ready');
return () => {
window.removeEventListener('message', handler);
};
}, [handler, dispatch]);
useEffect(() => (ready && dispatch('change-files', files)) || undefined, [dispatch, files, ready]);
useEffect(
() => (ready && dispatch('toggle-attributes', showAttributes)) || undefined,
[dispatch, showAttributes, ready]
);
useEffect(
() => (ready && dispatch('toggle-initializers', showInitializers)) || undefined,
[dispatch, showInitializers, ready]
);
useEffect(() => (ready && dispatch('toggle-names', showNames)) || undefined, [dispatch, showNames, ready]);
useEffect(
() => (ready && dispatch('toggle-direction', horizontal)) || undefined,
[dispatch, horizontal, ready]
);
useEffect(() => (ready && dispatch('toggle-theme', theme)) || undefined, [dispatch, theme, ready]);
useImperativeHandle(ref, () => ({
export(type) {
dispatch('export', type);
},
changeGraph(name) {
dispatch('change-graph', name);
},
search(value) {
dispatch('search', value);
},
select(item) {
dispatch('select', item);
},
showModelProperties() {
dispatch('show-model-properties');
},
showNodeDocumentation(data) {
dispatch('show-node-documentation', data);
}
}));
const content = useMemo(() => {
if (!ready || loading) {
return (
<Loading>
<HashLoader size="60px" color={primaryColor} />
</Loading>
);
}
if (!files) {
return uploader;
}
if (ready && !rendered) {
return uploader;
}
return null;
}, [ready, loading, rendered, uploader, files]);
const shows = Boolean(!loading && rendered && files);
return (
<Wrapper>
{content}
<RenderContent show={shows}>
<Toolbox
items={[
{
icon: 'zoom-in',
tooltip: t('graph:zoom-in'),
onClick: () => dispatch('zoom-in')
},
{
icon: 'zoom-out',
tooltip: t('graph:zoom-out'),
onClick: () => dispatch('zoom-out')
},
{
icon: 'restore-size',
tooltip: t('graph:restore-size'),
onClick: () => dispatch('zoom-reset')
}
]}
reversed
tooltipPlacement="bottom"
/>
<Content>
<iframe
ref={iframe}
src={PUBLIC_PATH + netron}
frameBorder={0}
scrolling="no"
marginWidth={0}
marginHeight={0}
></iframe>
<a
className="powered-by"
href="https://github.com/lutzroeder/netron"
target="_blank"
rel="noreferrer"
>
Powered by <img src={PUBLIC_PATH + logo} alt="netron" />
</a>
</Content>
</RenderContent>
</Wrapper>
);
}
);
Graph.displayName = 'Graph';
export default Graph;
/* eslint-disable react-hooks/rules-of-hooks */
import React, {useState} from 'react';
import {Form, Input, Radio, Select} from 'antd';
import type {UploadProps} from 'antd';
import Buttons from '~/components/Button';
import {axios_fetcher} from '~/utils/fetch';
import {message} from 'antd';
import {useTranslation} from 'react-i18next';
import {Progress} from 'antd';
const {Option} = Select;
export default function xpaddleUploader(props: any) {
const [form] = Form.useForm();
const {t} = useTranslation(['togglegraph', 'common']);
const formLayout: any = {labelCol: {span: 4}, wrapperCol: {span: 14}};
const [convertProcess, setConvertProgress] = useState(0);
const [convertProcessFlag, setconvertProcessFlag] = useState(false);
const Uploadprops: UploadProps = {
name: 'file',
action: '',
headers: {
authorization: 'authorization-text'
},
onChange(info) {
if (info.file.status !== 'uploading') {
console.log(info.file, info.fileList);
}
if (info.file.status === 'done') {
message.success(`${info.file.name} file uploaded successfully`);
} else if (info.file.status === 'error') {
message.error(`${info.file.name} file upload failed.`);
}
}
};
const LiteBackend = [
'arm',
'opencl',
'x86',
'metal',
'xpu',
'bm',
'mlu',
'intel_fpga',
'huawei_ascend_npu',
'imagination_nna',
'rockchip_npu',
'mediatek_apu',
'huawei_kirin_npu',
'amlogic_npu'
];
const lite_model_type = ['protobuf', 'naive_buffer'];
// Decode the base64-encoded model returned by the server into a File object.
const base64UrlToFile = (base64Url: any, filename: any) => {
const bstr = atob(base64Url);
let n = bstr.length;
const u8arr = new Uint8Array(n);
while (n--) {
u8arr[n] = bstr.charCodeAt(n);
}
return new File([u8arr], filename);
};
const submodel = async () => {
props.changeLoading(true);
const values = await form.validateFields();
const formData = new FormData();
const onnx_file_component = document.getElementById('upload_onnx_model_file') as HTMLInputElement;
const onnx_file = onnx_file_component!.files![0];
formData.append('convert_to_lite', values.convertToLite);
formData.append('model', onnx_file);
formData.append('lite_valid_places', values.liteValidPlaces);
formData.append('lite_model_type', values.liteModelType);
axios_fetcher(
`/inference/onnx2paddle/convert`,
{
method: 'POST',
body: formData
},
{
onDownloadProgress: function (axiosProgressEvent: any) {
setConvertProgress(Math.round(axiosProgressEvent.progress! * 100));
setconvertProcessFlag(true);
}
}
)
.then(
(res: any) => {
const files2 = base64UrlToFile(res.model, 'model.pdmodel');
props.setFiles([onnx_file]);
props.changeFiles2([files2]);
const current_date = new Date();
// getMonth() is 0-based and getDate() gives the day of the month
const filename = `${current_date.getFullYear()}_${current_date.getMonth() + 1}_${current_date.getDate()}_${current_date.getHours()}_${current_date.getMinutes()}_${current_date.getSeconds()}_paddlemodel.tar`;
props.downloadEvent(res['request_id'], filename);
},
res => {
props.changeLoading(false);
console.log(res);
}
)
.finally(() => {
setconvertProcessFlag(false);
});
};
return (
<div>
<div
style={{
textAlign: 'center',
margin: '40px',
fontSize: '26px'
}}
>
{t('togglegraph:Onnx2PaddleTitle')}
</div>
<Form layout={formLayout} form={form} initialValues={{layout: formLayout}} style={{maxWidth: 600}}>
<Form.Item
label={t('togglegraph:model')}
name="model"
rules={[{required: true, message: t('isRequire')}]}
>
<Input type="file" id="upload_onnx_model_file" accept=".onnx" />
</Form.Item>
<Form.Item
name="convertToLite"
label={t('togglegraph:convert_to_lite')}
rules={[{required: true, message: t('isRequire')}]}
initialValue="no"
>
<Radio.Group>
<Radio value="yes">{t('togglegraph:isYes')}</Radio>
<Radio value="no">{t('togglegraph:isNo')}</Radio>
</Radio.Group>
</Form.Item>
<Form.Item
label={t('togglegraph:lite_valid_places')}
name="liteValidPlaces"
rules={[{required: false}]}
initialValue="arm"
>
<Select placeholder="Please select a lite place">
{LiteBackend.map((item: string) => {
return (
<Option value={item} key={item}>
{item}
</Option>
);
})}
</Select>
</Form.Item>
<Form.Item
label={t('togglegraph:lite_model_type')}
name="liteModelType"
rules={[{required: false}]}
initialValue="naive_buffer"
>
<Select placeholder="Please select a lite model type">
{lite_model_type.map((item: string) => {
return (
<Option value={item} key={item}>
{item}
</Option>
);
})}
</Select>
</Form.Item>
{/* <Form.Item>
<Button type="primary">Submit</Button>
</Form.Item> */}
</Form>
<div
style={{
textAlign: 'center'
}}
>
<Buttons
onClick={() => {
setConvertProgress(0);
setconvertProcessFlag(true);
submodel();
}}
>
{t('Conversion')}
</Buttons>
{convertProcessFlag ? <Progress type="circle" percent={convertProcess} /> : null}
{convertProcessFlag ? <h1> {t('togglegraph:converting')} </h1> : null}
</div>
</div>
);
}
/* eslint-disable react-hooks/rules-of-hooks */
/* eslint-disable prettier/prettier */
import React, {useState} from 'react';
import {Form, Input, Radio, Select} from 'antd';
import type {UploadProps} from 'antd';
import Buttons from '~/components/Button';
import {message} from 'antd';
import {axios_fetcher} from '~/utils/fetch';
import {useTranslation} from 'react-i18next';
import {Progress} from 'antd';
const {Option} = Select;
export default function xpaddleUploader(props: any) {
const [form] = Form.useForm();
const formLayout: any = {labelCol: {span: 4}, wrapperCol: {span: 14}};
const {t} = useTranslation(['togglegraph', 'common']);
const [convertProcess, setConvertProgress] = useState(0);
const [convertProcessFlag, setconvertProcessFlag] = useState(false);
const Uploadprops: UploadProps = {
name: 'file',
action: '',
headers: {
authorization: 'authorization-text'
},
onChange(info) {
if (info.file.status !== 'uploading') {
console.log(info.file, info.fileList);
}
if (info.file.status === 'done') {
message.success(`${info.file.name} file uploaded successfully`);
} else if (info.file.status === 'error') {
message.error(`${info.file.name} file upload failed.`);
}
}
};
// opset versions selectable for the Paddle2Onnx export
const opsetVersions = [7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
// deploy backends selectable for the Paddle2Onnx export
const deployBackends = ['onnxruntime', 'tensorrt', 'others'];
// Decode the base64-encoded model returned by the server into a File object.
const base64UrlToFile = (base64Url: any, filename: any) => {
const bstr = atob(base64Url);
let n = bstr.length;
const u8arr = new Uint8Array(n);
while (n--) {
u8arr[n] = bstr.charCodeAt(n);
}
return new File([u8arr], filename);
};
const submodel = async () => {
props.changeLoading(true);
const values = await form.validateFields();
const formData = new FormData();
const model_file_component = document.getElementById('upload_pd_model_file') as HTMLInputElement;
const model_file = model_file_component!.files![0];
const param_file_component = document.getElementById('upload_pd_param_file') as HTMLInputElement;
const param_file = param_file_component!.files![0];
formData.append('model', model_file);
formData.append('param', param_file);
formData.append('opset_version', values['opset_version']);
formData.append('deploy_backend', values['deployBackend']);
axios_fetcher(
`/inference/paddle2onnx/convert`,
{
method: 'POST',
body: formData
},
{
onDownloadProgress: function (axiosProgressEvent: any) {
setConvertProgress(Math.round(axiosProgressEvent.progress! * 100));
setconvertProcessFlag(true);
}
}
)
.then(
(res: any) => {
const files2 = base64UrlToFile(res.model, 'model.onnx');
props.setFiles([model_file]);
props.changeFiles2([files2]);
const current_date = new Date();
// getMonth() is 0-based and getDate() gives the day of the month
const filename = `${current_date.getFullYear()}_${current_date.getMonth() + 1}_${current_date.getDate()}_${current_date.getHours()}_${current_date.getMinutes()}_${current_date.getSeconds()}_onnxmodel.onnx`;
props.downloadEvent(res['request_id'], filename);
},
res => {
props.changeLoading(false);
console.log(res);
}
)
.finally(() => {
setconvertProcessFlag(false);
});
};
return (
<div>
<div
style={{
textAlign: 'center',
margin: '40px',
fontSize: '26px'
}}
>
{t('togglegraph:Paddle2OnnxTitle')}
</div>
<Form layout={formLayout} form={form} initialValues={{layout: formLayout}} style={{maxWidth: 600}}>
<Form.Item
label={t('togglegraph:pdmodels')}
name="model"
rules={[{required: true, message: t('isRequire')}]}
>
<Input type="file" id="upload_pd_model_file" accept=".pdmodel" />
</Form.Item>
<Form.Item
label={t('togglegraph:pdiparams')}
name="param"
rules={[{required: true, message: t('isRequire')}]}
>
<Input type="file" id="upload_pd_param_file" accept=".pdiparams" />
</Form.Item>
<Form.Item
label={t('togglegraph:opset_version')}
name="opset_version"
rules={[{required: false}]}
initialValue="11"
>
<Select placeholder="Please select a version">
{opsetVersions.map((item: number) => {
return (
<Option value={item} key={item}>
{item}
</Option>
);
})}
</Select>
</Form.Item>
<Form.Item
label={t('togglegraph:deploy_backend')}
name="deployBackend"
rules={[{required: false}]}
initialValue="onnxruntime"
>
<Select placeholder="Please select a deploy backend">
{deployBackends.map((item: string) => {
return (
<Option value={item} key={item}>
{item}
</Option>
);
})}
</Select>
</Form.Item>
</Form>
<div
style={{
textAlign: 'center'
}}
>
<Buttons
onClick={() => {
setConvertProgress(0);
setconvertProcessFlag(true);
submodel();
}}
>
{t('togglegraph:Conversion')}
</Buttons>
{convertProcessFlag ? (
<Progress type="circle" className="progressCircle" percent={convertProcess} />
) : null}
{convertProcessFlag ? <h1> {t('togglegraph:converting')} </h1> : null}
</div>
</div>
);
}
.progressCircle {
align-items: center;
}
\ No newline at end of file
@@ -27,7 +27,7 @@ import {useDispatch} from 'react-redux';
 import type {BlobResponse} from '~/utils/fetch';
 import Button from '~/components/Button';
 import Checkbox from '~/components/Checkbox';
-import Content from '~/components/Content';
+import Content from '~/components/ContentXpaddle';
 import Field from '~/components/Field';
 import HashLoader from 'react-spinners/HashLoader';
 import ModelPropertiesDialog from '~/components/GraphPage/ModelPropertiesDialog';
@@ -110,6 +110,7 @@ const Graph = React.forwardRef<pageRef, GraphProps>(({changeRendered, show = tru
     const {loading} = useRequest<BlobResponse>(files ? null : '/graph/graph');
     const setModelFile = useCallback(
         (f: FileList | File[]) => {
+            // debugger;
             storeDispatch(actions.graph.setModel(f));
             setFiles(f);
         },
@@ -224,7 +225,6 @@ const Graph = React.forwardRef<pageRef, GraphProps>(({changeRendered, show = tru
                 </Aside>
             );
         }
-        console.log('nodeData && renderedflag3', nodeData, renderedflag3);
         if (nodeData && renderedflag3) {
             return (
@@ -290,6 +290,7 @@ const Graph = React.forwardRef<pageRef, GraphProps>(({changeRendered, show = tru
                     </RadioGroup>
                 </Field>
             </AsideSection>
+
             <AsideSection>
                 <Field label={t('graph:export-file')}>
                     <ExportButtonWrapper>
@@ -332,12 +333,13 @@ const Graph = React.forwardRef<pageRef, GraphProps>(({changeRendered, show = tru
         () => <Uploader onClickUpload={onClickFile} onDropFiles={setModelFile} />,
         [onClickFile, setModelFile]
     );
+    // const flags = false;
+    const flags = files && show;
     return (
         <>
             <Title>{t('common:graph')}</Title>
             <ModelPropertiesDialog data={modelData} onClose={() => setModelData(null)} />
-            <Content aside={aside}>
+            <Content show={show} aside={flags ? aside : null}>
                 {loading ? (
                     <Loading>
                         <HashLoader size="60px" color={primaryColor} />
......
@@ -18,8 +18,10 @@ import type {TFunction} from 'i18next';
 import i18next from 'i18next';
 import queryString from 'query-string';
 import {toast} from 'react-toastify';
+import axios from 'axios';

 const API_TOKEN_KEY: string = import.meta.env.SNOWPACK_PUBLIC_API_TOKEN_KEY;
-const API_URL: string = import.meta.env.SNOWPACK_PUBLIC_API_URL;
+export const API_URL: string = import.meta.env.SNOWPACK_PUBLIC_API_URL;
 console.log('API_URL', API_TOKEN_KEY);
 const API_TOKEN_HEADER = 'X-VisualDL-Instance-ID';
@@ -89,7 +91,6 @@ export async function fetcher<T = unknown>(url: string, options?: RequestInit):
         // res = await fetch('http://10.181.196.14:8040/app/api/deploy/convert?format=onnx', addApiToken(options));
         res = await fetch(API_URL + url, addApiToken(options));
-        console.log('ressponse', res);
     } catch (e) {
         const t = await logErrorAndReturnT(e);
         throw new Error(t('errors:network-error'));
@@ -131,7 +132,6 @@ export async function fetcher<T = unknown>(url: string, options?: RequestInit):
     } else {
         let data: Blob;
         try {
-            console.log('datas', res);
             data = await res.blob();
         } catch (e) {
             const t = await logErrorAndReturnT(e);
@@ -140,7 +140,6 @@ export async function fetcher<T = unknown>(url: string, options?: RequestInit):
         const disposition = res.headers.get('Content-Disposition');
         // support safari
         if (!data.arrayBuffer) {
-            console.log('arrayBuffer', data);
             data.arrayBuffer = async () =>
                 new Promise<ArrayBuffer>((resolve, reject) => {
                     const fileReader = new FileReader();
@@ -150,7 +149,6 @@ export async function fetcher<T = unknown>(url: string, options?: RequestInit):
                     fileReader.readAsArrayBuffer(data);
                 });
         }
-        console.log('datas', data);
         let filename: string | null = null;
         if (disposition && disposition.indexOf('attachment') !== -1) {
             const matches = /filename[^;=\n]*=((['"]).*?\2|[^;\n]*)/.exec(disposition);
@@ -162,6 +160,63 @@ export async function fetcher<T = unknown>(url: string, options?: RequestInit):
     }
 }

+export async function axios_fetcher<T = unknown>(
+    url: string,
+    options?: RequestInit,
+    config?: object
+): Promise<BlobResponse | string | T> {
+    let res: any;
+    try {
+        if (options?.method === 'POST') {
+            res = await axios.post(API_URL + url, options.body, config);
+        } else if (options?.method === 'GET') {
+            res = await axios.get(API_URL + url, config);
+        } else {
+            res = await axios(API_URL + url);
+        }
+    } catch (e) {
+        const t = await logErrorAndReturnT(e);
+        throw new Error(t('errors:network-error'));
+    }
+    const contentType = res.headers.get('content-type') ?? '';
+    if (contentType.includes('application/json')) {
+        let response: Data<T> | T;
+        try {
+            response = res.data;
+        } catch (e) {
+            const t = await logErrorAndReturnT(e);
+            throw new Error(t('errors:parse-error'));
+        }
+        if (response && 'status' in response) {
+            if (response.status !== 0) {
+                const t = await logErrorAndReturnT(response);
+                toast.error((response as ErrorData).msg);
+                throw new Error((response as ErrorData).msg || t('errors:error'));
+            } else {
+                return (response as SuccessData<T>).data;
+            }
+        }
+        return response;
+    } else if (contentType.startsWith('text/')) {
+        return res.data as string;
+    } else {
+        const data: any = res.data;
+        let filename: string | null = null;
+        const disposition = res.headers.get('Content-Disposition');
+        if (disposition && disposition.indexOf('attachment') !== -1) {
+            const matches = /filename[^;=\n]*=((['"]).*?\2|[^;\n]*)/.exec(disposition);
+            if (matches != null && matches[1]) {
+                filename = matches[1].replace(/['"]/g, '');
+            }
+        }
+        return {data, type: res.headers.get('Content-Type'), filename};
+    }
+}
+
 export const cycleFetcher = async <T = unknown>(urls: string[], options?: RequestInit): Promise<T[]> => {
     return await Promise.all(urls.map(url => fetcher<T>(url, options)));
 };
/**
* Copyright 2020 Baidu Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// cSpell:disable
import graph from '../../../assets/graph/yolov3.cfg';
// import onnx from '../../../assets/graph/mobilenetv2-7-0.onnx';
export default async () => {
const result = await fetch(graph);
// const result = await fetch(onnx);
return new Response(await result.arrayBuffer(), {
status: 200,
headers: {
'Content-Type': 'application/octet-stream',
'Content-Disposition': 'attachment; filename="yolov3.cfg"'
}
});
};
/**
* Copyright 2020 Baidu Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// cSpell:disable
import graph from '../../../assets/graph/yolov3.cfg';
// import onnx from '../../../assets/graph/mobilenetv2-7-0.onnx';
export default async () => {
const result = await fetch(graph);
// const result = await fetch(onnx);
return new Response(await result.arrayBuffer(), {
status: 200,
headers: {
'Content-Type': 'application/octet-stream',
'Content-Disposition': 'attachment; filename="yolov3.cfg"'
}
});
};
@@ -9,10 +9,10 @@ six >= 1.14.0
 matplotlib
 pandas
 packaging
-x2paddle
+x2paddle >= 1.4.0
+paddle2onnx >= 1.0.5
 rarfile
-gradio
+gradio == 3.11.0
 tritonclient[all]
-attrdict
 psutil
 onnx >= 1.6.0
@@ -14,6 +14,7 @@
 # =======================================================================
 import collections
 import os.path
+import pathlib
 import re

 _graph_version = '1.0.0'
@@ -108,6 +109,8 @@ def construct_edges(var_name, all_ops, all_vars, all_edges):
             all_edges[(src_node, dst_node)]['vars'].add(var_name)
         else:
             common_ancestor = os.path.commonpath([src_node, dst_node])
+            common_ancestor = pathlib.Path(common_ancestor).as_posix(
+            )  # on Windows, os.path.commonpath returns a Windows-style path, so convert it to POSIX
             src_base_node = src_node
             while True:
                 parent_node = all_ops[src_base_node]['parent_node']
......
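Note: the fix above relies on `as_posix()` normalizing the separator that `os.path.commonpath` produces on Windows. A minimal illustration, using `PureWindowsPath` so it behaves identically on any OS (the node names here are made up):

```python
import pathlib

# os.path.commonpath(['block/layer1/conv', 'block/layer1/bn']) yields
# 'block\\layer1' on Windows, while the graph node names are keyed with '/'.
common = 'block\\layer1'
print(pathlib.PureWindowsPath(common).as_posix())  # -> 'block/layer1'
```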
@@ -18,13 +18,6 @@ import re
 import numpy as np
 import requests
 import tritonclient.http as httpclient
-from attrdict import AttrDict
-
-
-def convert_http_metadata_config(metadata):
-    metadata = AttrDict(metadata)
-
-    return metadata

 def prepare_request(inputs_meta, inputs_data, outputs_meta):
@@ -58,7 +51,7 @@ def prepare_request(inputs_meta, inputs_data, outputs_meta):
         inputs.append(infer_input)
     outputs = []
     for output_dict in outputs_meta:
-        infer_output = httpclient.InferRequestedOutput(output_dict.name)
+        infer_output = httpclient.InferRequestedOutput(output_dict['name'])
         outputs.append(infer_output)
     return inputs, outputs
@@ -321,8 +314,8 @@ class HttpClientManager:
         results = {}
         for output in output_metadata:
-            result = response.as_numpy(output.name)  # datatype: numpy
-            if output.datatype == 'BYTES':  # datatype: bytes
+            result = response.as_numpy(output['name'])  # datatype: numpy
+            if output['datatype'] == 'BYTES':  # datatype: bytes
                 try:
                     value = result
                     if len(result.shape) == 1:
@@ -336,7 +329,7 @@ class HttpClientManager:
                 pass
             else:
                 result = result[0]
-            results[output.name] = result
+            results[output['name']] = result
         return results

     def raw_infer(self, server_url, model_name, model_version, raw_input):
@@ -353,8 +346,6 @@ class HttpClientManager:
         except Exception as e:
             raise RuntimeError("Failed to retrieve the metadata: " + str(e))
-        model_metadata = convert_http_metadata_config(model_metadata)
-
-        input_metadata = model_metadata.inputs
-        output_metadata = model_metadata.outputs
+        input_metadata = model_metadata['inputs']
+        output_metadata = model_metadata['outputs']
         return input_metadata, output_metadata
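Note: dropping `attrdict` here is part of the Python 3.10 compatibility fix (#1249). attrdict 2.x imports `Mapping` from `collections`, which Python 3.10 removed (it now lives in `collections.abc`), so the import fails outright. Plain dict indexing over the Triton metadata needs no wrapper; a small sketch with illustrative, made-up metadata:

```python
# shape of the metadata returned by tritonclient's get_model_metadata (illustrative)
model_metadata = {
    'inputs': [{'name': 'x', 'datatype': 'FP32', 'shape': [1, 3, 224, 224]}],
    'outputs': [{'name': 'y', 'datatype': 'BYTES', 'shape': [1, 1000]}],
}
input_metadata = model_metadata['inputs']    # was: AttrDict(model_metadata).inputs
output_metadata = model_metadata['outputs']  # was: AttrDict(model_metadata).outputs
print(output_metadata[0]['name'])            # -> 'y'
```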
+# flake8: noqa
 # Copyright (c) 2022 VisualDL Authors. All Rights Reserve.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -13,20 +14,19 @@
 # limitations under the License.
 # =======================================================================
 import base64
-import glob
 import hashlib
 import json
 import os
 import shutil
 import tempfile
-from threading import Lock

+import paddle
+import paddle2onnx
 from flask import request
-from x2paddle.convert import caffe2paddle
 from x2paddle.convert import onnx2paddle

 from .xarfile import archive
-from .xarfile import unarchive
+from visualdl.io.bfile import BosFileSystem
 from visualdl.server.api import gen_result
 from visualdl.server.api import result
 from visualdl.utils.dir import X2PADDLE_CACHE_PATH
@@ -35,126 +35,241 @@ _max_cache_numbers = 200

 class ModelConvertApi(object):
+    '''!
+    Integrate multiple model conversion tools and provide a conversion service for users.
+    When a user uploads a model to this server, convert the model and upload the results to VDL BOS.
+    When the user downloads the model, we fetch the data from BOS and send it to the client.
+    Maybe users can download from BOS directly if the frontend can achieve it.
+    '''

     def __init__(self):
-        self.supported_formats = {'onnx', 'caffe'}
-        self.lock = Lock()
-        self.server_count = 0  # we use this variable to count requests handled,
-        # and check the number of files every 100 requests.
-        # If more than _max_cache_numbers files in cache, we delete the last recent used 50 files.
+        '''
+        Initialize an object to provide the service. Needs a BosFileSystem client to write data.
+        '''
+        try:
+            self.bos_client = BosFileSystem()
+            self.bucket_name = os.getenv("BOS_BUCKET_NAME")
+        except Exception:
+            # When BOS_HOST, BOS_AK, BOS_SK, BOS_STS are not set in the environment variables,
+            # we use VDL BOS by default.
+            self.bos_client = BosFileSystem(write_flag=False)
+            self.bos_client.renew_bos_client_from_server()
+            self.bucket_name = 'visualdl-server'

     @result()
-    def convert_model(self, format):
-        file_handle = request.files['file']
-        data = file_handle.stream.read()
-        if format not in self.supported_formats:
-            raise RuntimeError('Model format {} is not supported. "\
-                "Only onnx and caffe models are supported now.'.format(format))
+    def onnx2paddle_model_convert(self, convert_to_lite, lite_valid_places,
+                                  lite_model_type):  # noqa:C901
+        '''
+        Convert an onnx model to a paddle model.
+        '''
+        model_handle = request.files['model']
+        data = model_handle.stream.read()
         result = {}
-        result['from'] = format
-        result['to'] = 'paddle'
+        # Do a simple data verification
+        if convert_to_lite in ['true', 'True', 'yes', 'Yes', 'y']:
+            convert_to_lite = True
+        else:
+            convert_to_lite = False
+        if lite_valid_places not in [
+                'arm', 'opencl', 'x86', 'metal', 'xpu', 'bm', 'mlu',
+                'intel_fpga', 'huawei_ascend_npu', 'imagination_nna',
+                'rockchip_npu', 'mediatek_apu', 'huawei_kirin_npu',
+                'amlogic_npu'
+        ]:
+            lite_valid_places = 'arm'
+        if lite_model_type not in ['protobuf', 'naive_buffer']:
+            lite_model_type = 'naive_buffer'
         # call x2paddle to convert models
         hl = hashlib.md5()
         hl.update(data)
         identity = hl.hexdigest()
         result['request_id'] = identity
-        target_path = os.path.join(X2PADDLE_CACHE_PATH, identity)
-        if os.path.exists(target_path):
-            if os.path.exists(
-                    os.path.join(target_path, 'inference_model',
-                                 'model.pdmodel')):  # if data in cache
-                with open(
-                        os.path.join(target_path, 'inference_model',
-                                     'model.pdmodel'), 'rb') as model_fp:
-                    model_encoded = base64.b64encode(
-                        model_fp.read()).decode('utf-8')
-                result['pdmodel'] = model_encoded
-                return result
-        else:
+        target_path = os.path.join(X2PADDLE_CACHE_PATH, 'onnx2paddle',
+                                   identity)
+        if not os.path.exists(target_path):
             os.makedirs(target_path, exist_ok=True)
         with tempfile.NamedTemporaryFile() as fp:
             fp.write(data)
             fp.flush()
             try:
-                if format == 'onnx':
-                    try:
-                        import onnx  # noqa: F401
-                    except Exception:
-                        raise RuntimeError(
-                            "[ERROR] onnx is not installed, use \"pip install onnx>=1.6.0\"."
-                        )
-                    onnx2paddle(fp.name, target_path)
-                elif format == 'caffe':
-                    with tempfile.TemporaryDirectory() as unarchivedir:
-                        unarchive(fp.name, unarchivedir)
-                        prototxt_path = None
-                        weight_path = None
-                        for dirname, subdirs, filenames in os.walk(
-                                unarchivedir):
-                            for filename in filenames:
-                                if '.prototxt' in filename:
-                                    prototxt_path = os.path.join(
-                                        dirname, filename)
-                                if '.caffemodel' in filename:
-                                    weight_path = os.path.join(
-                                        dirname, filename)
-                        if prototxt_path is None or weight_path is None:
-                            raise RuntimeError(
-                                ".prototxt or .caffemodel file is missing in your archive file, "
-                                "please check files uploaded.")
-                        caffe2paddle(prototxt_path, weight_path, target_path,
-                                     None)
+                import onnx  # noqa: F401
+            except Exception:
+                raise RuntimeError(
+                    "[ERROR] onnx is not installed, use \"pip install onnx>=1.6.0\"."
+                )
+            try:
+                if convert_to_lite is False:
+                    with paddle.fluid.dygraph.guard():
+                        onnx2paddle(
+                            fp.name,
+                            target_path,
+                            convert_to_lite=convert_to_lite)
+                else:
+                    with paddle.fluid.dygraph.guard():
+                        onnx2paddle(
+                            fp.name,
+                            target_path,
+                            convert_to_lite=convert_to_lite,
+                            lite_valid_places=lite_valid_places,
+                            lite_model_type=lite_model_type)
             except Exception as e:
                 raise RuntimeError(
                     "[Conversion error] {}.\n Please open an issue at "
                     "https://github.com/PaddlePaddle/X2Paddle/issues to report your problem."
                     .format(e))
-        with self.lock:
-            origin_dir = os.getcwd()
-            os.chdir(os.path.dirname(target_path))
-            archive(os.path.basename(target_path))
-            os.chdir(origin_dir)
-            self.server_count += 1
+            # we need to enter dirname(target_path) to archive,
+            # in case an unnecessary directory gets added to the archive.
+            origin_dir = os.getcwd()
+            os.chdir(os.path.dirname(target_path))
+            archive(os.path.basename(target_path))
+            os.chdir(origin_dir)
+        with open(
+                os.path.join(X2PADDLE_CACHE_PATH, 'onnx2paddle',
+                             '{}.tar'.format(identity)), 'rb') as f:
+            # upload archived transformed model to vdl bos
+            data = f.read()
+            filename = 'bos://{}/onnx2paddle/{}.tar'.format(
+                self.bucket_name, identity)
+            try:
+                self.bos_client.write(filename, data, append=False)
+            except Exception as e:
+                print(
+                    "Exception: Write file {}.tar to bos failed, due to {}"
+                    .format(identity, e))
         with open(
                 os.path.join(target_path, 'inference_model', 'model.pdmodel'),
                 'rb') as model_fp:
-            model_encoded = base64.b64encode(model_fp.read()).decode('utf-8')
-        result['pdmodel'] = model_encoded
+            # upload pdmodel file to bos; if a model has been transformed before, we can directly download it from bos
+            filename = 'bos://{}/onnx2paddle/{}/model.pdmodel'.format(
+                self.bucket_name, identity)
+            data = model_fp.read()
+            try:
+                self.bos_client.write(filename, data)
+            except Exception as e:
+                print(
+                    "Exception: Write file {}/model.pdmodel to bos failed, due to {}"
+                    .format(identity, e))
+            # return transformed pdmodel file to frontend to show model structure graph
+            model_encoded = base64.b64encode(data).decode('utf-8')
+        # delete target_path
+        shutil.rmtree(target_path)
+        result['model'] = model_encoded
+        print(len(model_encoded))
         return result

     @result('application/octet-stream')
-    def download_model(self, request_id):
-        if os.path.exists(
-                os.path.join(X2PADDLE_CACHE_PATH,
-                             '{}.tar'.format(request_id))):
-            with open(
-                    os.path.join(X2PADDLE_CACHE_PATH,
-                                 '{}.tar'.format(request_id)), 'rb') as f:
-                data = f.read()
-            if self.server_count % 100 == 0:  # we check number of files every 100 request
-                file_paths = glob.glob(
-                    os.path.join(X2PADDLE_CACHE_PATH, '*.tar'))
-                if len(file_paths) >= _max_cache_numbers:
-                    file_paths = sorted(
-                        file_paths, key=os.path.getctime, reverse=True)
-                    for file_path in file_paths:
-                        try:
-                            os.remove(file_path)
-                            shutil.rmtree(
-                                os.path.join(
-                                    os.path.dirname(file_path),
-                                    os.path.splitext(
-                                        os.path.basename(file_path))[0]))
-                        except Exception:
-                            pass
-            return data
+    def onnx2paddle_model_download(self, request_id):
+        '''
+        Download the converted paddle model from bos.
+        '''
+        filename = 'bos://{}/onnx2paddle/{}.tar'.format(
+            self.bucket_name, request_id)
+        data = None
+        if self.bos_client.exists(filename):
+            data = self.bos_client.read_file(filename)
+        if not data:
+            raise RuntimeError(
+                "The requested model can not be downloaded because it does not exist or the conversion failed."
+            )
+        print(len(data))
+        return data
+
+    @result()
+    def paddle2onnx_convert(self, opset_version, deploy_backend):
+        '''
+        Convert a paddle model to an onnx model.
+        '''
+        model_handle = request.files['model']
+        params_handle = request.files['param']
+        model_data = model_handle.stream.read()
+        param_data = params_handle.stream.read()
+        result = {}
+        # Do a simple data verification
+        try:
+            opset_version = int(opset_version)
+        except Exception:
+            opset_version = 11
+        if deploy_backend not in ['onnxruntime', 'tensorrt', 'others']:
+            deploy_backend = 'onnxruntime'
+        # call paddle2onnx to convert models
+        hl = hashlib.md5()
+        hl.update(model_data + param_data)
+        identity = hl.hexdigest()
+        result['request_id'] = identity
+        with tempfile.NamedTemporaryFile() as model_fp:
+            with tempfile.NamedTemporaryFile() as param_fp:
+                model_fp.write(model_data)
+                param_fp.write(param_data)
+                model_fp.flush()
+                param_fp.flush()
+                try:
+                    onnx_model = paddle2onnx.export(
+                        model_fp.name,
+                        param_fp.name,
+                        opset_version=opset_version,
+                        deploy_backend=deploy_backend)
+                except Exception as e:
+                    raise RuntimeError(
+                        "[Conversion error] {}.\n Please open an issue at "
+                        "https://github.com/PaddlePaddle/Paddle2ONNX/issues to report your problem."
+                        .format(e))
+                if not onnx_model:
+                    raise RuntimeError(
+                        "[Conversion error] Please check your input model and param files."
+                    )
+                # upload transformed model to vdl bos
+                filename = 'bos://{}/paddle2onnx/{}/model.onnx'.format(
+                    self.bucket_name, identity)
+                model_encoded = None
+                if onnx_model:
+                    try:
+                        self.bos_client.write(
+                            filename, onnx_model, append=False)
+                    except Exception as e:
+                        print(
+                            "Exception: Write file {}/model.onnx to bos failed, due to {}"
+                            .format(identity, e))
+                    model_encoded = base64.b64encode(onnx_model).decode(
+                        'utf-8')
+        result['model'] = model_encoded
+        print(len(model_encoded))
+        return result
+
+    @result('application/octet-stream')
+    def paddle2onnx_download(self, request_id):
+        '''
+        Download the converted onnx model from bos.
+        '''
+        filename = 'bos://{}/paddle2onnx/{}/model.onnx'.format(
+            self.bucket_name, request_id)
+        data = None
+        if self.bos_client.exists(filename):
+            data = self.bos_client.read_file(filename)
+        if not data:
+            raise RuntimeError(
+                "The requested model can not be downloaded because it does not exist or the conversion failed."
+            )
+        print(len(data))
+        return data

 def create_model_convert_api_call():
     api = ModelConvertApi()
     routes = {
-        'convert': (api.convert_model, ['format']),
-        'download': (api.download_model, ['request_id'])
+        'paddle2onnx/convert': (api.paddle2onnx_convert,
+                                ['opset_version', 'deploy_backend']),
+        'paddle2onnx/download': (api.paddle2onnx_download, ['request_id']),
+        'onnx2paddle/convert':
+        (api.onnx2paddle_model_convert,
+         ['convert_to_lite', 'lite_valid_places', 'lite_model_type']),
+        'onnx2paddle/download': (api.onnx2paddle_model_download,
+                                 ['request_id'])
     }

     def call(path: str, args):
......
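For reference, a client-side sketch of the two new conversion routes. Assumptions, since they are not pinned down in this diff: the server is reachable at `http://localhost:8040` and mounts these routes under the same `/api/inference` prefix the frontend uses (adjust to your deployment); responses follow VisualDL's usual `{status, msg, data}` envelope; and the download route takes `request_id` as a query parameter:

```python
import requests

# Hypothetical endpoint prefix; the real one depends on how the server is launched.
BASE = 'http://localhost:8040/api/inference'

# Paddle2ONNX: upload the .pdmodel/.pdiparams pair plus the form fields the route expects.
with open('model.pdmodel', 'rb') as m, open('model.pdiparams', 'rb') as p:
    resp = requests.post(
        f'{BASE}/paddle2onnx/convert',
        files={'model': m, 'param': p},
        data={'opset_version': '11', 'deploy_backend': 'onnxruntime'})
resp.raise_for_status()
payload = resp.json()['data']       # {'request_id': ..., 'model': <base64-encoded onnx>}
request_id = payload['request_id']  # md5 of the uploaded bytes; doubles as the BOS key

# Later, fetch the converted artifact by request_id.
onnx_bytes = requests.get(
    f'{BASE}/paddle2onnx/download', params={'request_id': request_id}).content
```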
@@ -12,11 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =======================================================================
+import base64
+import hashlib
 import os
 import tempfile
-import hashlib
-import base64
 import time

 try:
@@ -251,6 +250,13 @@ class BosConfigClient(object):
         return result

     def upload_object_from_file(self, path, filename):
+        """!
+        Upload a local file to the baidu bos filesystem. The path can be divided into bucket name and prefix directory.
+        The file will be uploaded in the bucket at path `join(prefix directory in path, filename)`.
+        @param self object
+        @param path(str) bos directory path to store the file, which consists of bucket_name + prefix directory.
+        @param filename(str) local file path to upload
+        """
         if not self.exists(path):
             self.makedirs(path)
         bucket_name, object_key = get_object_info(path)
@@ -265,18 +271,21 @@ class BosConfigClient(object):

 class BosFileSystem(object):
     def __init__(self, write_flag=True):
+        self.max_contents_count = 1
+        self.max_contents_time = 1
+        self.file_length_map = {}
+        self._file_contents_to_add = b''
+        self._file_contents_count = 0
+        self._start_append_time = time.time()
         if write_flag:
-            self.max_contents_count = 1
-            self.max_contents_time = 1
             self.get_bos_config()
             self.bos_client = BosClient(self.config)
-            self.file_length_map = {}
-            self._file_contents_to_add = b''
-            self._file_contents_count = 0
-            self._start_append_time = time.time()

     def get_bos_config(self):
+        '''
+        Get Bos configuration from environment variables.
+        '''
         bos_host = os.getenv("BOS_HOST")
         if not bos_host:
             raise KeyError('${BOS_HOST} is not found.')
@@ -296,6 +305,9 @@ class BosFileSystem(object):
     def set_bos_config(self, bos_ak, bos_sk, bos_sts,
                        bos_host="bj.bcebos.com"):
+        '''
+        Set Bos configuration and get a bos client according to the parameters.
+        '''
         self.config = BceClientConfiguration(
             credentials=BceCredentials(bos_ak, bos_sk),
             endpoint=bos_host,
@@ -303,6 +315,9 @@ class BosFileSystem(object):
         self.bos_client = BosClient(self.config)

     def renew_bos_client_from_server(self):
+        '''
+        Get a bos client using the ak, sk and sts token provided by visualdl.
+        '''
         import requests
         import json
         from visualdl.utils.dir import CONFIG_PATH
@@ -407,12 +422,14 @@ class BosFileSystem(object):
                     data=init_data,
                     content_md5=content_md5(init_data),
                     content_length=len(init_data))
-        except (exception.BceServerError, exception.BceHttpClientError) as e:
+        except (exception.BceServerError,
+                exception.BceHttpClientError) as e:
             if bucket_name == 'visualdl-server':  # only sts token from visualdl-server, we can renew automatically
                 self.renew_bos_client_from_server()
                 # we should add a judgement for case 2
                 try:
-                    self.bos_client.get_object_meta_data(bucket_name, object_key)
+                    self.bos_client.get_object_meta_data(
+                        bucket_name, object_key)
                 except exception.BceError:
                     # the file not exists, then create the file
                     self.bos_client.append_object(
@@ -454,16 +471,30 @@ class BosFileSystem(object):
             self._file_contents_count = 0
             self._start_append_time = time.time()

-    def write(self, filename, file_content, binary_mode=False):
-        self.append(filename, file_content, binary_mode=False)
-
-        # bucket_name, object_key = BosFileSystem._get_object_info(filename)
-        # self.bos_client.append_object(bucket_name=bucket_name,
-        #                               key=object_key,
-        #                               data=file_content,
-        #                               content_md5=content_md5(file_content),
-        #                               content_length=len(file_content))
+    def write(self, filename, file_content, binary_mode=False, append=True):
+        if append:
+            self.append(filename, file_content, binary_mode=False)
+        else:
+            bucket_name, object_key = get_object_info(filename)
+            try:
+                self.bos_client.put_object(
+                    bucket_name=bucket_name,
+                    key=object_key,
+                    data=file_content,
+                    content_length=len(file_content),
+                    content_md5=content_md5(file_content))
+            except (exception.BceServerError,
+                    exception.BceHttpClientError) as e:  # sts token invalid
+                if bucket_name == 'visualdl-server':  # only sts token from visualdl-server, we can renew automatically
+                    self.renew_bos_client_from_server()
+                    self.bos_client.put_object(
+                        bucket_name=bucket_name,
+                        key=object_key,
+                        data=file_content,
+                        content_length=len(file_content),
+                        content_md5=content_md5(file_content))
+                else:
+                    raise e  # user defined bos token, we have no way to renew it, so throw the exception

     def walk(self, dir):
         class WalkGenerator():
......
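Note on the new `append` flag: `write(..., append=True)` keeps the original buffered `append_object` behavior used for growing log files, while `append=False` issues a single `put_object` that replaces the object, which is what `ModelConvertApi` needs for one-shot artifacts. A usage sketch (bucket name and paths are placeholders, and BOS credentials are assumed to be set in the environment):

```python
from visualdl.io.bfile import BosFileSystem

fs = BosFileSystem()  # reads BOS_HOST/BOS_AK/BOS_SK from the environment

# Append-style write: contents are buffered and appended to the object.
fs.write('bos://my-bucket/logs/vdlrecords.log', b'new-record', append=True)

# Overwrite-style write: one put_object call replaces the object.
fs.write('bos://my-bucket/onnx2paddle/abc123.tar', b'tar-bytes', append=False)
```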
@@ -13,4 +13,4 @@
 # limitations under the License.
 # =======================================================================

-vdl_version = '2.5.1'
+vdl_version = '2.5.2'